content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
from model import Graph
app = Flask(__name__)
cors = CORS(app, resources={r'/': {"origins": "http://localhost:3000"}})
@app.route('/', methods=['POST', ])
@cross_origin()
if __name__ == '__main__':
app.run(debug=True) | [
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
11,
3272,
62,
47103,
198,
6738,
2746,
1330,
29681,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
66,
669,
796,
327,
20673... | 2.745614 | 114 |
if __name__ == '__main__':
print(Soluiton().isHappy(19))
"""
Time Complexity = O(k), k is the number of steps to get the happy number
Space Complexity = O(k)
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with any positive integer,
replace the number by the sum of the squares of its digits, and repeat the process until the number
equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those
numbers for which this process ends in 1 are happy numbers.
Example:
Input:Input: 19
19 Output:Output: true
true Explanation:
Explanati 12 + 92 = 82
82 + 22 = 68
62 + 82 = 100
12 + 02 + 02 = 1
"""
| [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
36949,
5013,
261,
22446,
271,
25082,
7,
1129,
4008,
628,
628,
628,
198,
220,
220,
220,
220,
220,
220,
220,
37227,
628,
220,
220,
220,
220,
... | 2.397906 | 382 |
from ._internal.frameworks.mlflow import load
from ._internal.frameworks.mlflow import save
from ._internal.frameworks.mlflow import load_runner
from ._internal.frameworks.mlflow import import_from_uri
__all__ = ["import_from_uri", "load", "load_runner", "save"]
| [
6738,
47540,
32538,
13,
19298,
19653,
13,
76,
1652,
9319,
1330,
3440,
198,
6738,
47540,
32538,
13,
19298,
19653,
13,
76,
1652,
9319,
1330,
3613,
198,
6738,
47540,
32538,
13,
19298,
19653,
13,
76,
1652,
9319,
1330,
3440,
62,
16737,
198,
... | 3.219512 | 82 |
from app import app
from floodgate.client_state_machine.layer1_transition_map.reply import ReplyState as _TransitionState
from floodgate.client_state_machine.layer2_decoration_event.clean_up_vote import clean_up_vote
def create():
"""ステート生成"""
return _DecoratedState()
| [
6738,
598,
1330,
598,
198,
6738,
6947,
10494,
13,
16366,
62,
5219,
62,
30243,
13,
29289,
16,
62,
7645,
653,
62,
8899,
13,
47768,
1330,
14883,
9012,
355,
4808,
8291,
653,
9012,
198,
6738,
6947,
10494,
13,
16366,
62,
5219,
62,
30243,
... | 3.043478 | 92 |
from typing import Tuple
import logging
import math
from easyagents import core
class _LogCallbackBase(core.AgentCallback):
"""Base class for Callback loggers"""
def __init__(self, logger: logging.Logger = None, prefix: str = None):
"""Writes all calls to logger with the given prefix.
Args:
logger: the logger to log (if None a new logger with level debug is created)
prefix: a string written in front of each log msg
"""
self._logger = logger
if self._logger is None:
self._logger = logging.getLogger()
self._prefix = prefix
if self._prefix is None:
self._prefix = ''
class _Callbacks(_LogCallbackBase):
"""Logs all AgentCallback calls to a Logger"""
def __init__(self, logger: logging.Logger = None, prefix: str = None):
"""Writes all calls to a callback function to logger with the given prefix.
Args:
logger: the logger to log (if None a new logger with level debug is created)
prefix: a string written in front of each log msg
"""
super().__init__(logger=logger, prefix=prefix)
class _AgentContext(_LogCallbackBase):
"""Logs the agent context and its subcontexts after every training iteration / episode played """
class Agent(_LogCallbackBase):
"""Logs agent activities to a python logger."""
class Duration(_LogCallbackBase):
"""Logs training / play duration definition summary to a logger."""
class Iteration(_LogCallbackBase):
"""Logs training iteration summaries to a python logger."""
def __init__(self, eval_only:bool=False, logger: logging.Logger = None, prefix: str = None):
"""Logs the completion of each training iteration. On an iteration with policy evaluation the
current average reward/episode and steps/episode is logged as well.
Args:
eval_only: if set a log is only created if the policy was re-evaluated in the current iteration.
logger: the logger to log (if None a new logger with level debug is created)
prefix: a string written in front of each log msg
"""
self._eval_only:bool=eval_only
super().__init__(logger=logger,prefix=prefix)
class Step(_LogCallbackBase):
"""Logs each environment step to a python logger."""
| [
6738,
19720,
1330,
309,
29291,
198,
11748,
18931,
198,
11748,
10688,
198,
198,
6738,
2562,
49638,
1330,
4755,
628,
198,
4871,
4808,
11187,
47258,
14881,
7,
7295,
13,
36772,
47258,
2599,
198,
220,
220,
220,
37227,
14881,
1398,
329,
4889,
... | 2.83274 | 843 |
import os
import unittest
import struct
from tempfile import mkdtemp
from shutil import rmtree
import errno
from openmdao.api import Problem, Component, Group, ExecComp, FileRef
from openmdao.util.file_util import build_directory
if __name__ == '__main__':
unittest.main()
| [
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
2878,
198,
6738,
20218,
7753,
1330,
33480,
67,
29510,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
11748,
11454,
3919,
198,
198,
6738,
1280,
9132,
5488,
13,
15042,
1330,
206... | 3.120879 | 91 |
# Copyright (C) 2018-2019 Amano Team <contact@amanoteam.ml>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import html
import re
import aiohttp
from amanobot.namedtuple import InlineKeyboardMarkup
from config import bot, version
| [
2,
15069,
357,
34,
8,
2864,
12,
23344,
42614,
78,
4816,
1279,
32057,
31,
10546,
1258,
321,
13,
4029,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
198,
2,
428,
... | 3.766667 | 330 |
import unittest2 as unittest
from celery import events
| [
11748,
555,
715,
395,
17,
355,
555,
715,
395,
198,
198,
6738,
18725,
1924,
1330,
2995,
628,
628,
198
] | 3.157895 | 19 |
"""Tree-LSTM encoder.
Based on Tai et al., 2015,
"Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks."
Actually works on any DAG.
By convention, the root of the tree has only outgoing edges.
In DAG terminology, this means we start at sink nodes
and end at source nodes.
"""
import theano
from theano import tensor as T
from theano.ifelse import ifelse
import __init__ as ntu
from .. import log
def encode_child_sum(x_vecs, topo_order, adj_mat, c0, h0, W, U, Uf):
"""Run a child-sum tree-LSTM on a DAG.
Args:
x_vecs: n x e vector of node embeddings
topo_order: a permutation of range(n) that gives a topological sort
i.e. topo_order[i]'s children are in topo_order[:i]
adj_mat: matrix where adj_mat[i,j] == 1 iff there is an i -> j edge.
c0, h0, W, U, Uf: parameters of sizes 1 x d, 1 x d, e x 4d, d x 3d, d x d, respectively.
"""
n = x_vecs.shape[0]
d = U.shape[0]
(c_list, h_list), _ = theano.scan(
recurrence, sequences=[topo_order],
outputs_info=[T.zeros((n, d)), T.zeros((n, d))],
non_sequences=[n, d, x_vecs, adj_mat, c0, h0, W, U, Uf])
return c_list[-1], h_list[-1]
| [
37811,
27660,
12,
43,
2257,
44,
2207,
12342,
13,
198,
198,
15001,
319,
11144,
2123,
435,
1539,
1853,
11,
198,
1,
35453,
12449,
5109,
10858,
602,
3574,
198,
27660,
12,
44909,
1522,
5882,
10073,
12,
40596,
14059,
27862,
526,
198,
198,
2... | 2.474684 | 474 |
#! /usr/bin/env python
from setuptools import setup
setup(
name="unbalancedot",
distname="",
version='0.0.1',
description="Functionals derived from the theory of entropically "
"regularized unbalanced optimal transport ",
author='Thibault Sejourne',
author_email='thibault.sejourne@ens.fr',
url='https://github.com/thibsej/unbalanced-ot-functionals',
packages=['unbalancedot', 'unbalancedot.tests'],
install_requires=[
'numpy',
'torch',
'scipy',
'pytest'
],
license="MIT",
)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
403,
27753,
313,
1600,
198,
220,
220,
220,
1233,
3672,
2625,
1600,
198,
220,
220,
220,
2196... | 2.208178 | 269 |
# Blender modules
# 2020-03-28
import bpy
from bpy import *
import bpy.path
from bpy.path import abspath
from mathutils import *
# Python standard modules
from urllib.parse import urlencode
from urllib.request import *
from html.parser import *
from smtplib import *
from email.mime.text import MIMEText
import time
import platform
import os
import codecs
import base64
from math import *
import pickle
import shutil
import subprocess
import sys
import traceback
import copy
from .BioBlender2 import *
from .BB2_GUI_PDB_IMPORT import *
from . import BB2_PANEL_VIEW as panel
from .BB2_MLP_PANEL import *
from .BB2_PDB_OUTPUT_PANEL import *
from .BB2_OUTPUT_PANEL import *
from .BB2_NMA_PANEL import *
from .BB2_EP_PANEL import *
geList = []
global Parents
Parents = {}
bpy.utils.register_class(bb2_operator_interactive)
bpy.utils.register_class(bb2_operator_ge_refresh)
if __name__ == "__main__":
print("PHYSICS_SIM module created")
| [
2,
1086,
2194,
13103,
201,
198,
2,
12131,
12,
3070,
12,
2078,
201,
198,
11748,
275,
9078,
201,
198,
6738,
275,
9078,
1330,
1635,
201,
198,
11748,
275,
9078,
13,
6978,
201,
198,
6738,
275,
9078,
13,
6978,
1330,
2352,
6978,
201,
198,
... | 2.534005 | 397 |
from django.db import models
from django.contrib.auth.models import User
from utils import get_domain
from django.template.defaultfilters import slugify
import settings
#from sorl.thumbnail import ImageField
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
201,
198,
6738,
3384,
4487,
1330,
651,
62,
27830,
201,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,... | 2.9 | 80 |
import os
import torch
import torchvision
import torchvision.transforms as transforms
from trainer import CapsNetTrainer
import yaml, argparse
from utils.util import ensure_dir
from logger.logger import Logger #
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Capsules Networks')
parser.add_argument('-c', '--config', default='config.yaml', type=str,
help='config file path (default: config.yaml)')
parser.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
# Use multiple GPUs? '--multi_gpu' will store multi_gpu as True
parser.add_argument('--multi_gpu', action='store_true',
help='Flag whether to use multiple GPUs.')
# Select GPU device
parser.add_argument('--disable_gpu', action='store_true',
help='Flag whether to use disable GPU')
args = parser.parse_args()
main(args)
| [
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
6738,
21997,
1330,
23534,
7934,
2898,
10613,
198,
11748,
331,
43695,
11,
1822,
29572,
198,
6738,
3384,
4487,
13,
22602,
... | 2.695652 | 368 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# transform_config_hw.py -- Generate svn_private_config.h
# from svn_private_config.hw while editing SVN_BUILD_HOST
import os
import re
import sys
import platform
_wincpu_map = {
'x86': 'x86',
'x64': 'x86_64',
'amd64': 'x86_64',
'x86_64': 'x86_64',
'ia64': 'ia64',
'powerpc': 'powerpc',
'alpha': 'alpha',
}
_arch_map = _wincpu_map.copy()
_arch_map.update({
'win32': 'x86',
})
_interesting_rx = re.compile(
r'^\s*#\s*define\s+SVN_BUILD_HOST\s+(?P<host>"[^"]+")\s*$')
if __name__ == '__main__':
if os.name != 'nt':
usage_and_exit('This script should only be run on Windows')
if len(sys.argv) < 3 or len(sys.argv) > 4:
usage_and_exit('Incorrect number of arguments')
architecture = sys.argv[1]
input_filepath = sys.argv[2]
if len(sys.argv) > 3:
output_file = open(sys.argv[3], 'w')
else:
output_file = sys.stdout
main(input_filepath, output_file, architecture)
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 2.693598 | 656 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
# Generated by Django 2.2.4 on 2021-01-12 15:15
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
19,
319,
33448,
12,
486,
12,
1065,
1315,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.1 | 40 |
a = int(raw_input())
seta = set(map(int,raw_input().split()))
n = int(raw_input())
for every in range(n):
operation = (raw_input() + " ").split(" ")
temp=set(map(int,raw_input().split()))
eval("seta.{0}(temp)".format(operation[0]))
print sum(seta) | [
64,
796,
493,
7,
1831,
62,
15414,
28955,
198,
2617,
64,
796,
900,
7,
8899,
7,
600,
11,
1831,
62,
15414,
22446,
35312,
3419,
4008,
198,
77,
796,
493,
7,
1831,
62,
15414,
28955,
198,
198,
1640,
790,
287,
2837,
7,
77,
2599,
198,
22... | 2.284483 | 116 |
'''074 - MAIOR E MENOR COM TUPLA.
PROGRAMA QUE MOSTRE 05 VALORES QUE TEM NA TUPLA DE FORMA ALEATÓRIA E DIGA QUAL O MENOR E O MAIOR.'''
from random import randint
numeros = (randint(0, 10), randint(0, 10), randint(0, 10), randint(0, 10), randint(0, 10))
print('Os valores gerados foram: ', end='')
for n in numeros:
print(f'{n}', end=' ')
print(f'\nO MAIOR valor gerado foi: {max(numeros)}')
print(f'O MENOR valor gerado foi: {min(numeros)}')
| [
7061,
6,
2998,
19,
532,
8779,
41254,
412,
41597,
1581,
9440,
309,
52,
45710,
13,
198,
4805,
7730,
24115,
32,
1195,
8924,
337,
10892,
2200,
8870,
26173,
1581,
1546,
1195,
8924,
309,
3620,
11746,
309,
52,
45710,
5550,
7473,
5673,
32318,
... | 2.269036 | 197 |
import time
import msvcrt
__all__ = ['read_input']
| [
11748,
640,
201,
198,
11748,
13845,
85,
6098,
83,
201,
198,
201,
198,
201,
198,
834,
439,
834,
796,
37250,
961,
62,
15414,
20520,
201,
198,
201,
198
] | 2.142857 | 28 |
from rss.examples.proton_proton import proton_proton_collision
from rss.examples.lepton_lepton import lepton_lepton_collision
| [
6738,
374,
824,
13,
1069,
12629,
13,
1676,
1122,
62,
1676,
1122,
1330,
386,
1122,
62,
1676,
1122,
62,
26000,
1166,
198,
6738,
374,
824,
13,
1069,
12629,
13,
293,
10972,
62,
293,
10972,
1330,
443,
10972,
62,
293,
10972,
62,
26000,
11... | 2.863636 | 44 |
'''
This module provides a DataStore based on in-memory dictionaries.
@author: Thomas Wanderer
'''
# free.dm Imports
from freedm.data.store import DataStore
from freedm.data.object import DataObject
from typing import Union, Any, Type
class MemoryStore(DataStore):
'''
A data store which reads and writes its data simply to a dictionary in the memory.
The token has no length/depth limit as every new token get mapped to a new dictionary.
'''
# Attributes
_persistent: bool = False
_writable: bool = True
_default_name: str = 'Cache'
_default_filetype: str = ''
description: str = 'An ephemeral memory store'
# Set up the Memory store
def __init__(self, *args, **kwargs):
'''
Just sets the path to something else to avoid a warning
'''
# Init the store
super().__init__(*args, **kwargs)
# Set the path to "False" so this store never gets a path configured by a data manager
self.path = False
# Implement data setters & getter
# Implement domain loading and unloading | [
7061,
6,
198,
1212,
8265,
3769,
257,
6060,
22658,
1912,
319,
287,
12,
31673,
48589,
3166,
13,
198,
31,
9800,
25,
5658,
22420,
11882,
198,
7061,
6,
198,
198,
2,
1479,
13,
36020,
1846,
3742,
198,
6738,
13459,
76,
13,
7890,
13,
8095,
... | 2.884211 | 380 |
from rest_framework import generics
from django.http import JsonResponse
from .serializers import BoardSerializer, CardSerializer, TaskListSerializer, BoardUserRelationshipSerializer, BoardDetailSerializer, TaskListDetailSerializer, UserSerializer
from .models import Board, TaskList, Card, BoardUserRelationship, User
#
| [
6738,
1334,
62,
30604,
1330,
1152,
873,
198,
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
198,
6738,
764,
46911,
11341,
1330,
5926,
32634,
7509,
11,
5172,
32634,
7509,
11,
15941,
8053,
32634,
7509,
11,
5926,
12982,
47117,
... | 3.904762 | 84 |
import os
import pathlib
import pytest
# Import Operator
import yaml
from airflow.models import Connection, DagRun
from airflow.models import TaskInstance as TI
from airflow.utils.session import create_session
from astro import sql as aql
@pytest.fixture(scope="session", autouse=True)
| [
11748,
28686,
198,
11748,
3108,
8019,
198,
198,
11748,
12972,
9288,
198,
198,
2,
17267,
35946,
198,
11748,
331,
43695,
198,
6738,
45771,
13,
27530,
1330,
26923,
11,
32167,
10987,
198,
6738,
45771,
13,
27530,
1330,
15941,
33384,
355,
31598... | 3.6375 | 80 |
import hashlib
import math
from http import HTTPStatus
from fastapi import Request
from lnurl import ( # type: ignore
LnurlErrorResponse,
LnurlPayActionResponse,
LnurlPayResponse,
)
from starlette.exceptions import HTTPException
from lnbits.core.services import create_invoice
from lnbits.utils.exchange_rates import get_fiat_rate_satoshis
from . import lnurlp_ext
from .crud import increment_pay_link
@lnurlp_ext.get(
"/api/v1/lnurl/{link_id}",
status_code=HTTPStatus.OK,
name="lnurlp.api_lnurl_response",
)
@lnurlp_ext.get(
"/api/v1/lnurl/cb/{link_id}",
status_code=HTTPStatus.OK,
name="lnurlp.api_lnurl_callback",
)
| [
11748,
12234,
8019,
198,
11748,
10688,
198,
6738,
2638,
1330,
14626,
19580,
198,
198,
6738,
3049,
15042,
1330,
19390,
198,
6738,
300,
77,
6371,
1330,
357,
220,
1303,
2099,
25,
8856,
198,
220,
220,
220,
406,
77,
6371,
12331,
31077,
11,
... | 2.54023 | 261 |
# -*- coding: utf-8 -*-
""" Tests for converter functions
Run with
nosetests test_convert.py -s -v
"""
import pytest
# from nose.tools import *
import numpy as np
import reda.eis.convert as sip_convert
import numpy.testing
from_keys = sip_convert.from_converters.keys()
class TestClass_input_styles(object):
"""
Test the three input styles:
* 1D
* 2D - one spectrum
* 2D - multiple spectra
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
30307,
329,
38394,
5499,
198,
198,
10987,
351,
198,
198,
39369,
316,
3558,
1332,
62,
1102,
1851,
13,
9078,
532,
82,
532,
85,
198,
37811,
198,
11748,
12972,
9288,
... | 2.564706 | 170 |
#!/usr/bin/env python3.5
import datetime
import time
import os
import argparse
import json
import logging
import sys
from queue_manager import QueuesManager
from api_controller import WorkersManager
from sql_builder import DBExtractor
from classification.ml_classifier import UserClassifier
if __name__ == '__main__':
initialize_logger(os.path.join(sys.path[0], 'logging'))
parser = argparse.ArgumentParser(description='This a social network site crawler')
parser.add_argument('-t',
action='store',
dest='running_time',
required = True,
help='The crawling running time')
parser.add_argument("-s",
dest='start_date',
help="Crawling and classification of tweets start date - format YYYY-MM-DD",
required=True,
type=valid_date)
parser.add_argument('-e', action='store',
dest='end_date',
help='Crawling and classification of tweets end date',
required=True,
type=valid_date)
results = parser.parse_args()
my_crawler = Crawler(float(results.running_time), results.start_date, results.end_date)
my_crawler.crawl()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
20,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
6738,
16834,
62,
37153,
133... | 2.200658 | 608 |
#file used is romeo.txt
fname = input("Enter file name: ")
fh = open(fname)
lst = list()
for line in fh:
for i in line.split():
if not i in lst:
lst.append(i)
lst.sort()
print(lst)
#Output
#['Arise', 'But', 'It', 'Juliet', 'Who', 'already', 'and', 'breaks', 'east', 'envious', 'fair', 'grief', 'is', 'kill', 'light', 'moon', 'pale', 'sick', 'soft', 'sun', 'the', 'through', 'what', 'window', 'with', 'yonder']
| [
2,
7753,
973,
318,
374,
462,
78,
13,
14116,
198,
69,
3672,
796,
5128,
7203,
17469,
2393,
1438,
25,
366,
8,
198,
69,
71,
796,
1280,
7,
69,
3672,
8,
198,
75,
301,
796,
1351,
3419,
198,
1640,
1627,
287,
277,
71,
25,
198,
220,
220... | 2.253886 | 193 |
from __future__ import print_function
from nose.tools import assert_equal
from numpy.testing import assert_almost_equal
from matplotlib.transforms import Affine2D, BlendedGenericTransform
from matplotlib.path import Path
from matplotlib.scale import LogScale
import numpy as np
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
9686,
13,
31391,
1330,
6818,
62,
40496,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
28177,
62,
40496,
198,
6738,
2603,
29487,
8019,
13,
7645,
23914,
1330,
6708,
500,
17,
... | 3.808219 | 73 |
from .interpolate import variety_lex_leading_terms, nf_lex_points
from .PyPolyBoRi import easy_linear_factors
def easy_linear_polynomials(p):
r"""
Get linear polynomials implied by given polynomial.
EXAMPLES::
sage: from sage.rings.polynomial.pbori.frontend import x
sage: from sage.rings.polynomial.pbori.easy_polynomials import easy_linear_polynomials
sage: easy_linear_polynomials(x(1)*x(2) + 1)
[x(1) + 1, x(2) + 1]
sage: easy_linear_polynomials(x(1)*x(2) + 0)
[]
sage: easy_linear_polynomials(x(0)*x(1) + x(0)*x(2) + 1)
[x(0) + 1, x(1) + x(2) + 1]
"""
res = []
if p.deg() >= 2:
if p.vars_as_monomial().deg() > 8:
opp = p + 1
for q in easy_linear_factors(opp):
res.append(q + 1)
else:
res = easy_linear_polynomials_via_interpolation(p)
return res
def easy_linear_polynomials_via_interpolation(p):
r"""
Get linear polynomials implied by given polynomial using interpolation of the variety.
TESTS::
sage: from sage.rings.polynomial.pbori.frontend import x
sage: from sage.rings.polynomial.pbori.easy_polynomials import easy_linear_polynomials_via_interpolation
sage: easy_linear_polynomials_via_interpolation(x(1)*x(2) + 1)
[x(1) + 1, x(2) + 1]
sage: easy_linear_polynomials_via_interpolation(x(1)*x(2) + 0)
[]
sage: easy_linear_polynomials_via_interpolation(x(0)*x(1) + x(0)*x(2) + 1)
[x(0) + 1, x(1) + x(2) + 1]
"""
res = []
p_vars = p.vars_as_monomial()
space = p_vars.divisors()
zeros = p.zeros_in(space)
lex_leads = variety_lex_leading_terms(zeros, p_vars)
for m in lex_leads:
if m.deg() == 1:
red = m + nf_lex_points(m, zeros)
if red.lead_deg() == 1: # normal ordering
res.append(red)
return res
| [
6738,
764,
3849,
16104,
378,
1330,
4996,
62,
2588,
62,
12294,
62,
38707,
11,
299,
69,
62,
2588,
62,
13033,
198,
6738,
764,
20519,
34220,
16635,
49,
72,
1330,
2562,
62,
29127,
62,
22584,
669,
628,
198,
4299,
2562,
62,
29127,
62,
3542... | 2.019895 | 955 |
def bresenham(start, end):
"""Yield a line ray from start to end
"""
((x0, y0), (x1, y1)) = (start, end)
dx, dy = (x1 - x0), (y1 - y0)
x_step, dx = (1, dx) if dx >= 0 else (-1, -dx)
y_step, dy = (1, dy) if dy >= 0 else (-1, -dy)
if dx > dy:
xx, xy, yx, yy = x_step, 0, 0, y_step
else:
dx, dy = dy, dx # note the swap here
xx, xy, yx, yy = 0, y_step, x_step, 0
error = 2 * dy - dx
y = 0
for x in range(dx + 1):
yield x0 + x * xx + y * yx, y0 + x * xy + y * yy
if error >= 0:
y += 1
error = error - 2 * dx
error = error + 2 * dy
if __name__ == '__main__':
import random
from kelte.vendored import click
@click.command()
@click.argument('x0', default=random.randint(0, 80), type=int)
@click.argument('y0', default=random.randint(0, 50), type=int)
@click.argument('x1', default=random.randint(0, 80), type=int)
@click.argument('y1', default=random.randint(0, 50), type=int)
cli()
| [
4299,
275,
411,
268,
2763,
7,
9688,
11,
886,
2599,
198,
220,
220,
220,
37227,
56,
1164,
257,
1627,
26842,
422,
923,
284,
886,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
14808,
87,
15,
11,
331,
15,
828,
357,
87,
16,
11,
331,
... | 2.041502 | 506 |
from malcolm.core import Part, config_tag, NumberMeta, PartRegistrar, \
APartName, Widget
class CounterPart(Part):
"""Defines a counter `Attribute` with zero and increment `Method` objects"""
def zero(self):
"""Zero the counter attribute"""
self.counter.set_value(0)
def increment(self):
"""Add one to the counter attribute"""
self.counter.set_value(self.counter.value + 1)
| [
6738,
6428,
18414,
13,
7295,
1330,
2142,
11,
4566,
62,
12985,
11,
7913,
48526,
11,
2142,
8081,
396,
20040,
11,
3467,
198,
220,
220,
220,
3486,
433,
5376,
11,
370,
17484,
628,
198,
4871,
15034,
7841,
7,
7841,
2599,
198,
220,
220,
220... | 2.821192 | 151 |
import re
import spacy
import pathlib
import torch
import dill
temp = pathlib.PosixPath
pathlib.PosixPath = pathlib.WindowsPath
| [
11748,
302,
198,
11748,
599,
1590,
198,
11748,
3108,
8019,
198,
11748,
28034,
198,
11748,
288,
359,
198,
29510,
796,
3108,
8019,
13,
21604,
844,
15235,
198,
6978,
8019,
13,
21604,
844,
15235,
796,
3108,
8019,
13,
11209,
15235,
628,
198
... | 3.170732 | 41 |
from .convnet import ConvNet
from .densenet import DenseNet
from .densenet169 import DenseNet169
from .resnet import ResNet18
| [
6738,
764,
42946,
3262,
1330,
34872,
7934,
198,
6738,
764,
67,
18756,
316,
1330,
360,
1072,
7934,
198,
6738,
764,
67,
18756,
316,
22172,
1330,
360,
1072,
7934,
22172,
198,
6738,
764,
411,
3262,
1330,
1874,
7934,
1507,
198
] | 3.230769 | 39 |
import cirq
import numpy as np
qubits = [cirq.GridQubit(0, i) for i in range(3)]
alice = qubits[:2]
bob = [qubits[-1]]
x = 0.2
y = 0.4
q0 = cirq.LineQubit(0)
sim = cirq.Simulator(seed=3)
c = cirq.Circuit([cirq.X(q0)**x, cirq.Y(q0)**y])
message = sim.simulate(c)
c.append(cirq.measure(q0, key='test'))
#print(c)
run(c, 'test', 1000)
print("Input message:", message.bloch_vector_of(q0))
circuit = cirq.Circuit()
bell_state(alice[1], bob[0], circuit)
teleport(qubits, circuit, x, y)
| [
11748,
10774,
80,
198,
11748,
299,
32152,
355,
45941,
198,
198,
421,
9895,
796,
685,
66,
343,
80,
13,
41339,
48,
549,
270,
7,
15,
11,
1312,
8,
329,
1312,
287,
2837,
7,
18,
15437,
198,
282,
501,
796,
627,
9895,
58,
25,
17,
60,
... | 2.073276 | 232 |
from flask import render_template, url_for, Blueprint, redirect, flash, request, current_app, send_from_directory
views = Blueprint('views', __name__)
@views.route('/')
@views.route('/index') | [
6738,
42903,
1330,
8543,
62,
28243,
11,
19016,
62,
1640,
11,
39932,
11,
18941,
11,
7644,
11,
2581,
11,
1459,
62,
1324,
11,
3758,
62,
6738,
62,
34945,
198,
198,
33571,
796,
39932,
10786,
33571,
3256,
11593,
3672,
834,
8,
198,
198,
31... | 3.327586 | 58 |
"""
Deep inverse problems in Python
models submodule
A Model object transforms a variable z to a new variable w
"""
from .resnet.resnet import ResNet5Block, ResNet
from .unroll.unroll import UnrollNet
| [
37811,
198,
29744,
34062,
2761,
287,
11361,
198,
198,
27530,
850,
21412,
198,
32,
9104,
2134,
31408,
257,
7885,
1976,
284,
257,
649,
7885,
266,
198,
37811,
198,
198,
6738,
764,
411,
3262,
13,
411,
3262,
1330,
1874,
7934,
20,
12235,
11... | 3.561404 | 57 |
#importing required modules
import requests
from bs4 import BeautifulSoup
import json
import os
import datetime
import itertools
from requests import ConnectionError
#scraping india covid data
URL_india = "https://www.mygov.in/covid-19/"
r_india = requests.get(URL_india)
htmlContent_india = r_india.content
soup_india = BeautifulSoup(htmlContent_india,'html.parser')
#scraping covid news
URL_news = "https://www.google.com/search?q=covid+news+world&sxsrf=ALeKk00b35wAS9ijIarr1VQGONbceXOxjQ:1623640431140&source=lnms&tbm=nws&sa=X&ved=2ahUKEwjJ94_5k5bxAhVTILcAHf29CZ8Q_AUoAXoECAEQAw&biw=1280&bih=591&dpr=1.5"
r_news = requests.get(URL_news)
htmlContent_news = r_news.content
soup_news = BeautifulSoup(htmlContent_news,'html.parser')
#scraping covid symptoms
URL_symptoms = 'https://www.cdc.gov/coronavirus/2019-ncov/symptoms-testing/symptoms.html'
r_symptoms = requests.get(URL_symptoms)
htmlContent_symptoms = r_symptoms.content
soup_symptoms = BeautifulSoup(htmlContent_symptoms,'html.parser')
#scraping covid intro
URL_intro = 'https://openwho.org/courses/introduction-to-ncov'
r_intro = requests.get(URL_intro)
htmlContent_intro = r_intro.content
soup_intro = BeautifulSoup(htmlContent_intro,'html.parser')
#covid intro fuction
#covid symptoms function
#covid news function
#india total count function
#world code starts here
#scraping world deaths cases
URL_world_deaths = 'https://www.indexmundi.com/coronavirus/'
r_world_deaths = requests.get(URL_world_deaths)
htmlContent_world_deaths = r_world_deaths.content
soup_world_deaths = BeautifulSoup(htmlContent_world_deaths,'html.parser')
#scraping world data
URL_world = 'https://www.worldometers.info/coronavirus/'
r_world = requests.get(URL_world)
htmlContent_world = r_world.content
soup_world = BeautifulSoup(htmlContent_world,'html.parser')
#scraping world vaccination data
URL_world_vacination = 'https://www.pharmaceutical-technology.com/covid-19-vaccination-tracker/'
r_world_vaccination = requests.get(URL_world_vacination)
htmlContent_world_vaccination = r_world_vaccination.content
soup_world_vaccination = BeautifulSoup(htmlContent_world_vaccination,'html.parser')
#world death count
#world vacination
#world vacinaton pervios day
#world total cases
#world active cases
# world code ends here
# search by state code starts here
_url = 'https://www.mohfw.gov.in/data/datanew.json'
# path to current file
path = os.path.dirname(os.path.realpath(__file__))
#serch by state code ends here
| [
2,
11748,
278,
2672,
13103,
220,
201,
198,
11748,
7007,
201,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
201,
198,
11748,
33918,
201,
198,
11748,
28686,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
340,
861,
10141,
201,
198,
6738... | 2.541955 | 1,013 |
from .alias import Base, Column, Integer, String, Float, Boolean, relationship
| [
6738,
764,
26011,
1330,
7308,
11,
29201,
11,
34142,
11,
10903,
11,
48436,
11,
41146,
11,
2776,
628
] | 4.444444 | 18 |
# range
r = 9
bound = round(r/2)
for y in range(r):
for x in range(r):
if (x >= bound-y AND x <= bound+y):
print("*", end='')
else:
print(" ", end='')
print();
| [
2,
2837,
198,
81,
796,
860,
198,
7784,
796,
2835,
7,
81,
14,
17,
8,
198,
198,
1640,
331,
287,
2837,
7,
81,
2599,
198,
220,
329,
2124,
287,
2837,
7,
81,
2599,
198,
220,
220,
220,
611,
357,
87,
18189,
5421,
12,
88,
5357,
2124,
... | 2.078652 | 89 |
""" class which builds most of our basic models """
import keras.layers as layers
import keras.backend as K
from keras.models import Model
import tensorflow as tf
from ball import MAX_CTX_CHAR_LENGTH, MAX_TITLE_CHAR_LNGTH
# pylint: disable=too-many-locals,too-many-instance-attributes
# pylint: disable=too-many-arguments,too-many-branches,too-many-statements
# pylint: disable=cell-var-from-loop
#############################
# Model Helper for Attention
#############################
def build_attn_with_layer(seq, controller, layer, cell_size=300):
""" Build attention mechanism in computation graph. """
controller_repeated = layers.RepeatVector(20)(controller)
controller_repeated = layer(controller_repeated)
attention = layers.Lambda(my_dot, output_shape=(20,))([controller_repeated, seq])
attention_s = layers.Flatten()(attention)
attention = layers.Lambda(to_prob, output_shape=(20,))(attention_s)
attention_repeated = layers.RepeatVector(cell_size)(attention)
attention_repeated = layers.Permute((2, 1))(attention_repeated)
weighted = layers.merge([attention_repeated, seq], mode='mul')
summed = layers.Lambda(sum_seq, output_shape=(cell_size,))(weighted)
return summed, attention
###################
# Lambda Functions
###################
def sum_seq(seq):
""" Lambda wrapper for sum. """
return K.sum(seq, axis=1, keepdims=False)
def to_prob(vec):
""" Lambda wrapper for softmax. """
return K.softmax(vec)
def my_dot(inputs):
""" Lambda wrapper for dot product. """
ele_wise = inputs[0] * inputs[1]
return K.sum(ele_wise, axis=-1, keepdims=True)
######################
# Model Builder Class
######################
class ModelBuilder(object):
""" Class that builds models based off of a config """
def __init__(self, config, word_vecs, ch_vecs, features, ent_vecs):
""" Initialize all layers. """
self._config = config
self._feature_size = len(features)
self.emb_word = layers.Embedding(len(word_vecs),
300,
input_length=20,
weights=[word_vecs],
trainable=True,
name='word-emb')
self.emb_ent = layers.Embedding(len(ent_vecs),
300,
weights=[ent_vecs],
trainable=True,
name='ent-emb')
# Character CNN over candidate entity title and context
if self._config['character_cnn']:
self.window_size = config['ccnn_window_size']
# universal character embeddings
self.emb_ch = layers.Embedding(len(ch_vecs),
300,
weights=[ch_vecs],
trainable=True,
name='char-emb')
self.ent_reduc = layers.Dense(300, name='ent-reduc-layer')
self.ch_ctx_cnn = layers.Conv1D(100,
self.window_size,
activation='relu',
name='ch-cnn')
self.ch_title_cnn = layers.Conv1D(100,
self.window_size,
activation='relu',
name='ch-title-cnn')
self.ch_ctx_pool = layers.MaxPooling1D(pool_size=MAX_CTX_CHAR_LENGTH
- self.window_size + 1,
name='ch-pool')
self.ch_title_pool = layers.MaxPooling1D(pool_size=MAX_TITLE_CHAR_LNGTH
- self.window_size + 1,
name='ch-titile-pool')
else:
self.ch_ctx_cnn = None
self.ch_ctx_pool = None
self.ch_title_cnn = None
self.ch_title_pool = None
# left and right context encoders w/ attention
self.cell_size = 300
self.left_rnn = layers.GRU(self.cell_size, return_sequences=True, name='left-rnn')
self.right_rnn = layers.GRU(self.cell_size, return_sequences=True, name='right-rnn')
self.left_attn = layers.Dense(self.cell_size, name='left-attn')
self.right_attn = layers.Dense(self.cell_size, name='right-attn')
self.lattn_dist = layers.TimeDistributed(self.left_attn, name='lattn-dist')
self.rattn_dist = layers.TimeDistributed(self.right_attn, name='rattn-tdist')
# binary classification layer
self.reduce_layer = layers.Dense(1, activation='relu', name='final-reduce-layer')
def build_trainable_model(self, neg_sample_size=4, weights=None):
"""Compiles the trainable model.
Negative sample size = how many negative candidates this model will
be trained on in addition to the gold candidate.
"""
k = neg_sample_size + 1 # candidate count
inputs = []
ent_in = layers.Input(shape=(k,), dtype='int32', name='ent_in')
inputs.append(ent_in)
l_words_in = layers.Input(shape=(20,), dtype='int32',
name='lwords_in')
r_words_in = layers.Input(shape=(20,), dtype='int32',
name='rwords_in')
inputs += [l_words_in, r_words_in]
with tf.device('/cpu:0'):
lctx_emb, rctx_emb = self.get_word_embs(l_words_in, r_words_in)
if self._config['features']:
feats_in = layers.Input(shape=(k, self._feature_size,),
dtype='float32',
name='feats_in')
inputs.append(feats_in)
if self._config['character_cnn']:
l_ch_in = layers.Input(shape=(20, MAX_CTX_CHAR_LENGTH,),
dtype='int32', name='lchars_in')
r_ch_in = layers.Input(shape=(20, MAX_CTX_CHAR_LENGTH,),
dtype='int32', name='rchars_in')
title_ch_in = layers.Input(shape=(k, MAX_TITLE_CHAR_LNGTH,),
dtype='int32',
name='titlechars_in')
inputs += [l_ch_in, r_ch_in, title_ch_in]
with tf.device('/cpu:0'):
lch_emb, rch_emb = self.get_ch_embs(l_ch_in, r_ch_in)
l_filters, r_filters = self.filter_chars(lch_emb, rch_emb)
else:
l_filters, r_filters = (None, None)
l_rnn_out = self.build_seq_output(lctx_emb, 'left', l_filters)
r_rnn_out = self.build_seq_output(rctx_emb, 'right', r_filters)
l_out = l_rnn_out
r_out = r_rnn_out
outs = []
for i in range(k):
ent_sl = layers.Lambda(lambda x: x[:, i])(ent_in)
ent_emb = self.get_ent_emb(ent_sl)
if self._config['character_cnn']:
title_ch_sl = layers.Lambda(lambda x: x[:, i, :])(title_ch_in)
tch_emb = self.get_title_ch_emb(title_ch_sl)
title_filters = self.filter_title(tch_emb)
ent_emb = layers.Concatenate(axis=1)([ent_emb, title_filters])
ent_emb = self.ent_reduc(ent_emb)
else:
title_filters = None
if self._config['features']:
feat_sl = layers.Lambda(lambda x: x[:, i, :])(feats_in)
feats = layers.Reshape((self._feature_size,))(feat_sl)
else:
feats = None
l_out, _ = build_attn_with_layer(l_rnn_out, ent_emb,
self.lattn_dist,
self.cell_size)
r_out, _ = build_attn_with_layer(r_rnn_out, ent_emb,
self.rattn_dist,
self.cell_size)
out = self.compare_and_score(l_out, r_out, ent_emb, feats)
outs.append(out)
f_out_layer = layers.Concatenate(name='concat_output')(outs)
probs = layers.Activation(K.softmax)(f_out_layer)
model = Model(inputs=inputs, outputs=probs)
model.compile(optimizer=tf.train.AdagradOptimizer(self._config['lr']),
loss='categorical_crossentropy')
if weights is not None:
model.load_weights(weights, by_name=True)
return model
def build_f(self, weights=None):
""" Builds f, the single-candidate scoring function.
This function is used for inference.
It's very similar to the previous method, in fact I'm not sure what
the difference is.
"""
# word and entity input
inputs = []
ent_in = layers.Input(shape=(1,), dtype='int32', name='ent_in')
inputs.append(ent_in)
l_words_in = layers.Input(shape=(20,), dtype='int32',
name='lwords_in')
r_words_in = layers.Input(shape=(20,), dtype='int32',
name='rwords_in')
inputs += [l_words_in, r_words_in]
with tf.device('/cpu:0'):
lctx_emb, rctx_emb = self.get_word_embs(l_words_in, r_words_in)
ent_emb = self.get_ent_emb(ent_in)
# feature input
if self._config['features']:
feats_in = layers.Input(shape=(1, self._feature_size,),
dtype='float32', name='feats_in')
inputs.append(feats_in)
feats = layers.Reshape((self._feature_size,))(feats_in)
else:
feats = None
# character level input
if self._config['character_cnn']:
l_ch_in = layers.Input(shape=(20, MAX_CTX_CHAR_LENGTH,),
dtype='int32',
name='lchars_in')
r_ch_in = layers.Input(shape=(20, MAX_CTX_CHAR_LENGTH,),
dtype='int32',
name='rchars_in')
title_ch_in = layers.Input(shape=(MAX_TITLE_CHAR_LNGTH,),
dtype='int32',
name='titlechars_in')
inputs += [l_ch_in, r_ch_in, title_ch_in]
with tf.device('/cpu:0'):
lch_emb, rch_emb = self.get_ch_embs(l_ch_in, r_ch_in)
tch_emb = self.get_title_ch_emb(title_ch_in)
l_filters, r_filters = self.filter_chars(lch_emb, rch_emb)
title_filters = self.filter_title(tch_emb)
ent_emb = layers.Concatenate(axis=1)([ent_emb, title_filters])
ent_emb = self.ent_reduc(ent_emb)
else:
l_filters, r_filters = (None, None)
title_filters = None
l_rnn_out = self.build_seq_output(lctx_emb, 'left', l_filters)
r_rnn_out = self.build_seq_output(rctx_emb, 'right', r_filters)
l_rnn_out, _ = build_attn_with_layer(l_rnn_out, ent_emb,
self.lattn_dist,
cell_size=self.cell_size)
r_rnn_out, _ = build_attn_with_layer(r_rnn_out, ent_emb,
self.rattn_dist,
cell_size=self.cell_size)
out = self.compare_and_score(l_rnn_out, r_rnn_out, ent_emb, feats)
model = Model(inputs=inputs, outputs=out)
if weights is not None:
model.load_weights(weights, by_name=True)
return model
def get_word_embs(self, l_words, r_words):
""" get context word embeddings """
l_emb = self.emb_word(l_words)
r_emb = self.emb_word(r_words)
return l_emb, r_emb
def get_ch_embs(self, l_chs, r_chs):
""" get context character embeddings """
lch_emb = self.emb_ch(l_chs)
rch_emb = self.emb_ch(r_chs)
return lch_emb, rch_emb
def get_ent_emb(self, ent):
""" get both title-character and entity embeddings """
ent_emb = self.emb_ent(ent)
ent_emb = layers.Reshape((300,))(ent_emb)
return ent_emb
def get_title_ch_emb(self, title_chs):
""" gets the character embedding for a title """
tch_emb = self.emb_ch(title_chs)
return tch_emb
def build_seq_output(self, words, side, ch_filters=None):
""" builds a sequence output. concatenates CNN filters to GRU inputs
and feeds that to a GRU. Will output a sequence if attention is enabled
"""
if side == 'left':
rnn = self.left_rnn
elif side == 'right':
rnn = self.right_rnn
if ch_filters is not None:
words = layers.Concatenate(axis=2)([words, ch_filters])
rnn_out = rnn(words)
return rnn_out
def filter_chars(self, left_chs, right_chs):
""" builds an array of character cnn filters (max-pooled). one set of
filters for each word, and then returns it as an array """
l_filter_list = []
r_filter_list = []
# pylint: disable=cell-var-from-loop
for i in range(0, 20):
left_slice = layers.Lambda(lambda x: x[:, i, :])(left_chs)
right_slice = layers.Lambda(lambda x: x[:, i, :])(right_chs)
filters = self.ch_ctx_cnn(left_slice)
pooled_filters = self.ch_ctx_pool(filters)
l_filter_list.append(pooled_filters)
filters = self.ch_ctx_cnn(right_slice)
pooled_filters = self.ch_ctx_pool(filters)
r_filter_list.append(pooled_filters)
l_filters = layers.Concatenate(axis=1)(l_filter_list)
r_filters = layers.Concatenate(axis=1)(r_filter_list)
return l_filters, r_filters
def filter_title(self, title_chs):
""" computes CNN max-pooled filters over title characters (title
characters should be on long sequence """
filters = self.ch_title_cnn(title_chs)
pool = self.ch_title_pool(filters)
pool = layers.Reshape((100,))(pool)
return pool
def compare_and_score(self, left, right, ent, feats):
""" Final layer of the compiled model
Concatenates several comparisons between the vectors of left and right
contexts and the entity vector.
Final dense layer takes all of these comparisons, and the final feature
vector, and outputs a binary prediction.
"""
comparisons = []
left_dot = layers.Dot(axes=1, normalize=True)([left, ent])
right_dot = layers.Dot(axes=1, normalize=True)([right, ent])
comparisons += [left_dot, right_dot]
left_diff = layers.Subtract()([left, ent])
right_diff = layers.Subtract()([right, ent])
comparisons += [left_diff, right_diff]
left_diff_sq = layers.Multiply()([left_diff, left_diff])
right_diff_sq = layers.Multiply()([right_diff, right_diff])
comparisons += [left_diff_sq, right_diff_sq]
left_mult = layers.Multiply()([left, ent])
right_mult = layers.Multiply()([right, ent])
comparisons += [left_mult, right_mult]
if feats is not None:
comparisons.append(feats)
comparisons_concat = layers.Concatenate(axis=1)(comparisons)
out = self.reduce_layer(comparisons_concat)
return out
| [
37811,
1398,
543,
12188,
749,
286,
674,
4096,
4981,
37227,
198,
198,
11748,
41927,
292,
13,
75,
6962,
355,
11685,
198,
11748,
41927,
292,
13,
1891,
437,
355,
509,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
11748,
11192,
273,
1... | 1.90661 | 8,245 |
from random import *
#######
#Task 1a#
#######
#Question 1
def new_game(n):
"""Returns a nxn matrix with all entries of zeros
Purpose: To initiate the 2048 game!
>>> new_game(1)
[[0]]
>>> new_game(2)
[[0, 0], [0, 0]]
>>> new_game(3)
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
"""
"***YOUR CODE HERE***"
###########
# Task 1b #
###########
#Question 2
def add_two_fixed(mat):
"""Mutate and Return a modified matrix 'mat' with the number 2 added to
the matrix. It is important that you must mutate the input mat.
Purpose: After each move, a block '2' must be added to continue the game.
IMPORTANT NOTE: Although the original 2048 game adds the number 2 randomly,
it is impossible to check the accuracy of the code for randomness. So I
would suggest adding the number 2 as you encounter the smallest row with
a zero entry. Check the test cases to make more sense.
IMPORTANT NOTE 2: DO MODIFY THE INPUT MAT
>>> add_two_fixed([[0]])
[[2]]
>>> add_two_fixed([[2, 2, 2], [2, 2, 2], [2, 2, 0]])
[[2, 2, 2], [2, 2, 2], [2, 2, 2]]
>>> add_two_fixed([[2, 0, 2], [0, 2, 2], [2, 2, 2]])
[[2, 2, 2], [0, 2, 2], [2, 2, 2]]
>>> add_two_fixed([[0, 0], [0, 0]])
[[2, 0], [0, 0]]
"""
"***YOUR CODE HERE***"
#The function add_two will be the one that will be used for the puzzle.py demonstration.
###########
# Task 1c #
###########
#Question 3
def game_state(mat):
"""Return either 'win', 'not over', or 'lose' based on the matrix given.
The description of the condition are as followed:
'win': If you have at least 1 entry with 2048, you will return 'win'
'not over': 1. If there exists a same number on subsequent rows or columns, you will return 'not over'
2. If there exists a zero entry, you will return 'not over'
'lose': If either 'win' or 'not over' conditions are not satisfied, you will return 'lose'
Check the test cases to make more sense
Purpose: After each move, the game can decide whether you've finished the game or not.
>>> game_state([[0, 0, 0, 0],[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 2048]])
'win'
>>> game_state([[2, 4, 2, 4],[4, 2, 2048, 2], [4, 2, 2, 2], [4, 4, 2, 4]])
'win'
>>> game_state([[2, 4, 2, 4], [4, 2, 4, 2], [2, 4, 2, 4], [4, 2, 8, 8]])
'not over'
>>> game_state([[2, 4, 2, 4], [4, 0, 4, 2], [2, 4, 2, 4], [4, 2, 4, 2]])
'not over'
>>> game_state([[2, 4, 2, 4], [4, 2, 4, 2], [2, 4, 2, 8], [4, 2, 4, 8]])
'not over'
>>> game_state([[2, 4, 2, 4], [4, 2, 4, 2], [2, 4, 2, 4], [4, 2, 4, 2]])
'lose'
"""
"***YOUR CODE HERE***"
###########
# Task 2a #
###########
def reverse(mat):
"""Return a new matrix where each row is flipped in reverse.
Purpose: Based on your movements, (up, down, left, right), We will be using
reverse and transpose functions so that we could unify the merge and cover_up
functions later on. For better understanding please check the up, down, left,
and right implementation on the very bottom of logic.py
IMPORTANT NOTE: DO NOT MODIFY THE INPUT MAT
>>> reverse([[3, 2, 1], [6, 5, 4], [9, 8 ,7]])
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
>>> reverse([[1, 0], [0, 2]])
[[0, 1], [2, 0]]
"""
"***YOUR CODE HERE***"
###########
# Task 2b #
###########
def transpose(mat):
""" Return a new matrix, which is a transpose of the input mat.
Purpose: same as reverse
IMPORTANT NOTE: DO NOT MODIFY THE INPUT MAT
>>> transpose([[1, 3], [2, 4]])
[[1, 2], [3, 4]]
>>> transpose([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
>>> transpose([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""
"***YOUR CODE HERE***"
##########
# Task 3 #
##########
def cover_up(mat):
"""Return a tuple of a matrix and a boolean. For a matrix, you will push
all the entries to the left. For a boolean, you will put True if at least
one number is pushed. A tuple is a sequence of immutable Python objects,
use round parentheses: (1, 2) or ([1], True)
Purpose: Based on the user input, the matrix will change its entries by
pushing and merging. You will implement the pushing part here. By having
the boolean, you can decide whether the user input does nothing or something.
IMPORTANT NOTE: DO NOT MODIFY THE INPUT MAT
>>> cover_up([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], False)
>>> cover_up([[2, 4, 2, 4], [4, 2, 4, 2], [2, 4, 2, 4], [4, 2, 4, 0]])
([[2, 4, 2, 4], [4, 2, 4, 2], [2, 4, 2, 4], [4, 2, 4, 0]], False)
>>> cover_up([[2, 0, 4, 0], [2, 0, 2, 0], [0, 4, 0, 8], [2, 0, 8, 0]])
([[2, 4, 0, 0], [2, 2, 0, 0], [4, 8, 0, 0], [2, 8, 0, 0]], True)
>>> cover_up([[2, 0, 2, 0], [0, 0, 0, 16], [2, 4, 0, 0], [0, 0, 16, 0]])
([[2, 2, 0, 0], [16, 0, 0, 0], [2, 4, 0, 0], [16, 0, 0, 0]], True)
"""
"***YOUR CODE HERE***"
def merge(mat):
"""Return a tuple of a matrix and a boolean. For a matrix, you will merge
the numbers that are next to each other and place it on the left. For a boolean,
you will put True if at least one number is merged.
Purpose: Similar to cover_up, you will implement the merging part here.
IMPORTANT NOTE: DO MODIFY THE INPUT MAT
>>> merge([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], False)
>>> merge([[2, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
([[4, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], True)
>>> merge([[2, 2, 2, 2], [4, 4, 4, 2], [2, 0, 2, 0], [2, 0, 0, 2]])
([[4, 0, 4, 0], [8, 0, 4, 2], [2, 0, 2, 0], [2, 0, 0, 2]], True)
>>> merge([[2, 4, 0, 0], [2, 0, 0, 0], [4, 2, 0, 0], [2, 4, 2, 0]])
([[2, 4, 0, 0], [2, 0, 0, 0], [4, 2, 0, 0], [2, 4, 2, 0]], False)
"""
"***YOUR CODE HERE***"
#I didn't have time to create test cases for questions below, so I've provided
#the codes below. Please understand the purpose of the functions below.
| [
198,
6738,
4738,
1330,
1635,
198,
198,
4242,
21017,
198,
2,
25714,
352,
64,
2,
198,
4242,
21017,
198,
198,
2,
24361,
352,
198,
4299,
649,
62,
6057,
7,
77,
2599,
198,
220,
220,
220,
37227,
35561,
257,
299,
87,
77,
17593,
351,
477,
... | 2.334599 | 2,633 |
# Zen Tetris - Tetris clone with two-player battle mode
# Uses Python arcade library: https://arcade.academy/index.html
# Background images are from https://pixabay.com/
# Sound data are from 魔王魂 at https://maoudamashii.jokersounds.com/
import arcade
from arcade import Matrix3x3
import random
import os
import timeit
WIDTH = 800 # window width in pixel
HEIGHT = 600 # window height in pixel
ASPECT = 1 # background image aspect ratio
SPRITE_SCALING = 0.7
PLWIDTH = 10 # game area width in number of blocks
PLHEIGHT = 20 # game area height in number of blocks
PLLEFT = 320 # game area left edge location within window in pixel
PLBOTTOM = 80 # game area top edge location within window in pixel
PLLEFT1 = 120 # PLLEFT for player 1 in two-player game mode
PLLEFT2 = 480 # PLLEFT for player 2 in two-player game mode
# Tetris shape colors
BLUE = 1
RED = 2
PURPLE = 3
GREEN = 4
AQUA = 5
YELLOW = 6
ORANGE = 7
GRAY = 8 # for wall/frame
# Show game title
class PauseView(arcade.View):
"""Switch from/to GameView temporalily"""
class GameView(arcade.View):
"""Tetris game main"""
class Player():
"""Represents a player to achieve two-player game battle"""
def can_move(self):
"""Check if shape can be located in current x, y, rotation count"""
for pos in self.game_view.tetris_shapes[self.shape][1][self.shape_cnt]:
x = int(self.x + pos % 4)
y = int(self.y - pos // 4)
if x < 0 or x >= PLWIDTH:
return False
if y < 0:
return False
if self.game_area[y][x] != 0:
return False
return True
def animation(self):
"""Delete animation"""
# Animate to-be-deleted lines before actually delete them
if self.delete_animation_counter <= 0:
self.block_changed = True
self.delete_animation_counter = 0.1 # 0.1 sec/frame
self.delete_animation_index += 1
if self.delete_animation_index > 7:
# Animation done
self.delete_animation = False
self.generate_tetris = True
self.delete_animation_index = 0
self.delete_counter += len(self.delete_animation_lines)
# delete 4 lines -> level up
if self.delete_counter >= 4:
self.delete_counter = 0
if self.level < len(self.game_view.fall_counter_init):
self.level += 1
arcade.play_sound(self.game_view.levelup_sound)
self.score += 10 * (2**(len(self.delete_animation_lines)-1))
# Attack the other player
if self.game_view.window.game_mode == 1:
for player in self.game_view.players:
if self != player:
player.damage_lines = \
len(self.delete_animation_lines) - 1
# Delete lines and append new lines
for y in self.delete_animation_lines:
del self.game_area[y]
area_line = []
for x in range(0, PLWIDTH):
area_line.append(0)
self.game_area.append(area_line)
self.delete_animation_lines = []
arcade.play_sound(self.game_view.delete_sound)
def shape_move(self):
"""Move player shape based on key input"""
if self.up_pressed:
self.player_moved = True
prev_cnt = self.shape_cnt
self.shape_cnt += 1
if self.shape_cnt >= \
len(self.game_view.tetris_shapes[self.shape][1]):
self.shape_cnt = 0
if not self.can_move():
self.shape_cnt = prev_cnt
self.up_pressed = False
if self.left_pressed:
self.player_moved = True
prev_x = self.x
self.x -= 1
if not self.can_move():
self.x = prev_x
self.left_pressed = False
if self.right_pressed:
self.player_moved = True
prev_x = self.x
self.x += 1
if not self.can_move():
self.x = prev_x
self.right_pressed = False
if self.down_pressed:
self.player_moved = True
self.fall_flag = True
self.down_pressed = False
def shape_fall(self):
"""Drop player shape one line or reach the bottom"""
if self.fall_counter <= 0 or self.fall_flag is True:
self.fall_counter = self.game_view.fall_counter_init[self.level]
self.player_moved = True
if self.generate_tetris is True:
# Generate new shape at top of game area
self.shape = random.randint(0, 6)
self.shape_cnt = 0
self.x = PLWIDTH/2 - 2
self.y = PLHEIGHT - 1
self.fall_flag = False
self.generate_tetris = False
# Gameover check
if not self.can_move():
self.game_over = True
self.gameover_counter = 0
self.player_moved = True
arcade.play_sound(self.game_view.gameover_sound)
else:
# Fall one line
prev_y = self.y
self.y -= 1
self.score += 2
if not self.can_move():
# Stuck at bottom and can't move anymore
arcade.play_sound(self.game_view.bottom_sound)
self.y = prev_y
color = self.game_view.tetris_shapes[self.shape][0]
for pos in (self.game_view.tetris_shapes
[self.shape][1][self.shape_cnt]):
x = int(self.x + pos % 4)
y = int(self.y - pos // 4)
self.game_area[y][x] = color
# Delete line check (and delete)
for y in range(0, PLHEIGHT):
if 0 not in self.game_area[PLHEIGHT-y-1]:
# Start delete animation
self.delete_animation = True
self.delete_animation_counter = 0.1 # 0.1 sec
self.delete_animation_index = 1
self.delete_animation_lines.append(PLHEIGHT-y-1)
if self.delete_animation is False:
# No delete line
self.generate_tetris = True
def player_attacked(self):
"""The other player deleted two or more lines and incurred
additional lines to me"""
for i in range(self.damage_lines):
del self.game_area[PLHEIGHT-1]
area_line = []
for x in range(PLWIDTH):
if random.randint(0, 99) < 50: # 50%
area_line.append(0)
else:
area_line.append(GRAY)
self.game_area.insert(0, area_line)
if self.damage_lines != 0:
arcade.play_sound(self.game_view.attacked_sound)
self.damage_lines = 0
def player_game_over(self):
"""Change block color to GRAY from bottom to top"""
if self.gameover_counter == 0:
# Move player sprites to block_list
# Without this, player sprites doesn't turn to GRAY
for sprite in self.player_list:
self.block_list.append(sprite)
self.player_list = arcade.SpriteList()
elif self.gameover_counter >= PLHEIGHT:
return
for x in range(PLWIDTH):
if self.game_area[self.gameover_counter][x] != 0:
self.game_area[self.gameover_counter][x] = GRAY
self.gameover_counter += 1
self.block_changed = True
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
if __name__ == "__main__":
main()
| [
2,
14760,
27351,
2442,
532,
27351,
2442,
17271,
351,
734,
12,
7829,
3344,
4235,
198,
2,
36965,
11361,
27210,
5888,
25,
3740,
1378,
5605,
671,
13,
330,
324,
3065,
14,
9630,
13,
6494,
198,
2,
25353,
4263,
389,
422,
3740,
1378,
79,
844... | 1.929879 | 4,207 |
import unittest
from src.response_getter import ResponseGetter
from unittest.mock import MagicMock
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
12351,
13,
26209,
62,
1136,
353,
1330,
18261,
3855,
353,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
... | 2.865385 | 52 |
from errant.en.classifier import conts
conts_expanded = {
"would": "'d",
"will": "'ll",
"am": "'m",
"not": "n't",
"are": "'re",
"is": "'s",
"has": "'s",
"have": "'ve",
}
# Input: An Edit object
# Output: Whether the Edit adds apostrophe to a contraction token
# e.g. nt -> n't
# its -> it 's
# Input: An Edit object
# Output: Whether the Edit decomposes a contraction token without apostrophe
# e.g. its -> it is
# Input: An Edit object
# Output: The same Edit object with an updated importance level
# 1: Trivial: punctuations (except apostrophe), casing
# 2: Moderate: informal words (abbreviations), apostrophe for contraction
# 3: Major: grammatically incorrect
| [
6738,
1931,
5250,
13,
268,
13,
4871,
7483,
1330,
542,
82,
198,
198,
3642,
82,
62,
11201,
12249,
796,
1391,
198,
220,
220,
220,
366,
19188,
1298,
24018,
67,
1600,
198,
220,
220,
220,
366,
10594,
1298,
24018,
297,
1600,
198,
220,
220,... | 2.873984 | 246 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class SimpleCache(models.Model):
"""Not a database model, used to set names and permissions for the dashboard."""
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
628,
198,
4871,
17427,
30562,
7,
27530,
13,
17633,
2599,
628,
220,
220,
220,
37227,
3673,
257,
6... | 3.678571 | 56 |
#!/usr/bin/env python
"""Tests for `func_adl_spark` package."""
import unittest
from func_adl_spark import func_adl_spark
import ast, qastle, astpretty, ast_scope
from func_adl_spark.spark_translation import (
generate_python_source,
python_ast_to_python_source,
)
from func_adl_spark.transformers import (
LambdaSelectVisitor,
RebindLambdaToSparkDFTransformer,
)
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
51,
3558,
329,
4600,
20786,
62,
324,
75,
62,
2777,
668,
63,
5301,
526,
15931,
628,
198,
11748,
555,
715,
395,
198,
198,
6738,
25439,
62,
324,
75,
62,
2777,
668,
1330,
2... | 2.514451 | 173 |
import re | [
11748,
302
] | 4.5 | 2 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 30 16:28:49 2020
@author: logical
"""
import os
import waveClass
class ParaviewClass:
"""
Parameters
----------
t : int
the current time
X : list
(m x n) matrix of X coordinates at which to calculate
the wave elevation
Y : list
(m x n) matrix of Y coordinates at which to calculate
the wave elevation.
Returns
-------
Z - (m x n) matrix of Z coordinates of the wave elevation
"""
if (self.wType == 'noWave') and (self.wType == 'noWaveCIC') and (self.wType == 'etaImport'):
Z = np.zeros((np.size(X),np.size(X)))
elif (self.wType == 'regular') and (self.wType == 'regularCIC'):
Xt = X*np.cos(self.waveDir*np.pi/180) + Y*np.sin(self.waveDir*np.pi/180)
Z = self.A*np.cos(-1*self.k*Xt + self.wF*t)
elif (self.wType == 'irregular') and (self.wType == 'spectrumImport'):
Z = np.zeros((np.size(X),np.size(X)))
Xt = X*np.cos(self.waveDir*np.pi/180) + Y*np.sin(self.waveDir*np.pi/180)
for iw in range(1,len(self.wF)+1):
Z = Z + np.sqrt(self.wA(iw)*self.dw(iw))*np.cos(-1*self.k(iw)*Xt + self.wF(iw)*t + self.phase(iw))
return(Z)
def write_paraview_vtp_wave(self, t, numPointsX, numPointsY, domainSize, model, simdate, mooring):
"""
Write vtp files for visualization using Paraview
Parameters
----------
t : TYPE
DESCRIPTION.
numPointsX : TYPE
DESCRIPTION.
numPointsY : TYPE
DESCRIPTION.
domainSize : TYPE
DESCRIPTION.
model : TYPE
DESCRIPTION.
simdate : TYPE
DESCRIPTION.
mooring : TYPE
DESCRIPTION.
Returns
-------
None.
"""
#ground plane
hDir = os.getcwd()
os.mkdir('vtk')
vDir = os.path.join(hDir, 'vtk')
os.chdir(vDir)
filename = 'ground.txt'
fid = open(filename,"w+")
fid.write(str(domainSize) + "\n")
fid.write(str(self.waterDepth) + "\n")
fid.write(str(mooring) + "\n")
fid.close()
#wave
x = np.linspace(-domainSize, domainSize, numPointsX)
y = np.linspace(-domainSize, domainSize, numPointsY)
[X,Y] = np.meshgrid(x,y)
lx = np.size(x,1)
ly = np.size(y,1)
numVertex = lx * ly
numFace = (lx-1) * (ly-1)
for it in range(0,len(t)):
#open file
os.mkdir('waves')
wDir = os.path.join(hDir, 'waves')
os.chdir(wDir)
filenames = 'waves_'+str(it)+'.vtp'
fids = open(filenames,"w+")
# calculate wave elevation
Z = self.waveElevationGrid(t[it], X, Y)
# write header
fids.write('<?xml version="1.0"?>\n')
fids.write('<!-- WEC-Sim Visualization using ParaView -->\n')
fids.write('<!-- model: ' , model , ' - ran on ' , simdate , ' -->\n')
fids.write('<!-- wave: ' , self.wType ,' -->\n')
fids.write('<!-- time: ' , str(t(it)) , ' -->\n')
fids.write('<VTKFile type="PolyData" version="0.1">\n')
fids.write(' <PolyData>\n')
# write wave info
fids.write(' <Piece NumberOfPoints="' + str(numVertex) + '" NumberOfPolys="' + str(numFace) + '">\n')
# write points
fids.write(' <Points>\n')
fids.write(' <DataArray type="Float32" NumberOfComponents="3" format="ascii">\n')
for jj in range(1,len(y)+1):
for ii in range(1,len(x)+1):
pt = [X[jj][ii], Y[jj][ii], Z[jj][ii]]
"""
if not work try np.array on X,Y,Z
"""
fids.write(' {:5.5f} {:5.5f} {:5.5f}\n'.format(pt[0],pt[1],pt[2]))
fids.write(' </DataArray>\n')
fids.write(' </Points>\n')
# write squares connectivity
fids.write(' <Polys>\n')
fids.write(' <DataArray type="Int32" Name="connectivity" format="ascii">\n')
for jj in range(1,ly):
for ii in range(1,lx):
p1 = (jj-1)*lx + (ii-1)
p2 = p1+1
p3 = p2 + lx
p4 = p1 + lx
fids.write(' {} {} {} {}\n'.format(p1,p2,p3,p4))
fids.write(' </DataArray>\n')
fids.write(' <DataArray type="Int32" Name="offsets" format="ascii">\n')
fids.write(' ')
for ii in range(1,numFace+1):
n = ii * 4
fids.write(' {}'.format(n))
fids.write('\n')
fids.write(' </DataArray>\n')
fids.write(' </Polys>\n')
# end file
fids.write(' </Piece>\n')
fids.write(' </PolyData>\n')
fids.write('</VTKFile>')
#close file
fids.close()
% calculate new position
pos = pos_all(it,:)
vertex_mod = self.rotateXYZ(vertex,[1 0 0],pos(4))
vertex_mod = self.rotateXYZ(vertex_mod,[0 1 0],pos(5))
vertex_mod = self.rotateXYZ(vertex_mod,[0 0 1],pos(6))
vertex_mod = self.offsetXYZ(vertex_mod,pos(1:3))
% open file
filename = ['vtk' filesep 'body' num2str(self.bodyNumber) '_' bodyname filesep bodyname '_' num2str(it) '.vtp']
fid = fopen(filename, 'w')
% write header
fprintf(fid, '<?xml version="1.0"?>\n')
fprintf(fid, ['<!-- WEC-Sim Visualization using ParaView -->\n'])
fprintf(fid, ['<!-- model: ' model ' - ran on ' simdate ' -->\n'])
fprintf(fid, ['<!-- body: ' bodyname ' -->\n'])
fprintf(fid, ['<!-- time: ' num2str(t(it)) ' -->\n'])
fprintf(fid, '<VTKFile type="PolyData" version="0.1">\n')
fprintf(fid, ' <PolyData>\n')
% write body info
fprintf(fid,[' <Piece NumberOfPoints="' num2str(numVertex) '" NumberOfPolys="' num2str(numFace) '">\n'])
% write points
fprintf(fid,' <Points>\n')
fprintf(fid,' <DataArray type="Float32" NumberOfComponents="3" format="ascii">\n')
for ii = 1:numVertex
fprintf(fid, ' %5.5f %5.5f %5.5f\n', vertex_mod(ii,:))
clear vertex_mod
fprintf(fid,' </DataArray>\n')
fprintf(fid,' </Points>\n')
% write tirangles connectivity
fprintf(fid,' <Polys>\n')
fprintf(fid,' <DataArray type="Int32" Name="connectivity" format="ascii">\n')
for ii = 1:numFace
fprintf(fid, ' %i %i %i\n', face(ii,:)-1)
fprintf(fid,' </DataArray>\n')
fprintf(fid,' <DataArray type="Int32" Name="offsets" format="ascii">\n')
fprintf(fid, ' ')
for ii = 1:numFace
n = ii * 3
fprintf(fid, ' %i', n)
fprintf(fid, '\n')
fprintf(fid,' </DataArray>\n')
fprintf(fid, ' </Polys>\n')
% write cell data
fprintf(fid,' <CellData>\n')
% Cell Areas
fprintf(fid,' <DataArray type="Float32" Name="Cell Area" NumberOfComponents="1" format="ascii">\n')
for ii = 1:numFace
fprintf(fid, ' %i', cellareas(ii))
fprintf(fid, '\n')
fprintf(fid,' </DataArray>\n')
% Hydrostatic Pressure
if ~isempty(hspressure)
fprintf(fid,' <DataArray type="Float32" Name="Hydrostatic Pressure" NumberOfComponents="1" format="ascii">\n')
for ii = 1:numFace
fprintf(fid, ' %i', hspressure.signals.values(it,ii))
fprintf(fid, '\n')
fprintf(fid,' </DataArray>\n')
% Non-Linear Froude-Krylov Wave Pressure
if ~isempty(wavenonlinearpressure)
fprintf(fid,' <DataArray type="Float32" Name="Wave Pressure NonLinear" NumberOfComponents="1" format="ascii">\n')
for ii = 1:numFace
fprintf(fid, ' %i', wavenonlinearpressure.signals.values(it,ii))
fprintf(fid, '\n')
fprintf(fid,' </DataArray>\n')
% Linear Froude-Krylov Wave Pressure
if ~isempty(wavelinearpressure)
fprintf(fid,' <DataArray type="Float32" Name="Wave Pressure Linear" NumberOfComponents="1" format="ascii">\n')
for ii = 1:numFace
fprintf(fid, ' %i', wavelinearpressure.signals.values(it,ii))
fprintf(fid, '\n')
fprintf(fid,' </DataArray>\n')
fprintf(fid,' </CellData>\n')
% end file
fprintf(fid, ' </Piece>\n')
fprintf(fid, ' </PolyData>\n')
fprintf(fid, '</VTKFile>')
% close file
fclose(fid)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
1737,
1542,
1467,
25,
2078,
25,
2920,
12131,
198,
198,
31,
9800,
25,
12219,
198,
37... | 1.737414 | 5,522 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
628
] | 3.833333 | 6 |
import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq import testing_utils
class DummyTrainableQuantizer(tf.keras.layers.Layer):
"""Used to test whether we can set layers as quantizers without any throws."""
class TestCommonFunctionality:
"""Test functionality common to all quantizers, like serialization and usage."""
@pytest.mark.parametrize("module", [lq.quantizers, tf.keras.activations])
@pytest.mark.parametrize(
"name",
["ste_sign", "approx_sign", "magnitude_aware_sign", "swish_sign", "ste_tern"],
)
@pytest.mark.parametrize(
"ref_fn",
[
lq.quantizers.SteSign(),
lq.quantizers.SteHeaviside(),
lq.quantizers.MagnitudeAwareSign(),
lq.quantizers.SwishSign(),
lq.quantizers.SteTern(),
],
)
@pytest.mark.parametrize("quantizer", ["input_quantizer", "kernel_quantizer"])
def test_layer_as_quantizer(self, quantizer, keras_should_run_eagerly):
"""Test whether a keras.layers.Layer can be used as quantizer."""
input_data = testing_utils.random_input((1, 10))
model = tf.keras.Sequential(
[lq.layers.QuantDense(1, **{quantizer: DummyTrainableQuantizer()})]
)
model.compile(optimizer="sgd", loss="mse", run_eagerly=keras_should_run_eagerly)
model.fit(input_data, np.ones((1,)), epochs=1)
assert any(["dummy_weight" in var.name for var in model.trainable_variables])
class TestQuantization:
"""Test binarization and ternarization."""
@pytest.mark.parametrize(
"fn",
[
"ste_sign",
lq.quantizers.SteSign(),
"approx_sign",
"swish_sign",
lq.quantizers.SwishSign(),
],
)
@pytest.mark.parametrize("fn", ["ste_heaviside", lq.quantizers.SteHeaviside()])
@pytest.mark.parametrize(
"fn",
[
"ste_tern",
lq.quantizers.SteTern(),
lq.quantizers.SteTern(ternary_weight_networks=True),
lq.quantizers.SteTern(threshold_value=np.random.uniform(0.01, 0.8)),
],
)
@pytest.mark.parametrize("fn", ["ste_tern", lq.quantizers.SteTern()])
@pytest.mark.parametrize(
"fn", [lq.quantizers.dorefa_quantizer, lq.quantizers.DoReFaQuantizer(2)]
)
class TestGradients:
"""Test gradients for different quantizers."""
@pytest.mark.parametrize(
"fn",
[lq.quantizers.ste_sign, lq.quantizers.ste_tern, lq.quantizers.ste_heaviside],
)
@pytest.mark.parametrize(
"fn",
[lq.quantizers.ste_sign, lq.quantizers.ste_tern, lq.quantizers.ste_heaviside],
)
# Test with and without default threshold
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
11748,
26371,
80,
355,
300,
80,
198,
6738,
26371,
80,
1330,
4856,
62,
26791,
628,
198,
4871,
360,
13513,
44077,
540,
24915,
750... | 2.161063 | 1,279 |
import json
from aiohttp import ClientTimeout, ClientSession, UnixConnector, client_exceptions
import asyncio
from datetime import datetime
from os import environ
import time
import requests
MAX_CONCURRENT_REQUEST = 5
TOTAL_REQUEST_COUNT = 10000
WARMUP_REQUEST_COUNT = 100
WARMUP_LAST_SLEEP_DURATION = 30
WEBLOG_URL = environ["WEBLOG_URL"] if "WEBLOG_URL" in environ else "http://weblog:7777"
LOG_FOLDER = environ["LOG_FOLDER"] if "LOG_FOLDER" in environ else "/app/logs"
TESTED_PATHS = ("/", "/waf/", "/waf/fdsfds/fds/fds/fds/", "/waf?a=b", "/waf?acd=bcd", "/waf?a=b&a=c")
# TOTAL_REQUEST_COUNT = 100
# WARMUP_REQUEST_COUNT = 1
# WARMUP_LAST_SLEEP_DURATION = 1
# WEBLOG_URL="http://localhost:7777"
# LOG_FOLDER="logs"
Runner().run()
| [
11748,
33918,
198,
6738,
257,
952,
4023,
1330,
20985,
48031,
11,
20985,
36044,
11,
33501,
34525,
11,
5456,
62,
1069,
11755,
198,
11748,
30351,
952,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
28686,
1330,
551,
2268,
198,
11748,
... | 2.391586 | 309 |
# <TextAddressRange></TextAddressRange>
# <DataAddressRange></DataAddressRange>
import argparse
import yaml
from . import iar_link
from . import gcc_link
from . import keil_link
# Linker file locations.
IAR_LD_SCRIPT = "iar/linker_script.icf"
KEIL_LD_SCRIPT = "keil/linker_script.sct"
KEIL_DEBUG_FILE = "keil/Dbg_RAM.ini"
GCC_LD_SCRIPT = "gcc/linker_script.ld"
KEIL_STARTUP_FILE = "keil/startup_keil.s"
IAR_STARTUP_FILE = "iar/startup_iar.c"
GCC_STARTUP_FILE = "gcc/startup_gcc.c"
def read_configuration(config_file):
"""Read a configuration YAML files and return a dictionary of memory sections"""
# Read the YAML configuration file as is.
with open(config_file) as file_object:
config_string = file_object.read()
config = yaml.load(config_string, Loader=yaml.FullLoader)
memory_sections = config['MemorySections']
# Search through the memory sections...
for name in memory_sections.keys():
# Find the start value, and convert it if necessary.
start = memory_sections[name]['start']
memory_sections[name]['start'] = convert_number(start)
# If we find a size value, use it.
if 'size' in memory_sections[name]:
size = memory_sections[name]['size']
memory_sections[name]['size'] = convert_number(size)
# It not, try to use an "end" value.
elif 'end' in memory_sections[name]:
end = memory_sections[name]['end']
memory_sections[name]['size'] = convert_number(end) - convert_number(start)
stack_size = config['StackOptions']['size']
config['StackOptions']['size'] = convert_number(stack_size)
# Create a memory section for the stack.
memory_sections['STACK'] = dict()
# Create a section for the stack. To do this, we'll either need to
# carve space out of TCM or RWMEM.
if config['StackOptions']['place_in_tcm']:
if config['StackOptions']['size'] > memory_sections['TCM']['size']:
raise LinkerConfigError("Stack ({} B) doesn't fit in TCM ({} B)".format(
config['StackOptions']['size'],
memory_sections['TCM']['size']))
memory_sections['STACK']['start'] = memory_sections['TCM']['start']
memory_sections['STACK']['size'] = config['StackOptions']['size']
memory_sections['TCM']['start'] = (memory_sections['STACK']['start'] +
config['StackOptions']['size'])
memory_sections['TCM']['size'] = (memory_sections['TCM']['size'] -
config['StackOptions']['size'])
else:
if config['StackOptions']['size'] > memory_sections['RWMEM']['size']:
raise LinkerConfigError("Stack ({} B) doesn't fit in RWMEM ({} B)".format(
config['StackOptions']['size'],
memory_sections['RWMEM']['size']))
memory_sections['STACK']['start'] = memory_sections['RWMEM']['start']
memory_sections['STACK']['size'] = config['StackOptions']['size']
memory_sections['RWMEM']['start'] = (memory_sections['STACK']['start'] +
config['StackOptions']['size'])
memory_sections['RWMEM']['size'] = (memory_sections['RWMEM']['size'] -
config['StackOptions']['size'])
return memory_sections
def convert_number(N):
"""Take in an integer or a numerical string ending in 'K', and convert it to an int"""
if isinstance(N, int):
return N
elif isinstance(N, str):
if N.endswith('K'):
return int(N[:-1]) * 1024
else:
raise LinkerConfigError('"{}" not recognized as a number'.format(N))
else:
raise LinkerConfigError('"{}" not recognized as a number'.format(N))
def print_memory_map(memory_sections):
"""Show the memory map in a human readable format"""
# Sort the section names by their starting address.
section_names = sorted(memory_sections.keys(), key=lambda x: memory_sections[x]['start'])
# Search for the longest section name, and record its length.
max_name_length = max(len(x) for x in section_names)
# This is a roundabout way to copy the maximum name length into a format
# string, so we can make the output string look pretty.
name_format = '{{:{}}}'.format(max_name_length + 1)
for name in section_names:
section = memory_sections[name]
mapping = {
'name': name_format.format(name + ':'),
'start': '0x{:08X}'.format(section['start']),
'end': '0x{:08X}'.format(section['start'] + section['size']),
'size': section['size'],
}
print('{name} {start:10} - {end:10} ({size} bytes)'.format(**mapping))
# Custom error for linker configuration problems.
if __name__ == '__main__':
main()
| [
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1279,
8206,
20231,
17257,
12240,
8206,
20231,
17257,
29,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1279,
6601,
20231,
17257,
12240,
6601,
20231,
17257,
2... | 2.269763 | 2,239 |
import random
from torchvision import datasets
from torchvision import transforms
from torch.utils import data
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# train set = 45000
# val set = 5000
# test set = 10000 | [
11748,
4738,
198,
198,
6738,
28034,
10178,
1330,
40522,
198,
6738,
28034,
10178,
1330,
31408,
198,
6738,
28034,
13,
26791,
1330,
1366,
198,
198,
35636,
62,
27432,
796,
31408,
13,
7293,
577,
26933,
198,
220,
220,
220,
31408,
13,
29531,
3... | 2.833333 | 192 |
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
gpoList = [27]
for i in gpoList:
GPIO.setup(i, GPIO.OUT)
#GPIO.output(i, GPIO.HIGH)
try:
GPIO.output(27, GPIO.LOW)
print "Pin 27 On IN2"
except KeyboardInterrupt:
print "Saliendo ..."
GPIO.cleanup()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
198,
16960,
9399,
13,
2617,
14171,
7,
16960,
9399,
13,
2749,
44,
8,
198,
198,
70,
7501,
8053,
796,
685,
1983,
60,
... | 2.189394 | 132 |
from database import DbConnection, NotExists, User
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_user, logout_user, login_required
from werkzeug.security import check_password_hash, generate_password_hash
import logging
auth = Blueprint("auth", __name__)
log = logging.getLogger(__name__)
@auth.route("/login")
@auth.route("/login", methods=["POST"])
@auth.route("/signup")
@auth.route("/signup", methods=["POST"])
@auth.route("/logout")
@login_required
| [
6738,
6831,
1330,
360,
65,
32048,
11,
1892,
3109,
1023,
11,
11787,
198,
6738,
42903,
1330,
39932,
11,
7644,
11,
18941,
11,
8543,
62,
28243,
11,
2581,
11,
19016,
62,
1640,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
7220,
11,
2604,
... | 3.14881 | 168 |
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr = []
for _ in range(6):
arr.append(list(map(int, input().rstrip().split())))
result = hourglassSum(arr)
fptr.write(str(result) + '\n')
fptr.close()
| [
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
198,
2,
13248,
262,
1711,
20721,
13065,
2163,
2174,
13,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
... | 2.503448 | 145 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: files_attributes
author: Manala (@manala)
short_description: returns a curated attributes list
description:
- Takes a attributes list and returns it curated.
'''
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
import re
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
8,
198,
834,
4164,
330,
31172,
834,
796,
2099,
198,
198,
38715,
5883,
3525,
6234,
796,
705,
7061,
198,
220,
220,
220,
1438,
25,
3696,
62,
1078,
7657... | 3.262295 | 122 |
# -*- coding: utf-8 -*-
"""
mul.recipe.appengine
"""
__author__ = 'Michael Lenaghan'
__email__ = 'metamul -@- gmail.com'
__version__ = '0.4.0'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
37811,
198,
76,
377,
13,
29102,
431,
13,
1324,
18392,
198,
37811,
628,
198,
834,
9800,
834,
796,
705,
13256,
12592,
45109,
6,
198,
834,
12888,
834,
796,
705,
4164... | 2.161765 | 68 |
import pygame
import numpy as np
| [
11748,
12972,
6057,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.3 | 10 |
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
@attr.s
@attr.s
| [
2,
6127,
7560,
416,
300,
668,
62,
21282,
74,
62,
5235,
13,
8410,
5626,
48483,
13,
198,
198,
6738,
279,
2645,
668,
13,
75,
668,
62,
25927,
1330,
16089,
18453,
3041,
80,
11,
4808,
3605,
62,
24396,
62,
18076,
198,
6738,
279,
2645,
66... | 2.655914 | 93 |
from django.urls import path
from . import views
app_name = 'catalog'
urlpatterns = [
path('', views.Index.as_view(), name='index'),
path('books/', views.BookListView.as_view(), name='books'),
path('<int:pk>/books/', views.BookDetailView.as_view(), name='book-detail'),
path('authors/', views.AuthorListView.as_view(), name='authors'),
path('<int:pk>/authors/', views.AuthorDetailView.as_view(), name='author-detail'),
path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'),
path('book/<uuid:pk>/renew/', views.renew_book_librarian, name='renew-book-librarian'),
path('author/create/', views.AuthorCreate.as_view(), name='author_create'),
path('author/<int:pk>/update/', views.AuthorUpdate.as_view(), name='author_update'),
path('author/<int:pk>/delete/', views.AuthorDelete.as_view(), name='author_delete'),
# path('', views.QuestionView.as_view(), name='question'),
# path('<int:pk>/detail/', views.DetailView.as_view(), name='detail'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
9246,
11794,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
15732,
13,
292,
62,
... | 2.673684 | 380 |
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
import config_util
import json
import os
import shutil
import subprocess
import sys
import time
import common_util
try:
pbFilePath = "../fabric/bddtests"
sys.path.insert(0, pbFilePath)
from peer import chaincode_pb2
except:
print("ERROR! Unable to import the protobuf libraries from the ../fabric/bddtests directory: {0}".format(sys.exc_info()[0]))
sys.exit(1)
# The default channel ID
SYS_CHANNEL_ID = "behavesyschan"
TEST_CHANNEL_ID = "behavesystest"
| [
2,
198,
2,
15069,
19764,
11421,
1439,
6923,
33876,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
198,
11748,
4566,
62,
22602,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
4423,
... | 2.687204 | 211 |
from minpiler.std import M, inline
c = C(20)
c.f(1, 2)
# > print 23.0
| [
6738,
949,
79,
5329,
13,
19282,
1330,
337,
11,
26098,
628,
198,
198,
66,
796,
327,
7,
1238,
8,
198,
66,
13,
69,
7,
16,
11,
362,
8,
198,
198,
2,
1875,
3601,
2242,
13,
15,
198
] | 2 | 37 |
# #-- -- -- -- Manipulating DataFrames with pandas
# # Used for Data Scientist Training Path
# #FYI it's a compilation of how to work
# #with different commands.
# ### --------------------------------------------------------
# # # # ------>>>>Index ordering In this
# exercise, the DataFrame
# election is provided for you.
# It contains the 2012 US
# election results for the
# state of Pennsylvania with
# county names as row indices.
# Your job is to select
# 'Bedford' county and
# the'winner' column. Which
# method is the preferred way?
# Feel free to explore the
# DataFrame in the IPython
# Shell.
election.loc['Bedford', 'winner']
# ### --------------------------------------------------------
# # # # ------>>>>Positional and labeled indexing
# Assign the row position of election.loc['Bedford']: x
x = 4
# Assign the column position of election['winner']: y
y = 4
# Print the boolean equivalence
print(election.iloc[x, y] == election.loc['Bedford', 'winner'])
# ### --------------------------------------------------------
# # # # ------>>>>Indexing and column rearrangement
# Import pandas
import pandas as pd
# Read in filename and set the index: election
election = pd.read_csv(filename, index_col='county')
# Create a separate dataframe with the columns ['winner', 'total', 'voters']: results
results = election[['winner', 'total', 'voters']]
# Print the output of results.head()
print(results.head())
# ### --------------------------------------------------------
# # # # ------>>>> Slicing rows
# Slice the row labels 'Perry' to 'Potter': p_counties
p_counties = election.loc['Perry':'Potter',:]
# Print the p_counties DataFrame
print(p_counties)
# Slice the row labels 'Potter' to 'Perry' in reverse order: p_counties_rev
p_counties_rev = election.loc['Potter':'Perry':-1]
# Print the p_counties_rev DataFrame
print(p_counties_rev)
# ### --------------------------------------------------------
# # # # ------>>>>Slicing columns
# Slice the columns from the starting column to 'Obama': left_columns
left_columns = election.loc[:,:'Obama']
# Print the output of left_columns.head()
print(left_columns.head())
# Slice the columns from 'Obama' to 'winner': middle_columns
middle_columns = election.loc[:,'Obama':'winner']
# Print the output of middle_columns.head()
print(middle_columns.head())
# Slice the columns from 'Romney' to the end: 'right_columns'
right_columns = election.loc[:,'Romney':]
# Print the output of right_columns.head()
print(right_columns.head())
# ### --------------------------------------------------------
# # # # ------>>>> Subselecting DataFrames with lists
# Create the list of row labels: rows
rows = ['Philadelphia', 'Centre', 'Fulton']
# Create the list of column labels: cols
cols = ['winner', 'Obama', 'Romney']
# Create the new DataFrame: three_counties
three_counties = election.loc[rows,cols]
# Print the three_counties DataFrame
print(three_counties)
# ### --------------------------------------------------------
# # # # ------>>>> Thresholding data
# Create the boolean array: high_turnout
high_turnout = election['turnout'] > 70
# Filter the election DataFrame with the high_turnout array: high_turnout_df
high_turnout_df = election[high_turnout]
# Print the high_turnout_results DataFrame
print(high_turnout_df)
# ### --------------------------------------------------------
# # # # ------>>>> Filtering columns using other columns
# Import numpy
import numpy as np
# Create the boolean array: too_close
too_close = election['margin'] < 1
# Assign np.nan to the 'winner' column where the results were too close to call
election.loc[too_close, 'winner'] = np.nan
# Print the output of election.info()
print(election.info())
# ### --------------------------------------------------------
# # # # ------>>>> Filtering using NaNs
# Select the 'age' and 'cabin' columns: df
df = titanic[['age', 'cabin']]
# Print the shape of df
print(df.shape)
# Drop rows in df with how='any' and print the shape
print(df.dropna(how='any').shape)
# Drop rows in df with how='all' and print the shape
print(df.dropna(how='all').shape)
# Drop columns in titanic with less than 1000 non-missing values
print(titanic.dropna(thresh=1000, axis='columns').info())
# ### --------------------------------------------------------
# # # # ------>>>> Using apply() to transform a column
# Write a function to convert degrees Fahrenheit to degrees Celsius: to_celsius
# Apply the function over 'Mean TemperatureF' and 'Mean Dew PointF': df_celsius
df_celsius = weather[['Mean TemperatureF', 'Mean Dew PointF']].apply(to_celsius)
# Reassign the column labels of df_celsius
df_celsius.columns = ['Mean TemperatureC', 'Mean Dew PointC']
# Print the output of df_celsius.head()
print(df_celsius.head())
# ### --------------------------------------------------------
# # # # ------>>>> Using .map() with a dictionary
# Create the dictionary: red_vs_blue
red_vs_blue = {'Obama':'blue', 'Romney':'red'}
# Use the dictionary to map the 'winner' column to the new column: election['color']
election['color'] = election['winner'].map(red_vs_blue)
# Print the output of election.head()
print(election.head())
# ### --------------------------------------------------------
# # # # ------>>>> Using vectorized functions
# Import zscore from scipy.stats
from scipy.stats import zscore
# Call zscore with election['turnout'] as input: turnout_zscore
turnout_zscore = zscore(election['turnout'])
# Print the type of turnout_zscore
print(type(turnout_zscore))
# Assign turnout_zscore to a new column: election['turnout_zscore']
election['turnout_zscore'] = turnout_zscore
# Print the output of election.head()
print(election.head())
# ### --------------------------------------------------------
# # # # ------>>>>Index values and names
# Which one of the following index operations does not raise an error?
# The sales DataFrame which you have seen in the videos of the previous
# chapter has been pre-loaded for you and is available for exploration in the IPython Shell.
# eggs salt spam
# month
# Jan 47 12.0 17
# Feb 110 50.0 31
# Mar 221 89.0 72
# Apr 77 87.0 20
# May 132 NaN 52
# Jun 205 60.0 55
# R/ --->
sales.index = range(len(sales))
# ### --------------------------------------------------------
# # # # ------>>>> Changing index of a DataFrame
# Create the list of new indexes: new_idx
new_idx = [i.upper() for i in sales.index]
# Assign new_idx to sales.index
sales.index = new_idx
# Print the sales DataFrame
print(sales)
# ### --------------------------------------------------------
# # # # ------>>>>Changing index name labels
# Assign the string 'MONTHS' to sales.index.name
sales.index.name = 'MONTHS'
# Print the sales DataFrame
print(sales)
# Assign the string 'PRODUCTS' to sales.columns.name
sales.columns.name = 'PRODUCTS'
# Print the sales dataframe again
print(sales)
# ### --------------------------------------------------------
# # # # ------>>>> Building an index, then a DataFrame
# Generate the list of months: months
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
# Assign months to sales.index
sales.index = months
# Print the modified sales DataFrame
print(sales)
# ### --------------------------------------------------------
# # # # ------>>>> Extracting data with a MultiIndex
# Print sales.loc[['CA', 'TX']]
print(sales.loc[['CA', 'TX']])
# Print sales['CA':'TX']
print(sales['CA':'TX'])
# ### --------------------------------------------------------
# # # # ------>>>> Setting & sorting a MultiIndex
# Set the index to be the columns ['state', 'month']: sales
sales = sales.set_index(['state','month'])
# Sort the MultiIndex: sales
sales = sales.sort_index()
# Print the sales DataFrame
print(sales)
# ### --------------------------------------------------------
# # # # ------>>>> Using .loc[] with nonunique indexes
# Set the index to the column 'state': sales
sales = sales.set_index(['state'])
# Print the sales DataFrame
print(sales)
# Access the data from 'NY'
print(sales.loc['NY'])
# ### --------------------------------------------------------
# # # # ------>>>> Indexing multiple levels of a MultiIndex
# Look up data for NY in month 1 in sales: NY_month1
NY_month1 = sales.loc['NY',1]
# Look up data for CA and TX in month 2: CA_TX_month2
CA_TX_month2 = sales.loc[(['CA', 'TX'], 2),:]
# Access the inner month index and look up data for all states in month 2: all_month2
all_month2 = sales.loc[(['CA','NY','TX'], 2),:]
# ### --------------------------------------------------------
# # # # ------>>>> Pivoting and the index
# Prior to using .pivot(), you need to set the index of the
# DataFrame somehow. Is this statement True or False?
# R/ False
# ### --------------------------------------------------------
# # # # ------>>>> Pivoting a single variable
# Pivot the users DataFrame: visitors_pivot
visitors_pivot = users.pivot(index='weekday', columns='city', values='visitors')
# Print the pivoted DataFrame
print(visitors_pivot)
# ### --------------------------------------------------------
# # # # ------>>>> Pivoting all variables
# Pivot users with signups indexed by weekday and city: signups_pivot
signups_pivot = users.pivot(index='weekday',columns='city',values='signups')
# Print signups_pivot
print(signups_pivot)
# Pivot users pivoted by both signups and visitors: pivot
pivot = users.pivot(index='weekday',columns='city')
# Print the pivoted DataFrame
print(pivot)
# ### --------------------------------------------------------
# # # # ------>>>> Stacking & unstacking I
# Unstack users by 'weekday': byweekday
byweekday = users.unstack(level='weekday')
# Print the byweekday DataFrame
print(byweekday)
# Stack byweekday by 'weekday' and print it
print(byweekday.stack(level='weekday'))
# ### --------------------------------------------------------
# # # # ------>>>> Stacking & unstacking II
# Unstack users by 'city': bycity
bycity = users.unstack(level='city')
# Print the bycity DataFrame
print(bycity)
# Stack bycity by 'city' and print it
print(bycity.stack(level='city'))
# ### --------------------------------------------------------
# # # # ------>>>> Restoring the index order
# Stack 'city' back into the index of bycity: newusers
newusers = bycity.stack(level='city')
# Swap the levels of the index of newusers: newusers
newusers = newusers.swaplevel(0, 1)
# Print newusers and verify that the index is not sorted
print(newusers)
# Sort the index of newusers: newusers
newusers = newusers.sort_index()
# Print newusers and verify that the index is now sorted
print(newusers)
# Verify that the new DataFrame is equal to the original
print(newusers.equals(users))
# ### --------------------------------------------------------
# # # # ------>>>> Adding names for readability
# Reset the index: visitors_by_city_weekday
visitors_by_city_weekday = visitors_by_city_weekday.reset_index()
# Print visitors_by_city_weekday
print(visitors_by_city_weekday)
# Melt visitors_by_city_weekday: visitors
visitors = pd.melt(visitors_by_city_weekday, id_vars=['weekday'], value_name='visitors')
# Print visitors
print(visitors)
# ### --------------------------------------------------------
# # # # ------>>>> Going from wide to long
# Melt users: skinny
skinny = pd.melt(users, id_vars=['weekday','city'])
# Print skinny
print(skinny)
# ### --------------------------------------------------------
# # # # ------>>>> Obtaining key-value pairs with melt()
# Set the new index: users_idx
users_idx = users.set_index(['city','weekday'])
# Print the users_idx DataFrame
print(users_idx)
# Obtain the key-value pairs: kv_pairs
kv_pairs = pd.melt(users_idx, col_level=0)
# Print the key-value pairs
print(kv_pairs)
# ### --------------------------------------------------------
# # # # ------>>>> Setting up a pivot table
# Create the DataFrame with the appropriate pivot table: by_city_day
by_city_day = users.pivot_table(index='weekday',columns='city')
# Print by_city_day
print(by_city_day)
# ### --------------------------------------------------------
# # # # ------>>>> Using other aggregations in pivot tables
# Use a pivot table to display the count of each column: count_by_weekday1
count_by_weekday1 = users.pivot_table(index='weekday', aggfunc='count')
# Print count_by_weekday
print(count_by_weekday1)
# Replace 'aggfunc='count'' with 'aggfunc=len': count_by_weekday2
count_by_weekday2 = users.pivot_table(index='weekday', aggfunc=len)
# Verify that the same result is obtained
print('==========================================')
print(count_by_weekday1.equals(count_by_weekday2))
# ### --------------------------------------------------------
# # # # ------>>>> Using margins in pivot tables
# Create the DataFrame with the appropriate pivot table: signups_and_visitors
signups_and_visitors = users.pivot_table(index='weekday', aggfunc=sum)
# Print signups_and_visitors
print(signups_and_visitors)
# Add in the margins: signups_and_visitors_total
signups_and_visitors_total = users.pivot_table(index='weekday', margins=True, aggfunc=sum)
# Print signups_and_visitors_total
print(signups_and_visitors_total)
# ### --------------------------------------------------------
# # # # ------>>>> Advantages of categorical data types
# What are the main advantages of storing data explicitly as categorical types instead of object types?
# Computations are faster.
# Categorical data require less space in memory.
# R/ All of the above.
# ### --------------------------------------------------------
# # # # ------>>>> Grouping by multiple columns
# Group titanic by 'pclass'
by_class = titanic.groupby('pclass')
# Aggregate 'survived' column of by_class by count
count_by_class = by_class['survived'].count()
# Print count_by_class
print(count_by_class)
# Group titanic by 'embarked' and 'pclass'
by_mult = titanic.groupby(['embarked','pclass'])
# Aggregate 'survived' column of by_mult by count
count_mult = by_mult['survived'].count()
# Print count_mult
print(count_mult)
# ### --------------------------------------------------------
# # # # ------>>>> Grouping by another series
# Read life_fname into a DataFrame: life
life = pd.read_csv(life_fname, index_col='Country')
# Read regions_fname into a DataFrame: regions
# NOTE(review): DataCamp-style pandas groupby/aggregation exercise solutions.
# Names such as pd, plt, life, regions_fname, titanic, gapminder_2010, sales
# and medals are expected to come from earlier parts of this file or the
# exercise environment -- they are not defined in this chunk.
regions = pd.read_csv(regions_fname,index_col='Country')
# Group life by regions['region']: life_by_region
life_by_region = life.groupby(regions['region'])
# Print the mean over the '2010' column of life_by_region
print(life_by_region['2010'].mean())
# ### --------------------------------------------------------
# # # # ------>>>> Computing multiple aggregates of multiple columns
# Group titanic by 'pclass': by_class
by_class = titanic.groupby('pclass')
# Select 'age' and 'fare'
by_class_sub = by_class[['age','fare']]
# Aggregate by_class_sub by 'max' and 'median': aggregated
aggregated = by_class_sub.agg(['max','median'])
# Print the maximum age in each class
print(aggregated.loc[:, ('age','max')])
# Print the median fare in each class
print(aggregated.loc[:, ('fare','median')])
# ### --------------------------------------------------------
# # # # ------>>>> Aggregating on index levels/fields
# Read the CSV file into a DataFrame and sort the index: gapminder
gapminder = pd.read_csv('gapminder.csv', index_col=['Year','region','Country']).sort_index()
# Group gapminder by 'Year' and 'region': by_year_region
by_year_region = gapminder.groupby(level=['Year','region'])
# Define the function to compute spread: spread
# NOTE(review): the definition of spread() is referenced below but missing
# from this chunk -- presumably spread(s) = s.max() - s.min(); confirm.
# Create the dictionary: aggregator
aggregator = {'population':'sum', 'child_mortality':'mean', 'gdp':spread}
# Aggregate by_year_region using the dictionary: aggregated
aggregated = by_year_region.agg(aggregator)
# Print the last 6 entries of aggregated
print(aggregated.tail(6))
# ### --------------------------------------------------------
# # # # ------>>>> Grouping on a function of the index
# Read file: sales
sales = pd.read_csv('sales.csv', index_col='Date', parse_dates=True)
# Create a groupby object: by_day
# The grouping key is the weekday abbreviation ('Mon', 'Tue', ...) of each date.
by_day = sales.groupby(sales.index.strftime('%a'))
# Create sum: units_sum
units_sum = by_day['Units'].sum()
# Print units_sum
print(units_sum)
# ### --------------------------------------------------------
# # # # ------>>>> Detecting outliers with Z-Scores
# Import zscore
from scipy.stats import zscore
# Group gapminder_2010: standardized
standardized = gapminder_2010.groupby('region')[['life','fertility']].transform(zscore)
# Construct a Boolean Series to identify outliers: outliers
# An outlier is a life expectancy more than 3 standard deviations below its
# regional mean, or a fertility more than 3 standard deviations above it.
outliers = (standardized['life'] < -3) | (standardized['fertility'] > 3)
# Filter gapminder_2010 by the outliers: gm_outliers
gm_outliers = gapminder_2010.loc[outliers]
# Print gm_outliers
print(gm_outliers)
# ### --------------------------------------------------------
# # # # ------>>>> Filling missing data (imputation) by group
# Create a groupby object: by_sex_class
by_sex_class = titanic.groupby(['sex', 'pclass'])
# Write a function that imputes median
# NOTE(review): the definition of impute_median() is referenced below but
# missing from this chunk -- presumably it fills NaNs with the group median.
# Impute age and assign to titanic['age']
titanic.age = by_sex_class['age'].transform(impute_median)
# Print the output of titanic.tail(10)
print(titanic.tail(10))
# ### --------------------------------------------------------
# # # # ------>>>> Other transformations with .apply
# Group gapminder_2010 by 'region': regional
regional = gapminder_2010.groupby('region')
# Apply the disparity function on regional: reg_disp
# NOTE(review): disparity() is not defined in this chunk; confirm it exists.
reg_disp = regional.apply(disparity)
# Print the disparity of 'United States', 'United Kingdom', and 'China'
print(reg_disp.loc[['United States', 'United Kingdom', 'China']])
# ### --------------------------------------------------------
# # # # ------>>>> Grouping and filtering with .apply()
# Create a groupby object using titanic over the 'sex' column: by_sex
by_sex = titanic.groupby('sex')
# Call by_sex.apply with the function c_deck_survival
# NOTE(review): c_deck_survival() is not defined in this chunk; confirm.
c_surv_by_sex = by_sex.apply(c_deck_survival)
# Print the survival rates
print(c_surv_by_sex)
# ### --------------------------------------------------------
# # # # ------>>>> Grouping and filtering with .filter()
# Read the CSV file into a DataFrame: sales
sales = pd.read_csv('sales.csv', index_col='Date', parse_dates=True)
# Group sales by 'Company': by_company
by_company = sales.groupby('Company')
# Compute the sum of the 'Units' of by_company: by_com_sum
by_com_sum = by_company['Units'].sum()
print(by_com_sum)
# Filter 'Units' where the sum is > 35: by_com_filt
# .filter() keeps every row of the groups whose total 'Units' exceed 35.
by_com_filt = by_company.filter(lambda g:g['Units'].sum() > 35)
print(by_com_filt)
# ### --------------------------------------------------------
# # # # ------>>>> Filtering and grouping with .map()
# Create the Boolean Series: under10
under10 = (titanic['age'] < 10).map({True:'under 10', False:'over 10'})
# Group by under10 and compute the survival rate
survived_mean_1 = titanic.groupby(under10)['survived'].mean()
print(survived_mean_1)
# Group by under10 and pclass and compute the survival rate
survived_mean_2 = titanic.groupby([under10, 'pclass'])['survived'].mean()
print(survived_mean_2)
# ### --------------------------------------------------------
# # # # ------>>>> Grouping and aggregating The Olympic medal data for the following exercises
# comes from The Guardian. It comprises records of all events held at the Olympic
# games between 1896 and 2012.
# Suppose you have loaded the data into a DataFrame medals. You now want to find
# the total number of medals awarded to the USA per edition. To do this, filter
# the 'USA' rows and use the groupby() function to put the 'Edition' column on
# the index:
# USA_edition_grouped = medals.loc[medals.NOC == 'USA'].groupby('Edition') Given
# the goal of finding the total number of USA medals awarded per edition, what
# column should you select and which aggregation method should you use?
# R/
# NOTE(review): USA_edition_grouped is only assigned in the comment above, so
# the next line raises NameError unless that assignment actually runs first.
USA_edition_grouped['Medal'].count()
# ### --------------------------------------------------------
# # # # ------>>>> Using .value_counts() for ranking
# Select the 'NOC' column of medals: country_names
country_names = medals['NOC']
# Count the number of medals won by each country: medal_counts
medal_counts = country_names.value_counts()
# Print top 15 countries ranked by medals
print(medal_counts.head(15))
# ### --------------------------------------------------------
# # # # ------>>>> Using .pivot_table() to count medals by type
# Construct the pivot table: counted
counted = medals.pivot_table(index='NOC', values='Athlete', columns='Medal', aggfunc='count')
# Create the new column: counted['totals']
counted['totals'] = counted.sum(axis='columns')
# Sort counted by the 'totals' column
counted = counted.sort_values('totals', ascending=False)
# Print the top 15 rows of counted
print(counted.head(15))
# ### --------------------------------------------------------
# # # # ------>>>> Applying .drop_duplicates()
# Select columns: ev_gen
ev_gen = medals[['Event_gender', 'Gender']]
# Drop duplicate pairs: ev_gen_uniques
ev_gen_uniques = ev_gen.drop_duplicates()
# Print ev_gen_uniques
print(ev_gen_uniques)
# ### --------------------------------------------------------
# # # # ------>>>> Finding possible errors with .groupby()
# Group medals by the two columns: medals_by_gender
medals_by_gender = medals.groupby(['Event_gender', 'Gender'])
# Create a DataFrame with a group count: medal_count_by_gender
medal_count_by_gender = medals_by_gender.count()
# Print medal_count_by_gender
print(medal_count_by_gender)
# ### --------------------------------------------------------
# # # # ------>>>> Locating suspicious data
# Create the Boolean Series: sus
# Rows recorded as a women's event ('W') but with male athletes are suspect.
sus = (medals.Event_gender == 'W') & (medals.Gender == 'Men')
# Create a DataFrame with the suspicious row: suspect
suspect = medals[sus]
# Print suspect
print(suspect)
# ### --------------------------------------------------------
# # # # ------>>>> Using .nunique() to rank by distinct sports
# Group medals by 'NOC': country_grouped
country_grouped = medals.groupby('NOC')
# Compute the number of distinct sports in which each country won medals: Nsports
Nsports = country_grouped['Sport'].nunique()
# Sort the values of Nsports in descending order
Nsports = Nsports.sort_values(ascending=False)
# Print the top 15 rows of Nsports
print(Nsports.head(15))
# ### --------------------------------------------------------
# # # # ------>>>> Counting USA vs. USSR Cold War Olympic Sports
# Create a Boolean Series that is True when 'Edition' is between 1952 and 1988: during_cold_war
during_cold_war = (medals['Edition'] >= 1952) & (medals['Edition'] <= 1988)
# Extract rows for which 'NOC' is either 'USA' or 'URS': is_usa_urs
is_usa_urs = medals.NOC.isin(['USA', 'URS'])
# Use during_cold_war and is_usa_urs to create the DataFrame: cold_war_medals
cold_war_medals = medals.loc[during_cold_war & is_usa_urs]
# Group cold_war_medals by 'NOC'
country_grouped = cold_war_medals.groupby('NOC')
# Create Nsports
Nsports = country_grouped['Sport'].nunique().sort_values(ascending=False)
# Print Nsports
print(Nsports)
# ### --------------------------------------------------------
# # # # ------>>>> Counting USA vs. USSR Cold War Olympic Medals
# Create the pivot table: medals_won_by_country
medals_won_by_country = medals.pivot_table(index='Edition', columns='NOC', values='Athlete', aggfunc='count')
# Slice medals_won_by_country: cold_war_usa_urs_medals
cold_war_usa_urs_medals = medals_won_by_country.loc[1952:1988, ['USA','URS']]
# Create most_medals
# idxmax over columns gives, per edition, which of the two countries won more.
most_medals = cold_war_usa_urs_medals.idxmax(axis='columns')
# Print most_medals.value_counts()
print(most_medals.value_counts())
# ### --------------------------------------------------------
# # # # ------>>>> Visualizing USA Medal Counts by Edition: Line Plot
# Create the DataFrame: usa
usa = medals[medals.NOC == 'USA']
# Group usa by ['Edition', 'Medal'] and aggregate over 'Athlete'
usa_medals_by_year = usa.groupby(['Edition', 'Medal'])['Athlete'].count()
# Reshape usa_medals_by_year by unstacking
usa_medals_by_year = usa_medals_by_year.unstack(level='Medal')
# Plot the DataFrame usa_medals_by_year
usa_medals_by_year.plot()
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Visualizing USA Medal Counts by Edition: Area Plot
# Create the DataFrame: usa
usa = medals[medals.NOC == 'USA']
# Group usa by 'Edition', 'Medal', and 'Athlete'
usa_medals_by_year = usa.groupby(['Edition', 'Medal'])['Athlete'].count()
# Reshape usa_medals_by_year by unstacking
usa_medals_by_year = usa_medals_by_year.unstack(level='Medal')
# Create an area plot of usa_medals_by_year
usa_medals_by_year.plot.area()
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Visualizing USA Medal Counts by Edition: Area Plot with Ordered Medals
# Redefine 'Medal' as an ordered categorical
# Ordering Bronze < Silver < Gold makes the plot stack medals in that order.
medals.Medal = pd.Categorical(values=medals.Medal,categories=['Bronze','Silver','Gold'],ordered=True)
# Create the DataFrame: usa
usa = medals[medals.NOC == 'USA']
# Group usa by 'Edition', 'Medal', and 'Athlete'
usa_medals_by_year = usa.groupby(['Edition', 'Medal'])['Athlete'].count()
# Reshape usa_medals_by_year by unstacking
usa_medals_by_year = usa_medals_by_year.unstack(level='Medal')
# Create an area plot of usa_medals_by_year
usa_medals_by_year.plot.area()
plt.show()
2,
1303,
438,
220,
1377,
220,
1377,
220,
1377,
35045,
8306,
6060,
35439,
351,
19798,
292,
201,
198,
2,
1303,
16718,
329,
6060,
33374,
13614,
10644,
220,
201,
198,
2,
1303,
43833,
40,
340,
338,
257,
23340,
286,
703,
284,
670,
201,
19... | 3.1618 | 8,220 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
16529,
30934,
198,
2,
15069,
357,
66,
8,
2321,
3899,
28238,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2... | 3.622951 | 427 |
# Challenge (DESAFIO) 016: car rental cost -- R$60.00 per day rented plus
# R$0.15 per kilometre driven.
print('=' * 20, 'DESAFIO 016', '=' * 20)
dias = int(input('Quantos dias alugados? '))
quilometros = float(input('Quantos km rodados? '))
total = 60 * dias + 0.15 * quilometros
print(f'O total a pagar é de R$ {total:.2f}.')
4798,
10786,
11639,
9,
1238,
11,
705,
30910,
8579,
9399,
5534,
21,
3256,
705,
11639,
9,
1238,
8,
1303,
482,
201,
198,
201,
198,
67,
28,
600,
7,
15414,
10786,
24915,
418,
2566,
292,
435,
1018,
22484,
30,
705,
4008,
201,
198,
201,
1... | 1.951456 | 103 |
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
# Static registry of DFGs, their squads, and the OpenStack components each
# squad owns. Consumers look components up by the 'name'/'components' keys.
DFGs = [
    {'name': 'network', 'squads': [
        {'name': 'vNES', 'components': ['neutron', 'python-neutronclient',
                                        'networking-l2gw', 'networking-bgpvpn']},
        {'name': 'octavia', 'components': ['octavia', 'neutron-lbaas']},
        {'name': 'ovn', 'components': ['networking-ovn']}]},
    {'name': 'storage', 'squads': [
        {'name': 'cinder', 'components': ['cinder', 'python-os-brick',
                                          'python-cinderclient']},
        {'name': 'glance', 'components': ['glance', 'glance_store',
                                          'python-glanceclient']},
        {'name': 'manila', 'components': ['manila', 'python-manilaclient']},
        {'name': 'sahara', 'components': ['sahara', 'python-saharaclient']},
        # Fixed typo: was 'python-swiftclien' (missing trailing 't'), breaking
        # the python-<project>client naming pattern used by every other entry.
        {'name': 'swift', 'components': ['swift', 'python-swiftclient']}]},
]
| [
2,
15069,
13130,
317,
5034,
347,
2301,
805,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846... | 2.467116 | 593 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import requests
from lxml import html

# Browser-like User-Agent so the request is not rejected as a bot.
headers = {
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
# Page to scrape.
url = "https://www.nba.com/"
r = requests.get(url, headers=headers)
# Fail fast on HTTP errors instead of silently parsing an error page.
r.raise_for_status()
# Parse the HTML response.
parser = html.fromstring(r.text)
# Extract the carousel block titles via XPath.
# NOTE(review): these hashed CSS class names are build artifacts and will
# break whenever nba.com redeploys; a more stable selector should be found.
titles = parser.xpath("//div[contains(@class, 'Block_titleContainer__3NqMt Block_titleWithCarousel__2HnEn')]//h2/text()")
for title in titles:
    print(title)
| [
11748,
7007,
198,
6738,
300,
19875,
1330,
27711,
198,
198,
50145,
796,
1391,
198,
220,
220,
220,
366,
7220,
12,
25781,
1298,
366,
44,
8590,
5049,
14,
20,
13,
15,
357,
55,
1157,
26,
7020,
2124,
4521,
62,
2414,
8,
4196,
13908,
20827,
... | 2.695652 | 253 |
# Copyright (c) 2019 Markus Ressel
# .
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# .
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# .
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from telegram_click.argument import Argument, Flag
from telegram_click.parser import parse_telegram_command
from tests import TestBase
| [
2,
220,
15069,
357,
66,
8,
13130,
46013,
1874,
741,
198,
2,
220,
764,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
220,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
... | 3.710059 | 338 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from common.Logging import get_generic_logger
from ..AppRunner import AppRunner
from .app import app
from .WebAppConfig import webappconf
if __name__ == '__main__':
    # NOTE(review): ``WebApp`` is neither imported nor defined in this module
    # (only ``app``, ``AppRunner`` and ``webappconf`` are imported), so this
    # guard raises NameError when executed. Presumably it should build/run the
    # imported ``app`` (possibly via AppRunner) -- confirm against the .app
    # module before fixing.
    app = WebApp()
    print(app.name)
    app.run()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6738,
2219,
13,
11187,
2667,
1330,
651,
62,
41357,
62,
6404,
1362,
198,
198,
6738,
11485,
4677,
49493,
1330,
2034,
49493,
198... | 2.65 | 100 |
#!/usr/bin/env python3
from gazebo_msgs.srv import GetModelState
import rospy
import math
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk -- its definition was
    # presumably stripped or lives elsewhere; running this file as-is raises
    # NameError.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
308,
1031,
1765,
78,
62,
907,
14542,
13,
27891,
85,
1330,
3497,
17633,
9012,
198,
11748,
686,
2777,
88,
198,
11748,
10688,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
... | 2.5 | 52 |
# NOTE(review): calls f(), which is not defined in this chunk; its purpose
# cannot be determined from here -- confirm f() exists in this module.
f()
| [
198,
69,
3419,
198
] | 1.25 | 4 |
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model, authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import Http404
from django.shortcuts import render, redirect
from .models import Profile, User
from .forms import (
RegisterForm, LoginForm, UserEditForm, ProfileEditForm, EditRolesForm)
from common.decorators import is_admin, is_admin_or_manager
from common.utils import get_user_roles
from projects.models import Project
from tickets.models import Ticket
@login_required
@login_required
@login_required
@login_required
@is_admin
@login_required
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
11,
8323,
5344,
11,
17594,
11,
2604,
448,
198,
6738,
426... | 3.452174 | 230 |
import os
if __name__ == '__main__':
    # NOTE(review): kmfs2dict() and extract_chapterid() are not defined in
    # this chunk -- their definitions appear to have been stripped; confirm
    # they exist in this module before running.
    kmfs = kmfs2dict('./datamodel')
    print(kmfs)
    print(extract_chapterid('datamodel/core/chapter1.json'))
| [
11748,
28686,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
10571,
9501,
796,
10571,
9501,
17,
11600,
7,
4458,
14,
19608,
321,
375,
417,
11537,
198,
220,
220,
220,
3601,
7,
13276,
... | 2.214286 | 70 |
from .order import Order
from .user import User
| [
6738,
764,
2875,
1330,
8284,
198,
6738,
764,
7220,
1330,
11787,
198
] | 4 | 12 |
# Copyright 2019 The KRules Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pykube
from krules_core.base_functions import RuleFunctionBase
from krules_core.providers import subject_factory
def k8s_subject(obj=None, resource_path=None, prefix="k8s:"):
    """
    Build a subject instance for a kubernetes resource.

    :param obj: the resource, or a pykube-style wrapper exposing it via
        ``.obj``
    :param resource_path: path of the resource; taken from the resource's
        ``metadata.selfLink`` when omitted
    :param prefix: prefix prepended to the subject name
    :return: the subject produced by ``subject_factory``
    """
    # Unwrap pykube-style objects that expose the raw resource as ``.obj``.
    resource = getattr(obj, 'obj', obj)
    if resource is None:
        resource = {}
    path = (resource["metadata"]["selfLink"]
            if resource_path is None else resource_path)
    return subject_factory(f"{prefix}{path}", event_data=resource)
def k8s_object(subject):
    """
    Return the k8s resource backing a subject instance.

    :param subject: a subject whose storage wraps a kubernetes resource
    :return: the raw resource held by the subject's storage
    :raises TypeError: if the subject is not backed by a k8s storage
    """
    try:
        return subject._storage._get_resource()
    except AttributeError as exc:
        # Chain the original error so the missing attribute stays visible in
        # the traceback instead of being hidden behind the TypeError.
        raise TypeError("not a k8s storaged subject") from exc
| [
2,
15069,
13130,
383,
29430,
5028,
46665,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 3 | 479 |
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField
from resource_tracker.models import ResourceGroup, Resource, ResourceAttribute, ResourceGroupAttributeDefinition
from service_catalog.models import Instance
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
1334,
62,
30604,
13,
39468,
1330,
21087,
9218,
9819,
15878,
198,
198,
6738,
8271,
62,
2213,
10735,
13,
27530,
1330,
20857,
13247,
11,
20857,
11,
20857,
33682,
11,
20857,
13247,
33682,
... | 4.696429 | 56 |
"""
The :mod:`fatf.utils.models.models` module holds custom models.
The models implemented in this module are mainly used for used for
FAT Forensics package testing and the examples in the documentation.
"""
# Author: Kacper Sokol <k.sokol@bristol.ac.uk>
# License: new BSD
import abc
from typing import Optional
import numpy as np
import fatf.utils.array.tools as fuat
import fatf.utils.array.validation as fuav
import fatf.utils.distances as fud
from fatf.exceptions import (IncorrectShapeError, PrefittedModelError,
UnfittedModelError)
# Declared public API of this module: only the concrete KNN model.
__all__ = ['KNN']
class Model(abc.ABC):
    """
    Abstract base class for predictive models.

    Concrete subclasses must provide ``fit`` and ``predict``; the optional
    ``predict_proba`` raises ``NotImplementedError`` unless overridden. The
    interface mirrors scikit-learn estimators and is relied upon throughout
    this package.

    Raises
    ------
    NotImplementedError
        Any of the required methods -- ``fit`` or ``predict`` -- is not
        implemented.
    """

    # pylint: disable=invalid-name

    @abc.abstractmethod
    def __init__(self) -> None:
        """
        Initialises the abstract model class.
        """

    @abc.abstractmethod
    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """
        Fits this predictive model.

        Parameters
        ----------
        X : numpy.ndarray
            A 2-dimensional numpy data array used to fit the model.
        y : numpy.ndarray
            A 1-dimensional numpy labels array used to fit the model.
        """

    @abc.abstractmethod
    def predict(self, X: np.ndarray) -> None:
        """
        Predicts labels of new data points using this model.

        Parameters
        ----------
        X : numpy.ndarray
            A 2-dimensional numpy data array for which labels are predicted.
        """

    def predict_proba(self, X: np.ndarray) -> None:
        """
        Predicts probabilities of labels for new data points using this model.

        Parameters
        ----------
        X : numpy.ndarray
            A 2-dimensional numpy data array for which labels probabilities
            are predicted.

        Raises
        ------
        NotImplementedError
            This optional method is not implemented by default.
        """
        raise NotImplementedError
class KNN(Model):
"""
A K-Nearest Neighbours model based on Euclidean distance.
When the ``k`` parameter is set to 0 the model works as a majority class
classifier. In case the count of neighbours (within ``k``) results in a
tie the overall majority class for the whole training data is returned.
Finally, when the training data contains categorical (i.e. non-numerical,
e.g. strings) columns the distance for these columns is 0 when the value
matches and 1 otherwise.
This model can operate in two modes: *classifier* or *regressor*. The first
one works for categorical and numerical targets and provides two predictive
methods: ``predict`` -- for predicting labels and ``predict_proba`` for
predicting probabilities of labels. The regressor mode, on the other hand,
requires the target to be numerical and it only supports the ``predict``
method, which returns the average of the target value of the ``k``
neighbours for the queried data point.
Parameters
----------
k : integer, optional (default=3)
The number of neighbours used to make a prediction. Defaults to 3.
mode : string, optional (default='classifier')
The mode in which the model will operate. Either ``'classifier'``
(``'c'``) or ``'regressor'`` (``'r'``). In the latter case
``predict_proba`` method is disabled.
Raises
------
PrefittedModelError
Raised when trying to fit a model that has already been fitted. Usually
raised when calling the ``fit`` method for the second time. Try using
the ``clear`` method to reset the model before fitting it again.
TypeError
The ``k`` parameter is not an integer.
UnfittedModelError
Raised when trying to predict data with a model that has not been
fitted yet. Try using the ``fit`` method to fit the model first.
ValueError
The ``k`` parameter is a negative number or the ``mode`` parameter does
not have one of the allowed values: ``'c'``, ``'classifier'``, ``'r'``
or ``'regressor'``.
Attributes
----------
_MODES : Set[string]
Possible modes of the KNN model: ``'classifier'`` (``'c'``) or
``'regressor'`` (``'r'``).
_k : integer
The number of neighbours used to make a prediction.
_is_classifier : boolean
True when the model is initialised (and operates) as a classifier.
False when it acts as a regressor.
_is_fitted : boolean
A Boolean variable indicating whether the model is fitted.
_X : numpy.ndarray
The KNN model training data.
_y : numpy.ndarray
The KNN model training labels.
_X_n : integer
The number of data points in the training set.
_unique_y : numpy.ndarray
An array with unique labels in the training labels set ordered
lexicographically.
_unique_y_counts : numpy.ndarray
An array with counts of the unique labels in the training labels set.
_unique_y_probabilities : numpy.ndarray
Probabilities of labels calculated using their frequencies in the
training data.
_majority_label : Union[string, integer, float]
The most common label in the training set.
_is_structured : boolean
A Boolean variable indicating whether the model has been fitted on a
structured numpy array.
_categorical_indices : numpy.ndarray
An array with categorical indices in the training array.
_numerical_indices : numpy.ndarray
An array with numerical indices in the training array.
"""
# pylint: disable=too-many-instance-attributes
_MODES = set(['classifier', 'c', 'regressor', 'r'])
def __init__(self, k: int = 3, mode: Optional[str] = None) -> None:
"""
Initialises the KNN model with the selected ``k`` parameter.
"""
super().__init__()
if not isinstance(k, int):
raise TypeError('The k parameter has to be an integer.')
if k < 0:
raise ValueError('The k parameter has to be a positive integer.')
if mode is None:
self._is_classifier = True
else:
if mode in self._MODES:
self._is_classifier = mode[0] == 'c'
else:
raise ValueError(('The mode parameter has to have one of the '
'following values {}.').format(self._MODES))
self._k = k
self._is_fitted = False
self._X = np.ndarray((0, 0)) # pylint: disable=invalid-name
self._y = np.ndarray((0, ))
self._X_n = int() # pylint: disable=invalid-name
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._majority_label = None
self._is_structured = False
self._categorical_indices = np.ndarray((0, ))
self._numerical_indices = np.ndarray((0, ))
    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """
        Fits the model.

        Parameters
        ----------
        X : numpy.ndarray
            The KNN training data.
        y : numpy.ndarray
            The KNN training labels.

        Raises
        ------
        IncorrectShapeError
            Either the ``X`` array is not 2-dimensional, the ``y`` array is not
            1-dimensional, the number of rows in ``X`` is not the same as the
            number of elements in ``y`` or the ``X`` array has 0 rows or 0
            columns.
        PrefittedModelError
            Trying to fit the model when it has already been fitted. Usually
            raised when calling the ``fit`` method for the second time without
            clearing the model first.
        TypeError
            Trying to fit a KNN predictor in a regressor mode with
            non-numerical target variable.
        """
        # Input validation: each check raises before any state is mutated, so
        # a failed fit leaves the model unfitted.
        if self._is_fitted:
            raise PrefittedModelError('This model has already been fitted.')
        if not fuav.is_2d_array(X):
            raise IncorrectShapeError('The training data must be a 2-'
                                      'dimensional array.')
        if not fuav.is_1d_array(y):
            raise IncorrectShapeError('The training data labels must be a 1-'
                                      'dimensional array.')
        if X.shape[0] == 0:
            raise IncorrectShapeError('The data array has to have at least '
                                      'one data point.')
        # If the array is structured the fuav.is_2d_array function takes care
        # of checking whether there is at least one column
        if not fuav.is_structured_array(X) and X.shape[1] == 0:
            raise IncorrectShapeError('The data array has to have at least '
                                      'one feature.')
        if X.shape[0] != y.shape[0]:
            raise IncorrectShapeError('The number of samples in X must be the '
                                      'same as the number of labels in y.')
        if not self._is_classifier and not fuav.is_numerical_array(y):
            raise TypeError('Regressor can only be fitted for a numerical '
                            'target vector.')

        # Split feature indices into numerical and categorical so distance
        # computation can treat each kind appropriately.
        numerical_indices, categorical_indices = fuat.indices_by_type(X)
        self._numerical_indices = numerical_indices
        self._categorical_indices = categorical_indices

        self._is_structured = fuav.is_structured_array(X)
        self._X = X
        self._y = y

        if self._is_classifier:
            unique_y, unique_y_counts = np.unique(self._y, return_counts=True)
            # Order labels lexicographically.
            unique_y_sort_index = np.argsort(unique_y)
            self._unique_y = unique_y[unique_y_sort_index]
            self._unique_y_counts = unique_y_counts[unique_y_sort_index]

            # How many other labels have the same count.
            # Ties for the most frequent label are broken by taking the
            # lexicographically smallest of the tied labels.
            top_y_index = self._unique_y_counts == np.max(
                self._unique_y_counts)
            top_y_unique_sorted = np.sort(self._unique_y[top_y_index])
            self._majority_label = top_y_unique_sorted[0]

            # Empirical label probabilities (relative frequencies).
            self._unique_y_probabilities = (
                self._unique_y_counts / self._y.shape[0])
        else:
            # Regressor mode: the "majority" fallback is the mean target value
            # and the per-label statistics are left empty.
            self._majority_label = self._y.mean()
            self._unique_y = np.ndarray((0, ))
            self._unique_y_counts = np.ndarray((0, ))
            self._unique_y_probabilities = np.ndarray((0, ))

        self._X_n = self._X.shape[0]
        self._is_fitted = True
def clear(self) -> None:
"""
Clears (unfits) the model.
Raises
------
UnfittedModelError
Raised when trying to clear a model that has not been fitted yet.
Try using the fit method to ``fit`` the model first.
"""
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
self._is_fitted = False
self._X = np.ndarray((0, 0))
self._y = np.ndarray((0, ))
self._X_n = int()
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._majority_label = None
self._is_structured = False
self._categorical_indices = np.ndarray((0, ))
self._numerical_indices = np.ndarray((0, ))
    def _get_distances(self, X: np.ndarray) -> np.ndarray:
        """
        Gets distances for a mixture of numerical and categorical features.

        For numerical columns the distance is calculated as the Euclidean
        distance. For categorical columns (i.e. non-numerical, e.g. strings)
        the distance is 0 when the value matches and 1 otherwise.

        Parameters
        ----------
        X : numpy.ndarray
            A data array for which distances to the training data will be
            calculated.

        Raises
        ------
        AssertionError
            Raised when the model is not fitted, X is not a 2-dimensional
            array or X's dtype is different than training data's dtype. It is
            also raised when the distances matrix is not 2-dimensional.

        Returns
        -------
        distances : numpy.ndarray
            An array of distances between X and the training data.
        """
        # pylint: disable=invalid-name
        # Internal helper: preconditions are asserts, not exceptions, since
        # the public callers validate input first.
        assert self._is_fitted, 'Cannot calculate distances on unfitted model.'
        assert fuav.is_2d_array(X), 'X must be a 2-dimensional array.'
        assert fuav.are_similar_dtype_arrays(X, self._X), \
            'X must have the same dtype as the training data.'

        # One row per training point, one column per query point; feature
        # kinds that are absent contribute all-zero distance matrices.
        distances_shape = (self._X.shape[0], X.shape[0])
        categorical_distances = np.zeros(distances_shape)
        numerical_distances = np.zeros(distances_shape)

        # Structured arrays are indexed by field names, plain arrays by
        # column positions -- hence the two symmetric branches.
        if self._is_structured:
            if self._categorical_indices.size:
                categorical_distances = fud.binary_array_distance(
                    self._X[self._categorical_indices],
                    X[self._categorical_indices])
            if self._numerical_indices.size:
                numerical_distances = fud.euclidean_array_distance(
                    self._X[self._numerical_indices],
                    X[self._numerical_indices])
        else:
            if self._categorical_indices.size:
                categorical_distances = fud.binary_array_distance(
                    self._X[:, self._categorical_indices],
                    X[:, self._categorical_indices])
            if self._numerical_indices.size:
                numerical_distances = fud.euclidean_array_distance(
                    self._X[:, self._numerical_indices],
                    X[:, self._numerical_indices])

        assert categorical_distances.shape == numerical_distances.shape, \
            'Different number of point-wise distances for these feature types.'

        # Total distance is the sum of the two per-kind contributions.
        distances = categorical_distances + numerical_distances
        assert fuav.is_2d_array(distances), 'Distances matrix must be 2D.'

        return distances
    def predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predicts labels of new instances with the fitted model.

        For a classifier the label is the majority vote among the ``k``
        nearest neighbours. A vote tie is broken by the overall label counts
        in the training data; if that is also tied, the lexicographically
        smallest label wins. For a regressor the prediction is the mean of
        the ``k`` nearest neighbours' targets. When ``k`` is not smaller
        than the number of training points, every query gets the majority
        label memorised at fit time.

        Parameters
        ----------
        X : numpy.ndarray
            The data for which labels will be predicted.

        Raises
        ------
        IncorrectShapeError
            X is not a 2-dimensional array, it has 0 rows or it has a different
            number of columns than the training data.
        UnfittedModelError
            Raised when trying to predict data when the model has not been
            fitted yet. Try using the ``fit`` method to fit the model first.
        ValueError
            X has a different dtype than the data used to fit the model.

        Returns
        -------
        predictions : numpy.ndarray
            Predicted class labels for each data point.
        """
        # pylint: disable=too-many-locals,too-many-branches
        if not self._is_fitted:
            raise UnfittedModelError('This model has not been fitted yet.')
        if not fuav.is_2d_array(X):
            raise IncorrectShapeError('X must be a 2-dimensional array. If '
                                      'you want to predict a single data '
                                      'point please format it as a single row '
                                      'in a 2-dimensional array.')
        if not fuav.are_similar_dtype_arrays(X, self._X):
            raise ValueError('X must have the same dtype as the training '
                             'data.')
        if not X.shape[0]:
            raise IncorrectShapeError('X must have at least one row.')
        # No need to check for columns in a structured array -> this is handled
        # by the dtype checker.
        if not fuav.is_structured_array(X):
            if X.shape[1] != self._X.shape[1]:
                raise IncorrectShapeError(('X must have the same number of '
                                           'columns as the training data '
                                           '({}).').format(self._X.shape[1]))
        predictions = np.empty((X.shape[0], ))
        if self._k < self._X_n:
            distances = self._get_distances(X)
            # If there are 3 nearest neighbours within distances 1, 2 and 2 and
            # k is set to 2, then argpartition will always take the first
            # within distance 2.
            knn = np.argpartition(distances, self._k, axis=0)
            predictions = []
            # Each column of knn indexes the training points nearest to one
            # query row (distances matrix is training-rows x query-rows).
            for column in knn.T:
                close_labels = self._y[column[:self._k]]
                if self._is_classifier:
                    values, counts = np.unique(
                        close_labels, return_counts=True)
                    # If there is a tie in the counts take into consideration
                    # the overall label count in the training data to resolve
                    # it.
                    top_label_index = counts == counts.max()
                    top_label_unique_sorted = np.sort(values[top_label_index])
                    assert len(top_label_unique_sorted.shape) == 1, \
                        'This should be a flat array.'
                    if top_label_unique_sorted.shape[0] > 1:
                        # Resolve the tie.
                        # Get count of these label for the training data.
                        labels_filter = np.array(
                            self._unique_y.shape[0] * [False])
                        # Build a boolean mask selecting the tied labels
                        # within the full set of training labels.
                        for top_prediction in top_label_unique_sorted:
                            unique_y_filter = self._unique_y == top_prediction
                            np.logical_or(
                                labels_filter,
                                unique_y_filter,
                                out=labels_filter)
                        g_top_label = self._unique_y[labels_filter]
                        g_top_label_counts = (
                            self._unique_y_counts[labels_filter])
                        # What if any of the global labels have the same count?
                        # np.sort below resolves that residual tie by picking
                        # the lexicographically smallest label.
                        g_top_label_index = g_top_label_counts == np.max(
                            g_top_label_counts)
                        g_top_label_sorted = np.sort(
                            g_top_label[g_top_label_index])
                        prediction = g_top_label_sorted[0]
                    else:
                        prediction = top_label_unique_sorted[0]
                else:
                    # Regressor: average the neighbours' targets.
                    prediction = close_labels.mean()
                predictions.append(prediction)
            predictions = np.array(predictions)
        else:
            # k >= number of training points: fall back to the majority label.
            predictions = np.array(X.shape[0] * [self._majority_label])
        return predictions
def predict_proba(self, X: np.ndarray) -> np.ndarray:
"""
Calculates label probabilities for new instances with the fitted model.
Parameters
----------
X : numpy.ndarray
The data for which labels probabilities will be predicted.
Raises
------
IncorrectShapeError
X is not a 2-dimensional array, it has 0 rows or it has a different
number of columns than the training data.
UnfittedModelError
Raised when trying to predict data when the model has not been
fitted yet. Try using the ``fit`` method to fit the model first.
RuntimeError
Raised when trying to use this method when the predictor is
initialised as a regressor.
ValueError
X has a different dtype than the data used to fit the model.
Returns
-------
probabilities : numpy.ndarray
Probabilities of each instance belonging to every class. The labels
in the return array are ordered by lexicographic order.
"""
if not self._is_classifier:
raise RuntimeError('This functionality is not available for a '
'regressor.')
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
if not fuav.is_2d_array(X):
raise IncorrectShapeError('X must be a 2-dimensional array. If '
'you want to predict a single data '
'point please format it as a single row '
'in a 2-dimensional array.')
if not fuav.are_similar_dtype_arrays(X, self._X):
raise ValueError('X must have the same dtype as the training '
'data.')
if not X.shape[0]:
raise IncorrectShapeError('X must have at least one row.')
# No need to check for columns in a structured array -> this is handled
# by the dtype checker.
if not fuav.is_structured_array(X):
if X.shape[1] != self._X.shape[1]:
raise IncorrectShapeError(('X must have the same number of '
'columns as the training data '
'({}).').format(self._X.shape[1]))
probabilities = np.empty((X.shape[0], self._unique_y.shape[0]))
if self._k < self._X_n:
distances = self._get_distances(X)
knn = np.argpartition(distances, self._k, axis=0)
probabilities = []
for column in knn.T:
close_labels = self._y[column[:self._k]]
values, counts = np.unique(close_labels, return_counts=True)
total_counts = np.sum(counts)
probs = np.zeros((self._unique_y.shape[0], ))
for i in range(values.shape[0]):
ind = np.where(self._unique_y == values[i])[0]
probs[ind] = counts[i] / total_counts
probabilities.append(probs)
probabilities = np.array(probabilities)
else:
probabilities = np.tile(self._unique_y_probabilities,
(X.shape[0], 1))
return probabilities
| [
37811,
198,
464,
1058,
4666,
25,
63,
17359,
69,
13,
26791,
13,
27530,
13,
27530,
63,
8265,
6622,
2183,
4981,
13,
198,
198,
464,
4981,
9177,
287,
428,
8265,
389,
8384,
973,
329,
973,
329,
198,
37,
1404,
4558,
49242,
5301,
4856,
290,
... | 2.209709 | 10,238 |
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the EKS Nodegroup resource
"""
import logging
import time
from typing import Dict, Tuple
import pytest
from acktest.k8s import resource as k8s
from acktest.resources import random_suffix_name
from e2e import CRD_VERSION, service_marker, CRD_GROUP, load_eks_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from .test_cluster import simple_cluster, wait_for_cluster_active, get_and_assert_status
# Plural name of the nodegroup custom resource -- presumably as registered
# under CRD_GROUP/CRD_VERSION; verify against the CRD definition.
RESOURCE_PLURAL = 'nodegroups'
# Time to wait after creating the CR for the status to be populated
CREATE_WAIT_AFTER_SECONDS = 10
# Time to wait after modifying the CR for the status to change
MODIFY_WAIT_AFTER_SECONDS = 5
# Time to wait after the nodegroup has changed status, for the CR to update
CHECK_STATUS_WAIT_SECONDS = 10
@pytest.fixture
@service_marker | [
2,
15069,
6186,
13,
785,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
921,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
... | 3.362963 | 405 |
import unittest
from http import HTTPStatus
from mock import patch
from metadata_service.api.table import TableDescriptionAPI
from metadata_service.api.column import ColumnDescriptionAPI
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
2638,
1330,
14626,
19580,
198,
198,
6738,
15290,
1330,
8529,
198,
6738,
20150,
62,
15271,
13,
15042,
13,
11487,
1330,
8655,
11828,
17614,
198,
6738,
20150,
62,
15271,
13,
15042,
13,
28665,
1330,
29201,
1... | 3.606061 | 66 |
# %%
import os
import subprocess

# Configure the environment for MDSplus before anything imports it.
os.environ["UDP_EVENTS"] = "yes"
os.environ["MDS_PATH"] = "/usr/local/mdsplus/tdi;/tdi"
os.environ["PATH"] = "/usr/local/mdsplus/bin:" + os.environ["PATH"]
os.environ["LD_LIBRARY_PATH"] = "/usr/local/mdsplus/lib"
# os.environ["mdsip_server_host"] =
# Get tree path environment variables configured in the MDSplus setup scripts
# %%
proc = subprocess.Popen(
    ". /usr/local/mdsplus/setup.sh; printenv | grep _path=",
    stdout=subprocess.PIPE,
    shell=True,
    # Decode stdout to str: on Python 3 the pipe yields bytes otherwise and
    # the .split("\n") below would raise a TypeError.
    universal_newlines=True,
)
proc.wait()
# Drop the trailing empty element produced by the final newline.
env_lines = proc.stdout.read().split("\n")[:-1]
proc.stdout.close()
for line in env_lines:
    # Split on the first '=' only -- values may themselves contain '='.
    name, _, value = line.partition("=")
    os.environ[name] = value
# %%
2,
43313,
198,
11748,
850,
14681,
11,
28686,
198,
198,
418,
13,
268,
2268,
14692,
52,
6322,
62,
20114,
15365,
8973,
796,
366,
8505,
1,
198,
418,
13,
268,
2268,
14692,
44,
5258,
62,
34219,
8973,
796,
12813,
14629,
14,
12001,
14,
9132... | 2.296029 | 277 |
#!/bin/env python2
# Fire a single TPC-C "Delivery" stored procedure against the server reached
# through `client.Connection` and dump the raw response.
# NOTE(review): Python 2 script (print statement below).
from client import Connection
c = Connection()
# Procedure arguments are passed as one JSON document: warehouse 1,
# district 1, carrier 1.
response = c.stored_procedure("TPCC-Delivery", """{ "W_ID": 1, "D_ID": 1, "O_CARRIER_ID": 1}""")
print response
| [
2,
48443,
8800,
14,
24330,
21015,
17,
198,
6738,
5456,
1330,
26923,
198,
66,
796,
26923,
3419,
198,
26209,
796,
269,
13,
301,
1850,
62,
1676,
771,
495,
7203,
7250,
4093,
12,
33129,
1600,
37227,
90,
366,
54,
62,
2389,
1298,
352,
11,
... | 2.656716 | 67 |
from .is_mmtf_MMTFDecoder import is_mmtf_MMTFDecoder
from .to_file_mmtf import to_file_mmtf
from .to_file_pdb import to_file_pdb
from .to_mdtraj_Trajectory import to_mdtraj_Trajectory
from .to_molsysmt_MolSys import to_molsysmt_MolSys
from .to_molsysmt_Topology import to_molsysmt_Topology
from .to_molsysmt_Trajectory import to_molsysmt_Trajectory
| [
6738,
764,
271,
62,
3020,
27110,
62,
12038,
10234,
10707,
12342,
1330,
318,
62,
3020,
27110,
62,
12038,
10234,
10707,
12342,
198,
6738,
764,
1462,
62,
7753,
62,
3020,
27110,
1330,
284,
62,
7753,
62,
3020,
27110,
198,
6738,
764,
1462,
... | 2.39726 | 146 |
from __future__ import print_function
import sys
import errno
import shutil
import os
import traceback
import subprocess
import io
import glob
import re
import git
import yaml
DEBUG = False
REFRESH_TEMPLATE = False
WORK_OFFLINE = False
def eprint(*values, **print_options):
    '''
    Write the given values to standard error via the built-in print().
    '''
    print(*values, file=sys.stderr, **print_options)
def log_warn(*message, **eprint_options):
    '''
    Emit a WARNING line on stderr, prefixed with the calling function's name.
    '''
    caller = sys._getframe(1).f_code.co_name
    eprint('WARNING:', caller + ':', *message, **eprint_options)
def log_err(*message, **eprint_options):
    '''
    Report a fatal error on stderr and terminate the process with status 1.
    '''
    caller = sys._getframe(1).f_code.co_name
    eprint('ERROR:', caller + ':', *message, **eprint_options)
    if DEBUG is True:
        # Show where the error originated when debugging is enabled.
        traceback.print_stack()
    sys.exit(1)
def log_dbg(*message, **eprint_options):
    '''
    Print a DEBUG line on stderr, but only when the module-level DEBUG flag
    is enabled.
    '''
    if DEBUG is True:
        caller = sys._getframe(1).f_code.co_name
        eprint('DEBUG:', caller + ':', *message, **eprint_options)
def delete_folder(path):
    '''
    Remove every file or directory matched by path (glob patterns allowed).
    '''
    for target in expand_path_with_glob(path):
        if not os.path.exists(target):
            continue
        if os.path.isfile(target):
            os.unlink(target)
        elif os.path.isdir(target):
            shutil.rmtree(target)
def copy_files(src, dst):
    '''
    Copy all the files from the path src to dst.

    Both paths are expanded first. If dst already exists, the contents of
    src are merged into it recursively; if src is a plain file it is copied
    with its metadata. Any other OS error is re-raised instead of being
    silently ignored.
    '''
    src = expand_path(src)
    dst = expand_path(dst)
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # dst already exists -- merge src into it item by item.
            for item in os.listdir(src):
                src_rec = os.path.join(src, item)
                dst_rec = os.path.join(dst, item)
                copy_files(src_rec, dst_rec)
        elif exc.errno == errno.ENOTDIR:
            # src is a single file, not a directory -- copy it directly.
            shutil.copy2(src, dst)
        else:
            # Do not mask genuine failures (permissions, missing source, ...).
            raise
        if not os.path.exists(dst):
            os.makedirs(dst)
def copy_folders(src, dst):
    '''
    Copy every sub-directory of src (files directly in src are skipped) to
    the corresponding location under dst.
    '''
    src_root = expand_path(src)
    dst_root = expand_path(dst)
    for entry in os.listdir(src_root):
        source = os.path.join(src_root, entry)
        if os.path.isdir(source):
            copy_files(source, os.path.join(dst_root, entry))
def load_config(path):
    '''
    Load the yaml config at path.

    Returns the parsed document, or an empty dict when the file cannot be
    read or parsed. NOTE: a readable but empty YAML file yields None, since
    yaml.safe_load's result is returned unchanged.
    '''
    config = None
    try:
        expanded_path = expand_path(path)
        with open(expanded_path, 'r') as fpr:
            config = yaml.safe_load(fpr)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed here.
        log_dbg('can not load config: %s' % (path))
        config = {}
    return config
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
198,
11748,
11454,
3919,
198,
11748,
4423,
346,
198,
11748,
28686,
198,
11748,
12854,
1891,
198,
11748,
850,
14681,
198,
11748,
33245,
198,
11748,
15095,
198,
11748,
302,
1... | 2.137287 | 1,231 |
#!/usr/bin/env python
"""
This is the initialization module for the nori library's DBMS subsystem;
see ../__main__.py for license and usage information.
"""
########################################################################
# IMPORTS
########################################################################
#########
# system
#########
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from pprint import pprint as pp # for debugging
##################
# this subpackage
##################
#
# add all of this subpackage's submodules here
#
# use absolute imports (e.g., .dbms), and import *
#
from .dbms import *
from .mysql import *
from .postgresql import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
37811,
198,
1212,
318,
262,
37588,
8265,
329,
262,
4249,
72,
5888,
338,
20137,
5653,
39335,
26,
198,
3826,
11485,
14,
834,
12417,
834,
13,
9078,
329,
5964,
290,
8748,
1321,
13,
... | 3.661905 | 210 |
from hashlib import pbkdf2_hmac
| [
6738,
12234,
8019,
1330,
279,
65,
74,
7568,
17,
62,
71,
20285,
198
] | 2.461538 | 13 |
from data_collection.management.commands import BaseScotlandSpatialHubImporter
| [
6738,
1366,
62,
43681,
13,
27604,
13,
9503,
1746,
1330,
7308,
47230,
4561,
34961,
16066,
3546,
26634,
628
] | 4.444444 | 18 |
import numpy as np
import pandas as pd
import pytest
from .test_routing import add_line_sequence
from ..flows import (get_inflow_locations_from_parent_model, get_inflows_from_parent_model,
add_to_perioddata, add_to_segment_data)
from gisutils import shp2df
@pytest.fixture()
@pytest.mark.skip(reason="still working on this feature")
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
198,
6738,
764,
9288,
62,
81,
13660,
1330,
751,
62,
1370,
62,
43167,
198,
6738,
11485,
44041,
1330,
357,
1136,
62,
259,
11125,
62,
1794... | 2.889831 | 118 |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from time import sleep
import mock
import pytest
import requests
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckEndpoints
from .common import (
CONFIG,
GITLAB_LOCAL_PORT,
GITLAB_LOCAL_PROMETHEUS_PORT,
GITLAB_PROMETHEUS_ENDPOINT,
GITLAB_TEST_PASSWORD,
GITLAB_URL,
HERE,
)
@pytest.fixture(scope="session")
def dd_environment():
    """
    Spin up and initialize gitlab for the test session.

    Yields the check CONFIG once the compose stack is reachable; the
    containers are torn down when the ``docker_run`` context exits.
    """
    # Environment variables consumed by the docker-compose file below.
    env = {
        'GITLAB_TEST_PASSWORD': GITLAB_TEST_PASSWORD,
        'GITLAB_LOCAL_PORT': str(GITLAB_LOCAL_PORT),
        'GITLAB_LOCAL_PROMETHEUS_PORT': str(GITLAB_LOCAL_PROMETHEUS_PORT),
    }
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', 'docker-compose.yml'),
        env_vars=env,
        conditions=[CheckEndpoints(GITLAB_URL, attempts=200), CheckEndpoints(GITLAB_PROMETHEUS_ENDPOINT)],
    ):
        # run pre-test commands: warm GitLab up with 100 requests spaced 2s
        # apart so metrics have data before the tests start.
        for _ in range(100):
            requests.get(GITLAB_URL)
            sleep(2)
        yield CONFIG
@pytest.fixture()
| [
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
2864,
12,
25579,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
357,
3826,
38559,
24290,
8,
198,
198,
11748,
28686,
198,
6738,
640,
1330... | 2.254206 | 535 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-16 15:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
16,
319,
1584,
12,
2713,
12,
1433,
1315,
25,
940,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
19... | 2.724638 | 69 |
import os
import traceback
import logging
import asyncio
import argparse
from scripts import install
from bot import root_path
# Script entry point: run the bot's async `main()` (defined elsewhere in
# this file) to completion on the default event loop.
if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main())
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the bot -- exit quietly.
        pass
    except Exception as e:
        traceback.print_exception(type(e), e, e.__traceback__)
        if install.WINDOWS:
            # Keep the console window open so the traceback can be read.
            os.system("pause")
| [
11748,
28686,
198,
11748,
12854,
1891,
198,
11748,
18931,
198,
11748,
30351,
952,
198,
11748,
1822,
29572,
198,
198,
6738,
14750,
1330,
2721,
198,
6738,
10214,
1330,
6808,
62,
6978,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 2.570588 | 170 |
#!/usr/bin/env python
# Find fiducials in Q60
#
# davep 30-oct-2013
import sys
import numpy as np
import logging
import pickle
import math
import itertools
import Image
import ImageDraw
from scipy.cluster.vq import kmeans,vq
import scipy.ndimage.filters
#import matplotlib.pyplot as plt
import imtools
# Expected width of a fiducial mark in pixels -- presumably used by the
# detection pass; confirm against the scanning code.
FIDUCIAL_WIDTH = 40
# States of the edge-seeking state machine: a negative edge, a positive
# edge, a flat run, then the mirrored second half of the pattern.
STATE_INIT=0
STATE_SEEK_NEGATIVE_EDGE=1 # large negative difference
STATE_SEEK_POSITIVE_EDGE=2 # large positive difference
STATE_SEEK_FLAT=3 # small or no difference
STATE_SEEK_POSITIVE_EDGE_2=4 # large positive difference
STATE_SEEK_NEGATIVE_EDGE_2=5 # large negative difference
STATE_SEEK_FLAT_2=6 # small or no difference
#STATE_SEEK_BLACK=10 # search for black line of fiducial
#STATE_SEEK_WHITE=11 # search for white line of fiducial
# Presumably the cutoffs for the "large" and "small" differences named in
# the state comments above -- TODO confirm in the scanner loop.
edge_threshold = 20
flat_threshold = 2
# for fid in filtered_fiducial_list :
# print fid["row"], fid["col"], fid["dist_prev"],fid["dist_next"],fid["group"]
# data = np.asarray([ (fid["row"],fid["col"]) for fid in filtered_fiducial_list ])
#
# # http://glowingpython.blogspot.com/2012/04/k-means-clustering-with-scipy.html
# # computing K-Means with K = 2 (2 clusters)
# centroids,_ = kmeans(data,2)
# # assign each sample to a cluster
# idx,_ = vq(data,centroids)
#
# print centroids
# plt.plot(data[idx==0,0],data[idx==0,1],'ob',
# data[idx==1,0],data[idx==1,1],'or',
# data[idx==2,0],data[idx==2,1],'og') # third cluster points
# plt.plot(centroids[:,0],centroids[:,1],'sm',markersize=8)
# plt.show()
if __name__=='__main__' :
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
9938,
49909,
1229,
8231,
287,
1195,
1899,
198,
2,
198,
2,
288,
1015,
79,
1542,
12,
38441,
12,
6390,
198,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
18... | 2.333824 | 680 |
import serial
import time
import os
import sys, getopt
import signal
import time
import pygame
import os
import pyttsx3
from DT import DT, DT_voice
from datetime import date
from Weather import Weather , Weather_voice
from RPi import GPIO
from gpiozero import Button
from time import sleep
from Media_player import Media_player as MP
from edge_impulse_linux.audio import AudioImpulseRunner
# Push button on GPIO pin 18 (BCM numbering assumed -- TODO confirm wiring).
aion = Button(18)
# Edge Impulse audio runner -- presumably assigned later (verify in main).
runner = None
# Text-to-speech engine configured with a British English voice variant.
engine = pyttsx3.init()
engine.setProperty('voice','english_rp+f4')
# Install the Ctrl-C handler (signal_handler is defined elsewhere in file).
signal.signal(signal.SIGINT, signal_handler)
if __name__ == '__main__':
    # Initialise the media player before entering the main loop.
    MP("init", None, None)
    main(sys.argv[1:])
| [
11748,
11389,
198,
11748,
640,
220,
198,
11748,
28686,
198,
11748,
25064,
11,
651,
8738,
198,
11748,
6737,
198,
11748,
640,
198,
11748,
12972,
6057,
198,
11748,
28686,
198,
11748,
12972,
83,
912,
87,
18,
198,
6738,
24311,
1330,
24311,
1... | 3.075 | 200 |
# Copyright 2017-2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main API for Authbox.
Your business logic should subclass BaseDispatcher and set up your peripherals
in its __init__ method. Most simple uses will use callbacks for everything.
See two_button.py as an example workflow.
Peripherals are kept in other files in this same package, and should be listed
in CLASS_REGISTRY so they can be loaded lazily.
"""
from __future__ import print_function
import sys
import threading
import traceback
import types
from authbox.compat import queue
from RPi import GPIO
# The line above simplifies imports for other modules that are already importing from api.
# TODO give each object a logger and use that instead of prints
CLASS_REGISTRY = [
"authbox.badgereader_hid_keystroking.HIDKeystrokingReader",
"authbox.badgereader_wiegand_gpio.WiegandGPIOReader",
"authbox.gpio_button.Button",
"authbox.gpio_relay.Relay",
"authbox.gpio_buzzer.Buzzer",
"authbox.timer.Timer",
]
# Add this to event_queue to request a graceful shutdown.
SHUTDOWN_SENTINEL = object()
class NoMatchingDevice(Exception):
    """Generic exception raised when a requested device cannot be found.

    Message/argument conventions are left to the raise sites.
    """
def split_escaped(s, glue=",", preserve=False):
    """Split ``s`` on ``glue``, honouring single-char backslash escapes.

    A backslash makes the following character literal, so an escaped glue
    character does not split. With ``preserve=True`` the backslashes are
    kept in the output pieces; otherwise they are stripped. A trailing
    lone backslash is now dropped gracefully -- previously ``next(it)`` on
    the exhausted iterator raised RuntimeError (PEP 479).
    """
    buf = []
    it = iter(s)
    for c in it:
        if c == glue:
            yield "".join(buf)
            del buf[:]
        elif c == "\\":
            if preserve:
                buf.append(c)
            # Take the next character literally; the "" default handles a
            # dangling backslash at the end of the string.
            c = next(it, "")
            buf.append(c)
        else:
            buf.append(c)
    if buf:
        yield "".join(buf)
| [
2,
15069,
2177,
12,
7908,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
26... | 2.876 | 750 |
from .element import Element
class Sample(Element):
    """
    An Element representing sample output from a computer program.

    NOTE(review): original docstring said "deui-examples output" -- the
    exact source of these samples should be confirmed against the callers.
    """
| [
6738,
764,
30854,
1330,
11703,
628,
198,
4871,
27565,
7,
20180,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1432,
6629,
390,
9019,
12,
1069,
12629,
5072,
422,
3644,
1430,
13,
198,
220,
220,
220,
37227,
198
] | 3.307692 | 39 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 08:20:35 2016
@author: Gonçalo
"""
if __name__ is "__main__":
seq = raw_input('What is your seq?')
print 'Complement- ' + complement(seq)
print'Complement reverse- ' + complement_reverse(seq) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
5979,
220,
718,
8487,
25,
1238,
25,
2327,
1584,
198,
198,
31,
9800,
25,
35371,
16175,
7... | 2.574074 | 108 |
from ._apis import BaseApi, DataDragonApi, ChampionApiV3, ChampionMasteryApiV3, LeagueApiV3, LolStatusApiV3
from ._apis import MatchApiV3, SpectatorApiV3, SummonerApiV3, ThirdPartyCodeApiV3
from .Handlers import JsonifyHandler, ThrowOnErrorHandler, TypeCorrectorHandler
from .Handlers.RateLimit import RateLimitHandler
class RiotWatcher(object):
    """
    RiotWatcher class is intended to be the main interaction point with the RiotAPI.

    Each endpoint is exposed as a read-only property returning a dedicated
    sub-API object; all of them share one BaseApi instance and therefore a
    single handler chain.
    """
    def __init__(self, api_key, custom_handler_chain=None):
        """
        Initialize a new instance of the RiotWatcher class.

        :param string api_key: the API key to use for this instance
        :param List[RequestHandler] custom_handler_chain:
                RequestHandler chain to pass to the created BaseApi object.
                This chain is called in order before any calls to the API, and called in
                reverse order after any calls to the API.
                If preview_request returns data, the rest of the call short circuits,
                preventing any call to the real API and calling any handlers that have already
                been run in reverse order.
                This should allow for dynamic tiered caching of data.
                If after_request returns data, that is the data that is fed to the next handler
                in the chain.
                Default chain is:
                    [
                        JsonifyHandler,
                        ThrowOnErrorHandler,
                        TypeCorrectorHandler,
                        RateLimitHandler
                    ]
        """
        # Order matters: handlers run top-to-bottom before each request and
        # bottom-to-top after it (see docstring above).
        if custom_handler_chain is None:
            custom_handler_chain = [
                JsonifyHandler(),
                ThrowOnErrorHandler(),
                TypeCorrectorHandler(),
                RateLimitHandler(),
            ]
        # One shared BaseApi so every endpoint object goes through the same
        # handler chain (rate limiting, JSON decoding, error translation).
        self._base_api = BaseApi(api_key, custom_handler_chain)
        self._champion = ChampionApiV3(self._base_api)
        self._champion_mastery = ChampionMasteryApiV3(self._base_api)
        self._league = LeagueApiV3(self._base_api)
        self._lol_status = LolStatusApiV3(self._base_api)
        self._match = MatchApiV3(self._base_api)
        self._spectator = SpectatorApiV3(self._base_api)
        self._data_dragon = DataDragonApi(self._base_api)
        self._summoner = SummonerApiV3(self._base_api)
        self._third_party_code = ThirdPartyCodeApiV3(self._base_api)
        # todo: tournament-stub
        # todo: tournament
    @property
    def champion_mastery(self):
        """
        Interface to the ChampionMastery Endpoint

        :rtype: ChampionMasteryApiV3
        """
        return self._champion_mastery
    @property
    def champion(self):
        """
        Interface to the Champion Endpoint

        :rtype: ChampionApiV3
        """
        return self._champion
    @property
    def league(self):
        """
        Interface to the League Endpoint

        :rtype: LeagueApiV3
        """
        return self._league
    @property
    def lol_status(self):
        """
        Interface to the LoLStatus Endpoint

        :rtype: LolStatusApiV3
        """
        return self._lol_status
    @property
    def match(self):
        """
        Interface to the Match Endpoint

        :rtype: MatchApiV3
        """
        return self._match
    @property
    def spectator(self):
        """
        Interface to the Spectator Endpoint

        :rtype: SpectatorApiV3
        """
        return self._spectator
    @property
    def data_dragon(self):
        """
        Interface to the DataDragon Endpoint

        :rtype: DataDragonApi
        """
        return self._data_dragon
    @property
    def summoner(self):
        """
        Interface to the Summoner Endpoint

        :rtype: SummonerApiV3
        """
        return self._summoner
    @property
    def third_party_code(self):
        """
        Interface to the Third Party Code Endpoint

        :rtype: ThirdPartyCodeApiV3
        """
        return self._third_party_code
| [
6738,
47540,
499,
271,
1330,
7308,
32,
14415,
11,
6060,
17808,
32,
14415,
11,
15869,
32,
14415,
53,
18,
11,
15869,
18254,
88,
32,
14415,
53,
18,
11,
4041,
32,
14415,
53,
18,
11,
44841,
19580,
32,
14415,
53,
18,
198,
6738,
47540,
4... | 2.17927 | 1,891 |
dx([[0, 17],[12, 1]], [[], [[5, 3], [12, 8]]])
dx([[0, 17],[12, 1]], [[[5, 3]], [[12, 8]]])
dx([[0, 10],[12, 8]], [[], [[5, 3], [12, 8]]])
dx([[0, 10],[12, 8]], [[[5, 3]], [[12, 8]]])
| [
198,
34350,
26933,
58,
15,
11,
1596,
38430,
1065,
11,
352,
60,
4357,
16410,
4357,
16410,
20,
11,
513,
4357,
685,
1065,
11,
807,
11907,
12962,
198,
34350,
26933,
58,
15,
11,
1596,
38430,
1065,
11,
352,
60,
4357,
16410,
58,
20,
11,
... | 1.754717 | 106 |
from .Pin import Pin
from .Pin_t import Pin_t
| [
6738,
764,
28348,
1330,
13727,
198,
6738,
764,
28348,
62,
83,
1330,
13727,
62,
83,
628
] | 2.9375 | 16 |