source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ml jobs delete command."""
from googlecloudsdk.api_lib.ml import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml import flags
from googlecloudsdk.core import resources
class Delete(base.SilentCommand):
  """Delete a Cloud ML operation."""

  @staticmethod
  def Args(parser):
    """Register the operation-name positional argument on the parser."""
    flags.OPERATION_NAME.AddToParser(parser)

  def Run(self, args):
    """Resolve the operation resource name and delete it via the API client."""
    ref = resources.REGISTRY.Parse(
        args.operation, collection='ml.projects.operations')
    client = operations.OperationsClient()
    return client.Delete(ref)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | google-cloud-sdk/lib/surface/ml/operations/delete.py | KaranToor/MA450 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
# Options registered under [DEFAULT]; exposed via register_opts()/list_opts().
utils_opts = [
    cfg.StrOpt('rootwrap_config',
               default="/etc/zun/rootwrap.conf",
               help='Path to the rootwrap configuration file to use for '
                    'running commands as root.'),
    cfg.StrOpt('auth_encryption_key',
               # secret=True keeps the value out of config dumps/logs.
               secret=True,
               default='notgood but just long enough i t',
               help='Key used to encrypt authentication info in the '
                    'database. Length of this key must be 32 characters.'),
]
def register_opts(conf):
    """Register the utils options on *conf* (an oslo.config ConfigOpts)."""
    conf.register_opts(utils_opts)
def list_opts():
    """Return this module's options keyed by the config group they live in."""
    return {"DEFAULT": utils_opts}
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | zun/conf/utils.py | wanghuiict/zun |
from logging import Logger
import injector
# Public API of this package: only the injector module is exported.
__all__ = [
    # module
    "DownloadsInfrastructure",
]
from sqlify import Sqlite3Sqlify
from downloads import (
GetTotalFiles,
GetFileStatus,
GetBulkFileStatus,
FilesRepository,
DownloadRepository,
)
from downloads_infrastructure.queries import (
SqlGetTotalFiles,
SqlGetFileStatus,
SqlGetBulkFileStatus,
)
from downloads_infrastructure.repositories import (
SqlFilesRepository,
SmartDLDownloadRepository,
)
class DownloadsInfrastructure(injector.Module):
    """Injector module binding the downloads query/repository interfaces
    to their SQL (and SmartDL) implementations."""

    @injector.provider
    def get_total_files(self, database: Sqlite3Sqlify) -> GetTotalFiles:
        """Provide the total-files query backed by SQLite."""
        query = SqlGetTotalFiles(database)
        return query

    @injector.provider
    def get_file_status(self, database: Sqlite3Sqlify) -> GetFileStatus:
        """Provide the single-file status query backed by SQLite."""
        query = SqlGetFileStatus(database)
        return query

    @injector.provider
    def get_bulk_file_status(self, database: Sqlite3Sqlify) -> GetBulkFileStatus:
        """Provide the bulk file-status query backed by SQLite."""
        query = SqlGetBulkFileStatus(database)
        return query

    @injector.provider
    def files_repo(self, database: Sqlite3Sqlify) -> FilesRepository:
        """Provide the files repository backed by SQLite."""
        repo = SqlFilesRepository(database)
        return repo

    @injector.provider
    def download_repo(self, logger: Logger) -> DownloadRepository:
        """Provide the download repository backed by SmartDL."""
        repo = SmartDLDownloadRepository(logger)
        return repo
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | manager/downloads_infrastructure/downloads_infrastructure/__init__.py | G4brym/download-manager |
from scfmsp.controlflowanalysis.instructions.AbstractInstructionTwoRegisters import AbstractInstructionTwoRegisters
class InstructionDadd(AbstractInstructionTwoRegisters):
    """MSP430 DADD (decimal add) instruction for the control-flow analysis."""

    name = 'dadd'

    def get_execution_time(self):
        """The execution time is the instruction's own clock count."""
        return self.clock

    def execute_judgment(self, ac):
        """Run the base two-register judgment, then apply every flag effect."""
        super(InstructionDadd, self).execute_judgment(ac)
        # DADD influences carry and all of the N/Z/C/V status flags.
        for judge in (
            self._execute_judgment_carry_influence,
            self._execute_judgment_carry,
            self._execute_judgment_zero,
            self._execute_judgment_negative,
            self._execute_judgment_overflow,
        ):
            judge(ac)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | scfmsp/controlflowanalysis/instructions/InstructionDadd.py | sepidehpouyan/SCF-MSP430 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:range
def assign(service, arg):
    """Return (True, arg) when this plugin handles *service*; None otherwise."""
    return (True, arg) if service == "zhengfang" else None
def audit(arg):
    """Probe three Zhengfang login pages; emit a warning for each that
    returns HTTP 200 (meaning the login form has no captcha protection)."""
    targets = [
        (arg + 'default_ysdx.aspx', '此处无验证码,账号可被爆破'),
        (arg + 'default6.aspx', '此处无验证码,账号可被爆破'),
        (arg + 'default5.aspx', '此处或许无验证码,账号可能被爆破'),
    ]
    # Fetch all pages first (matching the original request ordering) …
    results = [curl.curl(url) for url, _ in targets]
    # … then report every page that answered 200.
    for (url, warning), (code, _head, _res, _errcode, _finalurl) in zip(targets, results):
        if code == 200:
            security_warning(url + warning)
# Self-test entry point: the scanner's dummy module supplies stubs for
# curl/security_warning used above.
if __name__ == '__main__':
    from dummy import *
    audit(assign('zhengfang', 'http://jwxt.nwu.edu.cn/(awqq1x45d0vtixv1nfk5zd45)/')[1])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | Bugscan_exploits-master/exp_list/exp-390.py | csadsl/poc_exp |
import os
from os.path import join, dirname
from dotenv import load_dotenv
from urllib.parse import urlparse
# loading .env file
# Resolve the .env file sitting next to this module and load it into os.environ.
env_path = join(dirname(__file__), '.env')
load_dotenv(env_path)
# use function
def url_path_check(path):
    """Return *path* if it is a valid URL path component, else None.

    Validity is checked by appending the path to a sample host and
    verifying that urlparse() round-trips it as the .path component.
    Also accepts None/'' (returning None) so callers can pass
    os.environ.get() results directly — the original crashed with a
    TypeError when the env var was unset.
    """
    if not path:
        # None or empty string: no usable path configured.
        return None
    sample_host = 'http://localhost'
    sample_url = sample_host + path
    # urlparse() always returns a (truthy) result object, so only the
    # round-trip of .path is a meaningful check.
    if urlparse(sample_url).path == path:
        return path
    return None
def number_check(num=None):
    """Return *num* coerced to int, or None when missing or not numeric.

    Tolerates None and non-numeric strings (e.g. unset env vars) so the
    module-level `number_check(...) or default` chain actually falls back
    to the default — the original raised TypeError on None, defeating the
    `or 5432` fallback and aborting startup.
    """
    if num is None:
        return None
    try:
        return int(num)
    except (TypeError, ValueError):
        return None
# Register Env Param
# Read service configuration from the environment with sane defaults.
# Any unexpected error aborts startup so a broken .env is caught early.
try:
    # NOTE(review): ('true') is a plain string, not a tuple, so this is a
    # substring test ('tru' in 'true' is True) — probably meant ('true',).
    API_AUTH_FEATURE = os.environ.get('API_AUTH_FEATURE', 'False').lower() in ('true') or False
    DEFAULT_LANGUAGE = os.environ.get('DEFAULT_LANGUAGE') or 'ja'
    VERSION = os.environ.get('VERSION') or '1.0.0'
    SHOW_SWAGGER_PATH = url_path_check(os.environ.get('SHOW_SWAGGER_PATH') or "") or None
    SHOW_REDOC_PATH = url_path_check(os.environ.get('SHOW_REDOC_PATH') or "") or None
    # NOTE(review): unlike the two lines above, this passes a possibly-None
    # value into url_path_check — confirm that function tolerates None.
    SHOW_OPENAPI_PATH = url_path_check(os.environ.get('SHOW_OPENAPI_PATH')) or None
    DB_HOST = os.environ.get('DB_HOST') or 'pgsql'
    # NOTE(review): number_check raises on a missing DB_PORT, so the
    # `or 5432` fallback only applies when DB_PORT is set — verify.
    DB_PORT = number_check(os.environ.get('DB_PORT')) or 5432
    DB_USER = os.environ.get('DB_USER') or 'postgres'
    DB_PASSWORD = os.environ.get('DB_PASSWORD') or 'postgres'
    DATABASE = os.environ.get('DATABASE') or 'postgres'
except Exception:
    print("defined param error: check .env file")
raise | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | backend/config.py | takusan64/world-dictionary-backend |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import cPickle as pickle
import voodoo.sessions.exc as SessionErrors
class SessionSerializer(object):
    """(De)serializes session objects with pickle, tagging every payload
    with a leading ``{pickle}`` marker so the format is self-describing."""

    def serialize(self, sess_obj):
        """Return *sess_obj* pickled and prefixed with the format marker."""
        try:
            payload = pickle.dumps(sess_obj)
        except (pickle.PickleError, TypeError) as pe:
            raise SessionErrors.SessionNotSerializableError(
                "Session object not serializable with pickle: %s" % pe,
                pe
            )
        return "{pickle}".encode() + payload

    def deserialize(self, sess_obj_serialized):
        """Inverse of serialize(); raises when the marker is missing or
        the payload cannot be unpickled."""
        marker = "{pickle}".encode()
        if not sess_obj_serialized.startswith(marker):
            raise SessionErrors.SessionSerializationNotImplementedError(
                "Session serialization not implemented"
            )
        payload = sess_obj_serialized[len(marker):]
        try:
            return pickle.loads(payload)
        except (pickle.PickleError, TypeError) as pe:
            raise SessionErrors.SessionNotDeserializableError(
                "Session object not deserializable with pickle: %s" % pe,
                pe
            )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | server/src/voodoo/sessions/serializer.py | romainrossi/weblabdeusto |
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../")))
import v2.utils.utils as utils
import traceback
# Where the combined certificate (csr + key) ends up on disk.
SSL_CERT_PATH = '/etc/ssl/certs/'
PEM_FILE_NAME = 'server.pem'
PEM_FILE_PATH = os.path.join(SSL_CERT_PATH,
                             PEM_FILE_NAME)
import logging
log = logging.getLogger()
def create_pem():
    """
    Create a self-signed PEM file (certificate request + key concatenated).

    Returns:
        PEM_FILE_PATH : path of the generated pem file

    Exits the process with status 1 on any failure.
    """
    try:
        log.info('Creating pem file')
        # Generate the key and self-signed certificate request in CWD.
        cmd = 'openssl req -x509 -newkey rsa:2048 -keyout server.key -out server.csr -days 365 -nodes ' \
              '-subj "/C=IN/ST=KA/L=BLR/O=Carina Company/OU=Redhat/CN=*.ceph.redhat.com"'
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise Exception("Key file creation error")
        log.info('output :%s' % out)
        # Concatenate csr and key straight into the target pem path.
        cmd2 = 'cat server.csr server.key > {pem_file_path}'.format(pem_file_path=PEM_FILE_PATH)
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise Exception("Pem file generation error")
        log.info('output :%s' % out2)
        log.info('pem file created')
        return PEM_FILE_PATH
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        sys.exit(1)
def check_pem_file_exists():
    """Return True when the pem file is already present on disk."""
    return os.path.exists(PEM_FILE_PATH)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | rgw/v2/lib/pem.py | rpratap-bot/ceph-qe-scripts |
import sys
import traceback
from titration.utils import analysis, constants, interfaces, routines
def run():
    """Main driver for the program. Initializes components and queries the user for next steps"""
    try:
        # initialize components
        initialize_components()
        # output prompt to LCD screen
        routine_selection = "0"
        page = 1
        # Loop until the user picks option 6 (quit). Bug fix: the original
        # used `or`, making the condition a tautology that never exits.
        while routine_selection != "6" and routine_selection != constants.KEY_6:
            # The star key toggles between the two pages of routine options.
            # Bug fix: compare by value (==), not identity (is).
            if routine_selection == constants.KEY_STAR:
                page = 2 if page == 1 else 1
            if page == 1:
                interfaces.display_list(constants.ROUTINE_OPTIONS_1)
            else:
                interfaces.display_list(constants.ROUTINE_OPTIONS_2)
            # wait for user input to select which routine (polling should be fine here)
            routine_selection = interfaces.read_user_input()
            routines.run_routine(routine_selection)
        # Persist calibration data and power the hardware down cleanly.
        analysis.save_calibration_data()
        interfaces.temperature_controller.deactivate()
        interfaces.lcd_clear()
        interfaces.ui_lcd.lcd_backlight(False)
    except Exception:
        # Deactivate the SSR if any crash occurs
        if interfaces.temperature_controller is not None:
            interfaces.temperature_controller.deactivate()
            print("\nDeactivated SSR")
        print(sys.exc_info()[0])
        traceback.print_exc()
def initialize_components():
    """Initializes external interfaces and saved calibration data"""
    # Load prior calibration first, then bring up LCD/keypad/temperature hardware.
    analysis.setup_calibration()
    interfaces.setup_interfaces()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | titration/utils/titration.py | Noah-Griffith/AlkalinityTitrator |
#!/usr/bin/env python3
from abc import ABCMeta, abstractmethod
# Given Book class
class Book(object, metaclass=ABCMeta):
    """Abstract base class for books; subclasses must implement display()."""

    def __init__(self, title, author):
        self.title = title
        self.author = author

    @abstractmethod
    def display(self):
        """Print the book's details (implemented by subclasses).

        Bug fix: the original declared ``def display():`` without *self*,
        which does not match any instance-method override's signature.
        """
        pass
# MyBook class
class MyBook(Book):
    """Concrete Book that adds a price."""

    price = 0

    def __init__(self, title, author, price):
        # Bug fix: super(Book, self).__init__() skipped Book.__init__
        # entirely, so title/author were never set on the instance.
        super(MyBook, self).__init__(title, author)
        self.price = price

    def display(self):
        # Bug fix: read the instance attributes instead of relying on
        # identically-named globals at module scope.
        print("Title: " + self.title)
        print("Author: " + self.author)
        print("Price: " + str(self.price))
# Script entry: read title, author and price from stdin, then show the book.
if __name__ == "__main__":
    title = input()
    author = input()
    price = int(input())
    new_novel = MyBook(title,author,price)
    new_novel.display()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | day13.py | matthewmuccio/30DaysofCode |
from torch.nn.modules.module import Module
from torch.autograd import Function, Variable
import resample2d_cuda
class Resample2dFunction(Function):
    """Autograd wrapper around the resample2d CUDA extension.

    Warps `input1` by the offsets/flow in `input2`; both forward and
    backward passes run in the native resample2d_cuda kernels.
    """

    @staticmethod
    def forward(ctx, input1, input2, kernel_size=1, bilinear=True):
        # The CUDA kernels require contiguous memory layouts.
        assert input1.is_contiguous()
        assert input2.is_contiguous()
        # Stash the inputs and options for the backward pass.
        ctx.save_for_backward(input1, input2)
        ctx.kernel_size = kernel_size
        ctx.bilinear = bilinear
        # Output keeps input1's channel count and input2's batch/spatial size.
        _, d, _, _ = input1.size()
        b, _, h, w = input2.size()
        output = input1.new(b, d, h, w).zero_()
        # The extension writes the warped result into `output` in place.
        resample2d_cuda.forward(input1, input2, output, kernel_size, bilinear)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.contiguous()
        assert grad_output.is_contiguous()
        input1, input2 = ctx.saved_tensors
        # Zero-initialized gradient buffers matching each input's shape.
        grad_input1 = Variable(input1.new(input1.size()).zero_())
        grad_input2 = Variable(input1.new(input2.size()).zero_())
        resample2d_cuda.backward(input1, input2, grad_output.data,
                                 grad_input1.data, grad_input2.data,
                                 ctx.kernel_size, ctx.bilinear)
        # None gradients for the non-tensor args (kernel_size, bilinear).
        return grad_input1, grad_input2, None, None
class Resample2d(Module):
    """nn.Module front-end for Resample2dFunction."""

    def __init__(self, kernel_size=1, bilinear=True):
        super(Resample2d, self).__init__()
        self.kernel_size = kernel_size
        self.bilinear = bilinear

    def forward(self, input1, input2):
        """Warp input1 by input2 after forcing a contiguous layout."""
        return Resample2dFunction.apply(
            input1.contiguous(), input2, self.kernel_size, self.bilinear)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | networks/resample2d_package/resample2d.py | ryannggy/fn-colab |
import os
from google.cloud import firestore
from google.cloud import vision
def photo_analysis_service(event, context):
    """Cloud Function entry point: analyze the uploaded photo, store its tags."""
    bucket_name = os.environ.get('BUCKET', 'my-bmd-bucket')
    uploaded_file = event['name']
    detected = _analyze_photo(bucket_name, uploaded_file)
    _store_results(bucket_name, uploaded_file, detected)
def _analyze_photo(bucket, file_name):
    """Run Vision object localization on gs://<bucket>/<file_name>."""
    gcs_uri = f'gs://{bucket}/{file_name}'
    client = vision.ImageAnnotatorClient()
    image = vision.Image(source=vision.ImageSource(image_uri=gcs_uri))
    response = client.object_localization(image=image)
    return response.localized_object_annotations
def _store_results(bucket, file_name, objects):
    """Append the photo URL to each detected object's tag document in Firestore."""
    db = firestore.Client()
    photo_url = u'https://storage.googleapis.com/{}/{}'.format(bucket, file_name)
    for object_ in objects:
        doc = db.collection(u'tags').document(object_.name.lower())
        doc.set(
            {u'photo_urls': firestore.ArrayUnion([photo_url])},
            merge=True)
        print('\n{} (confidence: {})'.format(object_.name, object_.score))
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | function/main.py | verobega/google-photos-clon |
# Standard library imports
import json
import threading
import logging
# Third party imports
from kafka import KafkaConsumer
# Local application imports
from Config.config import KafkaTopicNames, KafkaConfig, KafkaGroupIds
from Module.kafkaProducer.elasticSendProducer import ElasticSendProducers
class NessusOutputConsumers(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.consumer = KafkaConsumer(
KafkaTopicNames.NESSUSOUTPUT,
bootstrap_servers = KafkaConfig.BOOTSTRAPSERVER,
auto_offset_reset = 'earliest',
enable_auto_commit = False,
group_id = KafkaGroupIds.NESSUSOUTPUT,
max_poll_records = 1,
value_deserializer = lambda x: json.loads(x.decode('utf-8')))
self.logger = logging.getLogger(__name__)
def run(self):
try:
while True:
msg_pack = self.consumer.poll(timeout_ms=2000)
for message in msg_pack.items():
for consumer_record in message[1]:
data = consumer_record.value
self.logger.info('Recieved {}'.format(data.get("target")))
elasticData = self.parseToElasticData(data)
elasticSendProducer = ElasticSendProducers()
elasticSendProducer.sendDataToQueue(elasticData)
self.consumer.commit()
except:
self.logger.exception("Thread " + __name__ + " terminated")
def parseToElasticData(self, data):
result = data
result["elastic_type"] = "nessusOutput"
return result | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | Module/kafkaConsumer/nessusOutputConsumer.py | vietnakid/VulnerabilityManagement |
# Alphabet size / maximum Caesar-cipher key value.
TAM_MAX_CH = 26
def recebeModo():
    """Ask whether the user wants to encrypt or decrypt, looping until a
    valid answer is entered, and return it."""
    valid_answers = 'criptografar c decriptografar d'.split()
    while True:
        modo = input("Você deseja criptografar ou decriptografar?\n").lower()
        if modo in valid_answers:
            return modo
        print("Entre 'criptografar' ou 'c' ou 'decriptografar' ou 'd'.")
def recebeChave():
    """Prompt for the cipher key until a value within [1, TAM_MAX_CH] is given."""
    while True:
        chave = int(input('Entre o número da chave (1-%s)\n' % (TAM_MAX_CH)))
        if 1 <= chave <= TAM_MAX_CH:
            return chave
def geraMsgTraduzida(modo, mensagem, chave):
    """Apply the Caesar shift to *mensagem*; decrypt when *modo* starts with 'd'."""
    if modo[0] == 'd':
        chave = -chave
    pedacos = []
    for simbolo in mensagem:
        if not simbolo.isalpha():
            # Non-letters pass through unchanged.
            pedacos.append(simbolo)
            continue
        num = ord(simbolo) + chave
        # Wrap around within the matching uppercase/lowercase alphabet range.
        if simbolo.isupper():
            if num > ord('Z'):
                num -= 26
            elif num < ord('A'):
                num += 26
        elif simbolo.islower():
            if num > ord('z'):
                num -= 26
            elif num < ord('a'):
                num += 26
        pedacos.append(chr(num))
    return ''.join(pedacos)
def main():
    """Program entry point: gather mode, message and key, then print the result."""
    modo = recebeModo()
    mensagem = input("Entre sua mensagem\n")
    chave = recebeChave()
    print("Seu texto traduzido é:")
    print(geraMsgTraduzida(modo, mensagem, chave))
# Run the program immediately when the module is executed/imported.
main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exclud... | 3 | Conteudo das Aulas/048/Cifra de Cesar - Gabarito.py | cerberus707/lab-python |
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
class AndImmediate(AbstractOpcode):
    """ARMv6 AND (immediate): R[d] := R[n] & imm32, optionally updating flags."""

    def __init__(self, setflags, d, n, imm32, carry):
        super(AndImmediate, self).__init__()
        self.setflags = setflags  # update CPSR N/Z/C when true
        self.d = d                # destination register index
        self.n = n                # first-operand register index
        self.imm32 = imm32        # expanded immediate operand
        self.carry = carry        # carry-out from the immediate expansion

    def execute(self, processor):
        # Only execute when the instruction's condition field passes.
        if processor.condition_passed():
            result = processor.registers.get(self.n) & self.imm32
            if self.d == 15:
                # Destination is the PC: branch via ALUWritePC semantics.
                processor.alu_write_pc(result)
            else:
                processor.registers.set(self.d, result)
                if self.setflags:
                    # NOTE(review): result[0] / result.all(False) imply a
                    # bit-array type with MSB at index 0, not a plain int —
                    # confirm against the registers implementation.
                    processor.registers.cpsr.set_n(result[0])
                    processor.registers.cpsr.set_z(result.all(False))
                    processor.registers.cpsr.set_c(self.carry)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherit... | 3 | armulator/armv6/opcodes/abstract_opcodes/and_immediate.py | matan1008/armulator |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import did_pb2 as did__pb2
class DidStub(object):
  """The did sidechain service definition.

  Client-side stub; generated by the gRPC protocol compiler (do not edit).
  """

  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: serializes ApiRequest, deserializes ApiResponse.
    self.Sign = channel.unary_unary(
        '/did.Did/Sign',
        request_serializer=did__pb2.ApiRequest.SerializeToString,
        response_deserializer=did__pb2.ApiResponse.FromString,
        )
class DidServicer(object):
  """The did sidechain service definition.

  Server-side base class; override Sign() in a subclass (generated code).
  """

  def Sign(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    # Default generated behavior: report UNIMPLEMENTED to the caller.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_DidServicer_to_server(servicer, server):
  """Register the servicer's RPC method handlers on a grpc.Server."""
  rpc_method_handlers = {
      'Sign': grpc.unary_unary_rpc_method_handler(
          servicer.Sign,
          request_deserializer=did__pb2.ApiRequest.FromString,
          response_serializer=did__pb2.ApiResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'did.Did', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?... | 3 | grpc_adenine/stubs/did_pb2_grpc.py | rahulguna/GRPC_python |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from copy import deepcopy
from textprocess import Word
class IndirectOrderModifier():
    '''
    Class to make a sentence in indirect order
    '''

    def __init__(self, pivot):
        '''
        Params
        ---
        pivot: the index separating the nominal and verbal parts
        '''
        # Rotation point: words[pivot:] is moved to the front of the sentence.
        self.pivot = pivot

    def modify_sentence(self, words):
        '''
        Return the sentence rearranged into indirect order
        Params
        ---
        words: a list of objects 'Word'
        '''
        # Deep-copy so the caller's Word objects are never mutated.
        aux = deepcopy(words)
        # Placeholder punctuation used when the rotated part carries none.
        pu = Word("","")
        start = aux[self.pivot:]
        end = aux[:self.pivot]
        # Detach trailing sentence punctuation from the part moved to front.
        if start[-1].word in ".?!":
            pu = start[-1]
            start = start[:-1]
        # Capitalize the new first word (when it starts with a letter).
        if start[0].word[0].isalpha():
            lst = list(start[0].word)
            lst[0] = lst[0].upper()
            start[0].word = "".join(lst)
        # Lowercase the old first word unless it is a proper noun (NPROP).
        if end[0].word_class != "NPROP":
            lst = list(end[0].word)
            lst[0] = lst[0].lower()
            end[0].word = "".join(lst)
        # Punctuation (real or placeholder) is re-appended at the end.
        return start + end + [pu]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/server/textprocess/modifiers/indirectordermodifier.py | Vnicius/fact-check |
"""Test the date time method"""
from datetime import datetime
from api.utilities.helpers.date_time import date_time
fmt = '%Y-%m-%d %H:%M:%S'
fmt1 = '%Y-%m-%d %H:%M:%S %Z%z'
class TestDateTime:
    """Tests the custom datetime class"""

    def test_the_date_time_method_succeeds(self):
        """Tests the date_time.date_time method"""
        result = date_time.date_time(2018, 12, 6, 8, 54, 32, format_=fmt1)
        assert str(result) == '2018-12-06 09:54:32 WAT+0100'

    def test_the_time_method_with_sub_succeeds(self):
        """Tests the date_time.time method"""
        shifted = date_time.time(
            manipulate=True, manipulation_type='SUB', format_=fmt, hours=1)
        timestamp = round(datetime.strptime(shifted, fmt).timestamp())
        assert timestamp is not None

    def test_the_time_method_with_add_succeeds(self):
        """Tests the date_time.time method"""
        shifted = date_time.time(
            manipulate=True, manipulation_type='ADD', format_=fmt, hours=1)
        timestamp = round(datetime.strptime(shifted, fmt).timestamp())
        assert timestamp is not None
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},... | 3 | tests/utilities/helpers/test_date_time.py | Meeqan/trisixty-buys-API |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_parent(parent: str) -> str:
    """Return the parent resource name unchanged (pass-through sample helper)."""
    return parent
def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob:
    """Build the dict describing a JSONL-in/JSONL-out batch prediction job."""
    # This sample passes no extra model parameters.
    empty_model_parameters = to_protobuf_value({})
    # Format: 'projects/{project}/locations/{location}/models/{model_id}'
    job = {
        "display_name": display_name,
        "model": model_name,
        "model_parameters": empty_model_parameters,
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    return job
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 2... | 3 | .sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py | dizcology/python-aiplatform |
import os
import sys
from Crypto.Cipher import AES
from secret import FLAG
# Fixed random nonce, key and associated data for this process lifetime;
# reusing the nonce across encryptions is the challenge's vulnerability.
iv = os.urandom(16)
key = os.urandom(32)
auth = os.urandom(16)
def encrypt():
    """Read a string (at least flag-length) and print its GCM ciphertext and tag."""
    plaintext = input('Input a string to encrypt: ')
    if len(plaintext) < len(FLAG[0]):
        sys.exit(1)
    # GCM's update() returns the cipher, so feeding the AAD can be chained.
    cipher = AES.new(key, AES.MODE_GCM, nonce=iv).update(auth)
    ciphertext = cipher.encrypt(plaintext.encode())
    print(f'ciphertext: {ciphertext.hex()}')
    print(f'tag: {cipher.digest().hex()}')
def decrypt():
    """Read ciphertext+tag; reveal the bonus flag when the input decrypts to FLAG[0]."""
    ciphertext = bytes.fromhex(input('ciphertext: '))
    tag = bytes.fromhex(input('tag: '))
    cipher = AES.new(key, AES.MODE_GCM, nonce=iv).update(auth)
    try:
        plaintext = cipher.decrypt_and_verify(ciphertext, tag).decode()
    except ValueError:
        # Authentication (or decoding) failed.
        print('Decryption failed :(')
        return
    if plaintext == FLAG[0]:
        print(f'Decrypt one flag and get one free, flag: {FLAG[1]}')
        print('[FLAG] forbidden-attack', file=sys.stderr)
    else:
        print(f'Here is your decrypted string: {plaintext}')
# Dispatch table mapping user commands to their handlers.
menu = {
    'encrypt': encrypt,
    'decrypt': decrypt,
    'server.py': lambda: print(open('./server.py', 'r').read()),
    'exit': lambda: sys.exit(1)
}

if __name__ == '__main__':
    # Hand out the encrypted flag up front; the player gets 10 interactions.
    print(f'flag: {AES.new(key, AES.MODE_GCM, nonce=iv).encrypt_and_digest(FLAG[0].encode())[0].hex()}')
    for _ in range(10):
        print('> encrypt')
        print('> decrypt')
        print('> server.py')
        print('> exit')
        choice = input('> Command: ')
        if not menu.get(choice):
            print('Not a valid choice...')
            sys.exit(1)
        menu.get(choice)()
    sys.exit(1)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | Symmetric/GCM-mode/forbidden-attack/server.py | killua4564/Symmetric |
# coding: utf-8
"""
mParticle
mParticle Event API
OpenAPI spec version: 1.0.1
Contact: support@mparticle.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import mparticle
from mparticle.rest import ApiException
from mparticle.models.push_message_event import PushMessageEvent
class TestPushMessageEvent(unittest.TestCase):
    """ PushMessageEvent unit test stubs (generated by swagger-codegen) """

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testPushMessageEvent(self):
        """
        Test PushMessageEvent
        """
        # Smoke test: constructing the model must not raise.
        model = mparticle.models.push_message_event.PushMessageEvent()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"a... | 3 | test/test_push_message_event.py | juviasuisei/mparticle-python-sdk |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_subject_access_review_spec import V1beta1SubjectAccessReviewSpec
class TestV1beta1SubjectAccessReviewSpec(unittest.TestCase):
    """ V1beta1SubjectAccessReviewSpec unit test stubs (generated by swagger-codegen) """

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testV1beta1SubjectAccessReviewSpec(self):
        """
        Test V1beta1SubjectAccessReviewSpec
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1beta1_subject_access_review_spec.V1beta1SubjectAccessReviewSpec()
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | kubernetes/test/test_v1beta1_subject_access_review_spec.py | kevingessner/python |
import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerWindowOpenV2Policy(Policy):
    """Scripted policy that slides the window open: hover above the handle,
    descend to it, then push along +x."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation into named components."""
        return {
            'hand_xyz': obs[:3],
            'wndw_xyz': obs[3:],
        }

    def get_action(self, obs):
        """Return the 4D action array (3D position delta + gripper power)."""
        o_d = self._parse_obs(obs)
        action = Action({
            'delta_pos': np.arange(3),
            'grab_pow': 3
        })
        action['delta_pos'] = move(o_d['hand_xyz'], to_xyz=self._desired_xyz(o_d), p=25.)
        action['grab_pow'] = 1.
        return action.array

    @staticmethod
    def _desired_xyz(o_d):
        """Compute the target hand position for the current phase of the motion."""
        pos_curr = o_d['hand_xyz']
        # Bug fix: use out-of-place addition. The original `pos_wndw += ...`
        # mutated the array stored in o_d — a view into the caller's obs —
        # corrupting the observation on every call.
        pos_wndw = o_d['wndw_xyz'] + np.array([-0.03, -0.03, -0.1])
        if np.linalg.norm(pos_curr[:2] - pos_wndw[:2]) > 0.04:
            # Not yet aligned above the handle: hover 0.3 above it first.
            return pos_wndw + np.array([0., 0., 0.3])
        elif abs(pos_curr[2] - pos_wndw[2]) > 0.02:
            # Aligned but too high: descend to the handle.
            return pos_wndw
        else:
            # In position: push along +x to slide the window open.
            return pos_wndw + np.array([0.1, 0., 0.])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | metaworld/policies/sawyer_window_open_v2_policy.py | pkol/metaworld |
from gzip import decompress
from http import cookiejar
from json import loads, dumps
from os import environ
from time import strftime, gmtime
from urllib import request
def get_url(ticker):
    """Build the quote-history URL for *ticker*.

    Development hits Fundamentus directly; any other FLASK_ENV goes through
    a PHP proxy with the target URL percent-encoded. Returns (url, env).
    """
    env = environ.get('FLASK_ENV', 'development')
    if env == 'development':
        base = 'https://www.fundamentus.com.br/amline/cot_hist.php?papel='
    else:
        # Proxy path: the Fundamentus URL is embedded percent-encoded in ?q=.
        base = ('http://shortbushash.com/proxy.php'
                '?q=https%3A%2F%2Fwww.fundamentus.com.br%2Famline%2Fcot_hist.php%3Fpapel%3D')
    return base + ticker + '&hl=1a7', env
def build_headers(url, env):
    """Return the HTTP header pairs appropriate for *env*."""
    if env == 'development':
        # Browser-like headers for the direct (development) endpoint.
        return [
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'),
            ('Referer', url),
            ('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'),
        ]
    # Proxy path: JSON client headers; response arrives gzip-compressed.
    return [
        ('Accept', 'application/json, text/javascript, */*; q=0.01'),
        ('Accept-Encoding', 'gzip, deflate, br'),
        ('Referer', url),
        ('User-Agent', 'PostmanRuntime/7.26.8'),
    ]
def parse_epoch_time(parsed_content):
    """Convert [epoch_millis, price] pairs into ['YYYY-MM-DD', price] pairs (UTC)."""
    rows = []
    for unix_epoch_ms, price in parsed_content:
        rows.append([strftime('%Y-%m-%d', gmtime(unix_epoch_ms / 1000)), price])
    return rows
def load_prices(ticker, parse_json=True):
    """Fetch the price history for *ticker* (network request).

    Returns a JSON string of [date, price] rows, or the raw list when
    parse_json is False.
    """
    url, env = get_url(ticker)
    # Cookie-aware opener: keep any session cookies the endpoint sets.
    cookie_jar = cookiejar.CookieJar()
    opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
    opener.addheaders = build_headers(url, env)
    with opener.open(url) as link:
        gzip_response = link.read()
        # Development serves plain text; the proxy path returns gzip bytes
        # (we request 'Accept-Encoding: gzip' in build_headers).
        binary_response = gzip_response.decode() if env == 'development' else decompress(gzip_response)
        parsed_content = loads(binary_response)
        content = parse_epoch_time(parsed_content)
        return dumps(content) if parse_json else content
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | historical_prices.py | pedroeml/stock-projection-service |
import os
import unittest
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from pyshex.shex_evaluator import evaluate_cli
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))
validation_dir = os.path.join(data_dir, 'validation')
rdffile = os.path.join(validation_dir, 'simple.ttl')
shexfile = os.path.join(validation_dir, 'simple.shex')
class Issue25TestCase(unittest.TestCase):
    """Regression tests for issue #25: CLI behavior with no START shape / no focus nodes."""

    def test_nostart(self):
        # Evaluating all nodes (-A) against a schema without a START declaration
        # must report a START-missing error instead of crashing.
        outf = StringIO()
        with(redirect_stdout(outf)):
            evaluate_cli(f"{rdffile} {shexfile} -A".split())
        self.assertEqual("""Errors:
Focus: None
Start: None
Reason: START node is not specified""", outf.getvalue().strip())

    def test_all_nodes(self):
        # Without -A and without explicit focus nodes, the CLI must emit a
        # usage error on stderr.
        outf = StringIO()
        with(redirect_stderr(outf)):
            evaluate_cli(f"{rdffile} {shexfile} -s http://example.org/shapes/S".split())
        self.assertEqual('Error: You must specify one or more graph focus nodes, supply a SPARQL query, '
                         'or use the "-A" option',
                         outf.getvalue().strip())
        # With -A every node is evaluated; all three subjects fail on :s4.
        outf = StringIO()
        with(redirect_stdout(outf)):
            evaluate_cli(f"{rdffile} {shexfile} -A -s http://example.org/shapes/S".split())
        self.assertEqual("""Errors:
Focus: http://a.example/s1
Start: http://example.org/shapes/S
Reason: Testing :s1 against shape http://example.org/shapes/S
No matching triples found for predicate :s4
Focus: http://a.example/s2
Start: http://example.org/shapes/S
Reason: Testing :s2 against shape http://example.org/shapes/S
No matching triples found for predicate :s4
Focus: http://a.example/s3
Start: http://example.org/shapes/S
Reason: Testing :s3 against shape http://example.org/shapes/S
No matching triples found for predicate :s4""", outf.getvalue().strip())
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": fals... | 3 | tests/test_issues/test_issue_25.py | cmungall/PyShEx |
from direct.directnotify import DirectNotifyGlobal
import RingTrack
class RingAction:
    """Base provider of ring offsets; eval(t) yields an (x, y) pair for time t."""

    notify = DirectNotifyGlobal.directNotify.newCategory('RingAction')

    def __init__(self):
        pass

    def eval(self, t):
        # Base behavior: the ring sits at the origin for all t.
        return (0, 0)
class RingActionStaticPos(RingAction):
    """Ring action pinned to one fixed, time-independent position."""

    def __init__(self, pos):
        RingAction.__init__(self)
        self.__fixed = pos

    def eval(self, t):
        # Time is ignored; the stored position is returned as-is.
        return self.__fixed
class RingActionFunction(RingAction):
    """Ring action whose position is computed by func(t, *args)."""

    def __init__(self, func, args):
        RingAction.__init__(self)
        self.__fn = func
        self.__extra_args = args

    def eval(self, t):
        return self.__fn(t, *self.__extra_args)
class RingActionRingTrack(RingAction):
    """Ring action that delegates position lookup to a RingTrack."""

    def __init__(self, ringTrack):
        RingAction.__init__(self)
        self.__delegate = ringTrack

    def eval(self, t):
        return self.__delegate.eval(t)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?"... | 3 | toontown/minigame/RingAction.py | AnonymousDeveloper65535/open-toontown |
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.caffe.extractors.inner_product import inner_product_ext
from mo.front.common.partial_infer.inner_product import caffe_inner_product
from mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer
class FakeProtoLayer:
    """Minimal stand-in for a caffe proto layer carrying inner-product params."""

    def __init__(self, val):
        # Mirror the attribute the extractor reads from a real proto layer.
        self.inner_product_param = val
class TestInnerProduct(unittest.TestCase):
    """Checks that inner_product_ext builds the expected FullyConnected descriptor."""

    def test_inner_product_ext(self):
        params = {
            'num_output': 10,
            'bias_term': True
        }
        # The two blobs stand in for the layer's weights and biases.
        mean_blob = np.array([1., 2.])
        variance_blob = np.array([3., 4.])
        blobs = [mean_blob, variance_blob]
        res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),
                                FakeModelLayer(blobs))
        exp_res = {
            'type': 'FullyConnected',
            'out-size': 10,
            'infer': caffe_inner_product,
            'weights': mean_blob,
            'biases': variance_blob,
            'embedded_inputs': [
                (1, 'weights', {
                    'bin': 'weights'
                }),
                (2, 'biases', {
                    'bin': 'biases'
                })
            ]
        }
        # numpy arrays need array-aware comparison; everything else uses ==.
        for i in exp_res:
            if i in ('weights', 'biases'):
                np.testing.assert_array_equal(res[i], exp_res[i])
            else:
                self.assertEqual(res[i], exp_res[i])
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
... | 3 | model-optimizer/mo/front/caffe/extractors/inner_product_test.py | shinh/dldt |
import json
from os.path import abspath, dirname, join
from datetime import datetime
import tomlkit
# NOTE(review): dirname(join("syntaxes", __name__)) strips the final path
# component again — confirm this actually resolves to the syntaxes directory.
SYNTAX_DIR = abspath(dirname(join("syntaxes", __name__)))
# Grammar files to (re)build.
SYNTAXES = ["django-html", "django-txt"]
def load(path):
    """Read the TOML file at *path* and return the parsed tomlkit document."""
    with open(path) as handle:
        return tomlkit.loads(handle.read())
# Monkey-patch: expose the path-based loader above as ``tomlkit.load``.
tomlkit.load = load
def build_file(syntax):
    """Assemble <syntax>.toml and its repository fragments into a tmLanguage JSON file."""
    data = tomlkit.load(join(SYNTAX_DIR, f"{syntax}.toml"))
    data.add("_comment", "Generated by: poetry run syntax")
    # Inline each referenced repository fragment, then drop the reference list.
    for name in data["repositories"]:
        repo = tomlkit.load(join(SYNTAX_DIR, "repositories", f"{name}.toml"))
        data["repository"][name] = repo
    data.remove("repositories")
    with open(f"syntaxes/{syntax}.tmLanguage.json", "w") as f:
        json.dump(data, f, indent=2)
def main():
    """Regenerate every syntax definition listed in SYNTAXES."""
    for grammar in SYNTAXES:
        build_file(grammar)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | .vscode/extensions/batisteo.vscode-django-0.20.0/syntaxes/build.py | jaeyholic/portfolio-v2 |
# Copyright 2022 highstreet technologies GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
"""
Module containing the Generator class.
"""
from model.python.tapi_common_context import TapiCommonContext
class TopologyGenerator:
    """
    Generates a TAPI topology. The output is shaped by a JSON-style
    configuration dictionary supplied at construction time.
    """

    __configuration: dict = {}

    def __init__(self, configuration: dict):
        """Store the configuration driving the generation process."""
        self.__configuration = configuration

    def configuration(self) -> dict:
        """Return the configuration this generator was built with."""
        return self.__configuration

    def generate(self) -> TapiCommonContext:
        """Run the generation and return the resulting TapiCommonContext."""
        return TapiCommonContext(self.configuration())
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | controller/network_generator.py | demx8as6/network-topology-instance-generator |
from __future__ import print_function
from flask import Flask, Response
from pyzbar import pyzbar
from picamera.array import PiRGBArray
from picamera import PiCamera
from datetime import datetime
import numpy as np
import cv2
import time
# Initialize the Raspberry Pi camera (640x480 @ 32 fps) and give the sensor
# a short warm-up before the first capture.
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
app = Flask(__name__)
@app.route('/stream')
def stream():
    """HTTP endpoint serving the camera feed as a multipart MJPEG stream."""
    return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')
def gen():
    """Yield an endless multipart MJPEG byte stream, one camera frame per part."""
    while True:
        jpeg_bytes = get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg_bytes + b'\r\n\r\n')
def get_frame():
    """Capture one camera frame, annotate decoded codes, return it JPEG-encoded."""
    camera.capture(rawCapture, format="bgr", use_video_port=True)
    frame = rawCapture.array
    decoded_objs = decode(frame)
    frame = display(frame, decoded_objs)
    ret, jpeg = cv2.imencode('.jpg', frame)
    # Reset the capture buffer so the next capture starts clean.
    rawCapture.truncate(0)
    return jpeg.tobytes()
def decode(frame):
    """Scan *frame* for barcodes/QR codes, logging each detection with a timestamp."""
    decoded_objs = pyzbar.decode(frame, scan_locations=True)
    for detection in decoded_objs:
        print(datetime.now().strftime('%H:%M:%S.%f'))
        print('Type: ', detection.type)
        print('Data: ', detection.data)
    return decoded_objs
def display(frame, decoded_objs):
    """Draw a bounding box on *frame* for every decoded symbol."""
    for detection in decoded_objs:
        left, top, width, height = detection.rect
        # (0, 255, 255) is yellow in BGR; line width 2 px.
        frame = cv2.rectangle(frame, (left, top),
                              (left + width, height + top),
                              (0, 255, 255), 2)
    return frame
if __name__ == '__main__':
    # Listen on all interfaces; threaded so multiple clients can stream at once.
    app.run(host="0.0.0.0", debug=False, threaded=True)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | main.py | shihu/qr-reader |
import unittest
from cupy import testing
@testing.gpu
class TestBasic(unittest.TestCase):
    """copyto tests: plain copy, dtype conversion, broadcasting, and masked copy."""
    _multiprocess_can_split_ = True

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_copyto(self, xp, dtype):
        # Element-wise copy into an equally shaped destination.
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        b = xp.empty((2, 3, 4), dtype=dtype)
        xp.copyto(b, a)
        return b

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_copyto_dtype(self, xp, dtype):
        # Boolean source: copyto must cast to the destination dtype.
        a = testing.shaped_arange((2, 3, 4), xp, dtype='?')
        b = xp.empty((2, 3, 4), dtype=dtype)
        xp.copyto(b, a)
        return b

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_copyto_broadcast(self, xp, dtype):
        # (3, 1) source broadcasts across the (2, 3, 4) destination.
        a = testing.shaped_arange((3, 1), xp, dtype)
        b = xp.empty((2, 3, 4), dtype=dtype)
        xp.copyto(b, a)
        return b

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_copyto_where(self, xp, dtype):
        # Only positions where mask c is truthy receive values from b.
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        b = testing.shaped_reverse_arange((2, 3, 4), xp, dtype)
        c = testing.shaped_arange((2, 3, 4), xp, '?')
        xp.copyto(a, b, where=c)
        return a
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | tests/cupy_tests/manipulation_tests/test_basic.py | ytoyama/yans_chainer_hackathon |
from __future__ import absolute_import
from .Node import Op
from .._base import DNNL_LIB
from ..cpu_links import matrix_elementwise_add_by_const as cpu_matrix_elementwise_add_by_const
from ..gpu_links import matrix_elementwise_add_by_const
class AddByConstOp(Op):
    """Element-wise addition of a scalar constant to a tensor node."""

    def __init__(self, node_A, const_val, ctx=None):
        super().__init__(AddByConstOp, [node_A], ctx)
        # Scalar added to every element of the input.
        self.const_attr = const_val

    @property
    def desc(self):
        # e.g. "AddByConst(x, 3)" for debugging/graph dumps.
        return self.name + '(%s, %s)' % (self.inputs[0].name, str(self.const_attr))

    def compute(self, input_vals, output_val, stream_handle=None):
        """Fill output_val with input + const, dispatching on device and DNNL availability."""
        if self.on_cpu:
            if DNNL_LIB['DnnlMatrixElementwiseAddByConst']:
                cpu_matrix_elementwise_add_by_const(
                    input_vals[0], self.const_attr, output_val)
            else:
                # Fallback: plain numpy addition on the host.
                output_val[:] = input_vals[0].asnumpy() + self.const_attr
        else:
            matrix_elementwise_add_by_const(
                input_vals[0], self.const_attr, output_val, stream_handle)

    def gradient(self, output_grad):
        # d(x + c)/dx == 1, so the upstream gradient passes through unchanged.
        return [output_grad]

    def infer_shape(self, input_shapes):
        # Adding a scalar never changes the shape.
        assert len(input_shapes) == 1
        return input_shapes[0]
def addbyconst_op(node, const_val, ctx=None):
    """Create an AddByConstOp that adds *const_val* element-wise to *node*.

    Parameters:
    ----
    node : Node
        The tensor node to offset.
    const_val : scalar value
        Constant added to every element.

    Returns:
    ----
    A new AddByConstOp instance.
    """
    return AddByConstOp(node, const_val, ctx=ctx)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | python/hetu/gpu_ops/AddConst.py | HugoZHL/Hetu |
###############################################################################
#
# Test cases for xlsxwriter.lua.
#
# Copyright 2014-2015, John McNamara, jmcnamara@cpan.org
#
import base_test_class
class TestCompareXLSXFiles(base_test_class.XLSXBaseTest):
    """
    Compare xlsxwriter.lua output against Excel-generated files for
    worksheets that use cell protection.
    """

    def test_protect01(self):
        self.run_lua_test('test_protect01')

    def test_protect02(self):
        self.run_lua_test('test_protect02')

    def test_protect03(self):
        self.run_lua_test('test_protect03')
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | test/comparison/test_protect.py | moteus/xlsxwriter.lua |
import tensorflow as tf
def make_weights(shape, name='weights'):
    """Trainable weight Variable drawn from a truncated normal (stddev 0.05)."""
    init = tf.truncated_normal(shape=shape, stddev=0.05)
    return tf.Variable(init, name=name)
def make_biases(shape, name='biases'):
    """Trainable bias Variable initialized to the constant 0.05."""
    init = tf.constant(0.05, shape=shape)
    return tf.Variable(init, name=name)
def convolution_layer(prev_layer, f_size, inp_c, out_c, stride_s):
    """2-D convolution (SAME padding) with freshly created weights and biases.

    f_size: square filter side; inp_c/out_c: input/output channel counts;
    stride_s: stride applied to both spatial dimensions.
    """
    kernel = make_weights([f_size, f_size, inp_c, out_c])
    bias = make_biases([out_c])
    conv = tf.nn.conv2d(prev_layer, kernel, [1, stride_s, stride_s, 1], padding='SAME')
    return tf.add(conv, bias)
def pool_layer(prev_layer, size, stride_s):
    """Max-pooling with a square window and SAME padding."""
    return tf.nn.max_pool(prev_layer,
                          [1, size, size, 1],
                          [1, stride_s, stride_s, 1],
                          padding='SAME')
def activation_layer(prev_layer, type):
    """Apply the named activation to prev_layer.

    Only 'relu' is supported; any other value raises NotImplementedError.
    """
    if type == 'relu':
        return tf.nn.relu(prev_layer)
    # BUG FIX: was ``raise NotImplemented(...)`` — NotImplemented is a constant,
    # not an exception class, so calling it raised a confusing TypeError.
    raise NotImplementedError('unsupported activation type')
def flat_layer(inp):
    """Flatten a rank-4 tensor (N, H, W, C) into (N, H*W*C).

    Returns (flattened_tensor, flattened_size).
    Raises NotImplementedError when the input rank is not 4.
    """
    input_size = inp.get_shape().as_list()
    if len(input_size) != 4:
        # BUG FIX: was ``raise NotImplemented(...)`` (TypeError: not callable).
        raise NotImplementedError('flat layer unsupported for input with dim != 4')
    output_size = input_size[-1] * input_size[-2] * input_size[-3]
    return tf.reshape(inp, [-1, output_size]), output_size
def fc_layer(prev_layer, h_in, h_out):
    """Fully-connected layer: prev_layer @ W + b with fresh parameters."""
    weights = make_weights([h_in, h_out])
    bias = make_biases([h_out])
    return tf.add(tf.matmul(prev_layer, weights), bias)
def dropout_layer(prev_layer, prob):
    """Apply dropout to prev_layer with keep-probability *prob*."""
    return tf.nn.dropout(prev_layer, prob)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | tensorflow/mycode/src/tf_layer_utils.py | christinazavou/O-CNN |
from abc import ABC, abstractmethod
from enum import Enum
class ALGO(Enum):
    """Enumeration of algorithms that can be run on sensor data."""
    ImageMatching = 1
    SemanticSegmentation = 2
class SensorAbstractClass(ABC):
    """
    Abstract class which all sensor classes should inherit.
    """
    def __init__(self):
        """
        Initializing the basic data of the class
        and sensor
        """
        super().__init__()
        # NOTE(review): this assigns the *type* ``str`` rather than an empty
        # string — looks like a placeholder default; confirm intended value.
        self.sensor_name = str
        # Processed results accumulated by run_sensor().
        self.sensor_data = []
        # Algorithm applied to this sensor's data by default.
        self.algo = ALGO.ImageMatching
    @abstractmethod
    def set_algorithm(self,in_algo):
        """
        Setting the algorithm to be used on the
        data of the sensor
        :return: None
        """
        pass
    @abstractmethod
    def run_sensor(self):
        """
        Method to extract data from the sensor ,
        preprocess the result run any algorithm
        on the sensor data.
        :return: None
        """
        pass
    @abstractmethod
    def get_results(self):
        """
        Returns the processed data of the sensor.
        :return: list of results
        """
        pass
    @abstractmethod
    def __str__(self):
        """
        Return the details like name , sensor ID etc
        :return: string
        """
        pass
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | ast_project_2018/src/sensors/sensor_abstract_class.py | njanirudh/HBRS-AST-WS18 |
# coding: utf-8
import yaml
from unittest import TestCase, main
from hamcrest import assert_that, equal_to, starts_with
from pattern_matcher.interface import Interface
class MakeInterface(TestCase):
    """Tests for Interface construction, repr, and YAML round-tripping."""

    def test_str_should_succeed(self):
        # Anonymous interfaces get a generated name with a fixed prefix.
        assert_that(str(Interface()), starts_with('interface anonymous_'))
        assert_that(str(Interface('A')), equal_to('interface A'))

    def test_repr_should_succeed(self):
        assert_that(repr(Interface('A')), equal_to("Interface('A')"))

    def test_dump_and_load_yaml_recursive_interface_should_succeed(self):
        # An interface that supplies itself must serialize via a YAML
        # anchor/alias pair and load back to an equal object.
        interface = Interface('a')
        interface.suppliers = [interface]
        data = (
            "&id001 !Interface\n"
            "name: a\n"
            "suppliers:\n"
            "- *id001\n"
        )
        assert_that(yaml.dump(interface), equal_to(data))
        assert_that(yaml.load(data), equal_to(interface))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | src/match_pattern/pattern_matcher/test/interface.py | elsid/master |
import unittest
from odesli.Odesli import Odesli
from odesli.entity.song.Song import Song
# Known-good YouTube entry for this track; compared against live lookup
# results in TestSong.check.
EXPECTED_YOUTUBE_SONG = Song('VHb_XIql_gU', 'youtube', 'Kids', 'MGMT - Topic', 'https://i.ytimg.com/vi/VHb_XIql_gU/hqdefault.jpg', 480, 360, { 'youtube': 'https://www.youtube.com/watch?v=VHb_XIql_gU', 'youtubeMusic': 'https://music.youtube.com/watch?v=VHb_XIql_gU' })
class TestSong(unittest.TestCase):
    """Integration tests resolving the same track by URL and by provider ID (network)."""

    def check(self, result):
        # Both lookup paths must agree on the canonical song.link data.
        self.assertEqual(result.songLink, 'https://song.link/s/1jJci4qxiYcOHhQR247rEU')
        self.assertEqual(result.song.getType(), 'song')
        self.assertEqual(result.song.provider, 'spotify')
        self.assertEqual(result.songsByProvider['youtube'], EXPECTED_YOUTUBE_SONG)

    def test_ByUrl(self):
        # Resolve by the public Spotify track URL.
        o = Odesli()
        song = o.getByUrl('https://open.spotify.com/track/1jJci4qxiYcOHhQR247rEU')
        self.check(song)

    def test_ById(self):
        # Resolve by provider-specific ID + platform + entity type.
        o = Odesli()
        song = o.getById('1jJci4qxiYcOHhQR247rEU', 'spotify', 'song')
        self.check(song)
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | tests/test_TestSong.py | fabian-thomas/python-odesli |
from abc import ABC
class ILogger(ABC):
    """Marker interface for loggers."""

    def __repr__(self):
        # e.g. "ConsoleLogger()" for a subclass of that name.
        return f'{type(self).__name__}()'
class ICommProtocol(ABC):
    """Base for communication protocols; holds the logger used for diagnostics."""

    def __init__(self, logger: ILogger):
        self._logger = logger

    def __repr__(self):
        return f'{type(self).__name__}({self._logger})'
class ICommunicator(ABC):
    """Base for communicators built on a protocol plus a logger."""

    def __init__(self, comm_protocol: ICommProtocol, logger: ILogger):
        self._comm_protocol = comm_protocol
        self._logger = logger

    def __repr__(self):
        return f'{type(self).__name__}({self._comm_protocol}, {self._logger})'
class IDatabase(ABC):
    """Base for database gateways identified by a connection URL."""

    def __init__(self, database_url: str, logger: ILogger):
        self._database_url = database_url
        self._logger = logger

    def __repr__(self):
        return f'{type(self).__name__}({self._database_url}, {self._logger})'
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | example/interfaces.py | tomer/di_container |
"""
API user class
"""
class APIUser:
    """
    Information about an api user.
    """

    def __init__(self, token, username, email, user_id):
        self.token = token
        self.username = username
        self.email = email
        # Users are never admins by default.
        self.is_admin = False
        self.clockify_id = user_id

    def get_token(self):
        """Return this user's API token."""
        return self.token

    def match(self, email, username):
        """Return True if this user matches *email* OR *username* (either may be None)."""
        if email is not None and email == self.email:
            return True
        return username is not None and username == self.username
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | converter/clockify/api_user.py | perlexed/toggl2clockify |
import numpy
import torch
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention.attention import Attention
from numpy.testing import assert_almost_equal
from seq2rel.modules.attention.multihead_attention import MultiheadAttention
class TestMultiheadAttention(AllenNlpTestCase):
    """Tests MultiheadAttention's registrable construction and normalized output."""

    def test_can_init_multihead(self):
        """The 'multihead_attention' key must build a MultiheadAttention via from_params."""
        legacy_attention = Attention.from_params(
            Params({"type": "multihead_attention", "embed_dim": 4, "num_heads": 2})
        )
        # BUG FIX: the bare ``isinstance(...)`` call discarded its result, so
        # this test could never fail; assert the check instead.
        assert isinstance(legacy_attention, MultiheadAttention)

    def test_multihead_similarity(self):
        """Attention weights over the matrix rows must sum to ~1 per batch entry."""
        attn = MultiheadAttention(embed_dim=4, num_heads=2)
        vector = torch.FloatTensor([[0, 0, 0, 0], [1, 1, 1, 1]])
        matrix = torch.FloatTensor(
            [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]
        )
        with torch.no_grad():
            output = attn(vector, matrix)
        # Softmax-normalized weights should sum to 1 for each batch element.
        assert_almost_equal(
            output.sum(dim=-1).numpy(),
            numpy.array([1.0, 1.0]),
            decimal=2,
        )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tests/modules/attention/test_multihead_attention.py | JohnGiorgi/seq2rel |
import os
import pytest
import testinfra.utils.ansible_runner
# Resolve the target hosts in "default_group" from the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("default_group")
@pytest.mark.parametrize(
    "pkg",
    [
        "epics-dev",
    ],
)
def test_default_pkgs(host, pkg):
    """Every required package must be installed on the target host."""
    assert host.package(pkg).is_installed
@pytest.mark.parametrize(
    "tool",
    [
        "caget",
        "caput",
    ],
)
def test_default_tools(host, tool):
    """Every EPICS command-line tool must be resolvable on the host's PATH."""
    try:
        location = host.find_command(tool)
    except ValueError:
        # find_command raises ValueError when the tool is missing; turn that
        # into a test failure rather than a test error.
        raise AssertionError()
    print("{} tool found in {}".format(tool, location))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | roles/lnls-ans-role-epics/molecule/default/tests/test_default.py | lerwys/lnls-ans-role-users |
from __future__ import print_function
import json
import sys
import os
from . import TornadoScheduledReporter
class TornadoStreamReporter(TornadoScheduledReporter):
    """Writes JSON serialized metrics to a stream using an ``IOLoop`` for scheduling"""
    def __init__(self, interval, stream=sys.stdout, registry=None, io_loop=None):
        """
        :param interval: a timedelta
        :param stream: the stream to write to, defaults to stdout
        :param registry: the registry to report from, defaults to None
        :param io_loop: the IOLoop to use, defaults to ``IOLoop.current()``
        """
        super(TornadoStreamReporter, self).__init__(interval, registry, io_loop)
        self.stream = stream
    def report(self):
        """Serialize the registry's current stats as JSON and write one line to the stream."""
        json.dump(self.registry.get_stats(), self.stream)
        self.stream.write(os.linesep)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | tapes/reporting/tornado/stream.py | emilssolmanis/tapes |
import time
import threading
import asyncio
from typing import Optional
from sarge import run # type: ignore
def wait_respond(url: str, timeout: Optional[float] = None, retry_interval: float = 1):
    """Block until *url* answers an HTTP(S) request, or raise after *timeout* seconds.

    Polls with ``curl --fail`` every *retry_interval* seconds from a background
    thread; raises asyncio.TimeoutError if the service never responds in time.
    """
    test = f"curl --output /dev/null -k --silent --fail {url}"
    stop_event = threading.Event()
    def test_func():
        # Keep curling until the service answers (exit code 0) or we are told to stop.
        while not stop_event.is_set() and run(test).returncode != 0:
            time.sleep(retry_interval)
    t = threading.Thread(target=test_func)
    t.start()
    t.join(timeout=timeout)
    # Still alive after join => the wait timed out.
    timedout = t.is_alive()
    stop_event.set()
    if timedout:
        raise asyncio.TimeoutError(
            f"service {url} does not answer before {timeout} seconds"
        )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | metis_lib/service.py | CS-METIS/minimetis |
from collections import deque
class Vertex:
    """Graph node wrapping a single payload value."""

    def __init__(self, value):
        self.value = value
class Edge:
    """Directed, weighted connection to a target vertex."""

    def __init__(self, vertex, weight):
        self.vertex = vertex
        self.weight = weight
class Queue:
    """FIFO queue built on collections.deque (enqueue left, dequeue right)."""

    def __init__(self):
        self.dq = deque()

    def enqueue(self, value):
        self.dq.appendleft(value)

    def dequeue(self):
        return self.dq.pop()

    def __len__(self):
        return len(self.dq)
class Graph:
    """Directed graph stored as an adjacency list mapping Vertex -> [Edge, ...]."""

    def __init__(self):
        self._adjacency_list = {}

    def add_node(self, value):
        """Wrap *value* in a Vertex, register it with no edges, and return it."""
        node = Vertex(value)
        self._adjacency_list[node] = []
        return node

    def size(self):
        """Return the number of vertices in the graph."""
        return len(self._adjacency_list)

    def add_edge(self, start_node, end_node, weight=1):
        """Add a directed edge from *start_node* to *end_node*.

        Both endpoints must already be in the graph; raises KeyError otherwise.
        """
        # BUG FIX: was ``self.adjacency_list`` (no leading underscore), which
        # raised AttributeError — the attribute is ``self._adjacency_list``.
        if start_node not in self._adjacency_list:
            raise KeyError('does not exist.')
        if end_node not in self._adjacency_list:
            raise KeyError('does not exist.')
        # BUG FIX: store Edge objects (not bare tuples) so that
        # breadth_first_search's ``edge.vertex`` access works.
        self._adjacency_list[start_node].append(Edge(end_node, weight))

    def get_nodes(self):
        """Return a view of all vertices."""
        return self._adjacency_list.keys()

    def get_neighbors(self, vertex):
        """Return the outgoing Edge list for *vertex* ([] if unknown)."""
        return self._adjacency_list.get(vertex, [])

    def breadth_first_search(self, start_vertex, action=(lambda x: None)):
        """Visit every vertex reachable from *start_vertex* in BFS order, calling *action* on each."""
        queue = Queue()
        visited = set()
        queue.enqueue(start_vertex)
        visited.add(start_vertex)
        while len(queue):
            current_vertex = queue.dequeue()
            action(current_vertex)
            for edge in self.get_neighbors(current_vertex):
                neighbor_vertex = edge.vertex
                if neighbor_vertex not in visited:
                    visited.add(neighbor_vertex)
                    queue.enqueue(neighbor_vertex)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}... | 3 | python/code_challenges/graph/graph.py | dina-fouad/data-structures-and-algorithms |
from django.test import TestCase
from django.utils import timezone
import datetime
from django.core.exceptions import ValidationError
from BasicBusinessManager.models.order_related_objects.sale_out import Sale_out
# Create your tests here.
class SaleOutModelTests(TestCase):
    """Unit tests for the Sale_out model's __str__ and finished() helpers."""

    def test_is_sale_name_returned_properly(self):
        '''
        __str__ should return the sale's name unchanged.
        '''
        name = "sdadsdad"
        sale = Sale_out(name = name)
        self.assertEqual(sale.__str__(),name)

    def test_is_sale_finished_date_before_end_sale(self):
        '''
        finished() is True when end_date lies in the past.
        '''
        end_date = timezone.now()-datetime.timedelta(days=8)
        sale = Sale_out(end_date = end_date)
        self.assertTrue(sale.finished())

    def test_is_sale_finished_date_after_end_sale(self):
        '''
        finished() is False when end_date lies in the future.
        '''
        end_date = timezone.now()+datetime.timedelta(days=8)
        sale = Sale_out(end_date = end_date)
        self.assertFalse(sale.finished())
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | BasicBusinessManager/tests/tests_sale_out.py | seyhak/RestaurantWebApplication |
# -*- coding: UTF-8 -*-
from common_utils.new_log import NewLog
class LogDecorator:
    """Callable-class decorator that logs call arguments and return value at DEBUG level."""
    log = NewLog(__name__)
    logger = log.get_log()

    def __call__(self, func):
        # Local import keeps this block self-contained without touching the
        # module's import section.
        from functools import wraps

        # IMPROVEMENT: @wraps preserves func.__name__/__doc__ on the wrapper,
        # so stacked decorators and debuggers see the real function name.
        @wraps(func)
        def wrapper(*args, **kw):
            self.logger.debug("call method %s ===============" % func.__name__)
            self.logger.debug("method [%s] input args: [%s], kw: [%s]" % (func.__name__, args, kw))
            result = func(*args, **kw)
            self.logger.debug("method [%s] response: [%s]" % (func.__name__, result))
            return result
        return wrapper
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | common_utils/log_decorator.py | beansKingdom/CommonUtils |
import re
import collections
DATA = '06.txt'
def get_data():
    """Parse "x, y" pairs from the DATA file; return points and their bounding box."""
    points = []
    xmin = ymin = float('inf')
    xmax = ymax = 0
    with open(DATA) as handle:
        for line in handle:
            match = re.match(r'(\d+), (\d+)', line)
            x, y = (int(group) for group in match.groups())
            points.append([x, y])
            xmin, xmax = min(xmin, x), max(xmax, x)
            ymin, ymax = min(ymin, y), max(ymax, y)
    return points, xmin, xmax, ymin, ymax
def nearest_neighbour(point, points):
    """Index of the closest point by Manhattan distance, or None on a tie."""
    x, y = point
    distances = [abs(x - px) + abs(y - py) for px, py in points]
    # Stable sort: on equal distances the lower index wins, as before.
    ranked = sorted(range(len(points)), key=distances.__getitem__)
    if distances[ranked[0]] == distances[ranked[1]]:
        return None  # same distance for the two nearest neighbours
    return ranked[0]
def distance_neighbours(point, points):
    """Total Manhattan distance from *point* to every entry of *points*."""
    x, y = point
    return sum(abs(x - px) + abs(y - py) for px, py in points)
def code1():
    """Part 1: print the size of the largest finite nearest-neighbour area."""
    points, xmin, xmax, ymin, ymax = get_data()
    # an area is infinite if it touches the border of the study area
    infinite = set()
    size = collections.defaultdict(int)
    for x in range(xmin, xmax + 1):
        for y in range(ymin, ymax + 1):
            nearest = nearest_neighbour((x, y), points)
            if nearest is not None:
                size[nearest] += 1
                if x == xmin or x == xmax or y == ymin or y == ymax:
                    infinite.add(nearest)
    for point in infinite:
        size[point] = 0  # disqualify infinite areas from the maximum
    print('1>', max(size.values()))
def code2():
    """Part 2: count grid cells whose total distance to all points is < 10000."""
    points, xmin, xmax, ymin, ymax = get_data()
    area10000 = 0
    for x in range(xmin, xmax + 1):
        for y in range(ymin, ymax + 1):
            sumdist = distance_neighbours((x, y), points)
            if sumdist < 10000:
                area10000 += 1
    print('2>', area10000)
code1()
code2()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | 2018/06.py | GillesArcas/Advent_of_Code |
class ContentFilteringCategories(object):
    """Thin wrapper around the Meraki content-filtering-categories endpoint."""

    def __init__(self, session):
        # session: REST session object exposing get(metadata, resource).
        super(ContentFilteringCategories, self).__init__()
        self._session = session

    def getNetworkContentFilteringCategories(self, networkId: str):
        """
        **List all available content filtering categories for an MX network**
        https://developer.cisco.com/docs/meraki-api-v0/#!get-network-content-filtering-categories
        - networkId (string)
        """
        metadata = {
            'tags': ['Content filtering categories'],
            'operation': 'getNetworkContentFilteringCategories',
        }
        resource = f'/networks/{networkId}/contentFiltering/categories'
        return self._session.get(metadata, resource)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | meraki/api/content_filtering_categories.py | fsandberg/dashboard-api-python |
import inspect
from .task import *
def dir_classes(cls):
    """Yield every public attribute of *cls* that is itself a class."""
    for attr_name in dir(cls):
        attr = getattr(cls, attr_name)
        if inspect.isclass(attr) and not attr_name.startswith('_'):
            yield attr
def default_predicate(task):
    """Accept every task; used when the caller supplies no filter."""
    return True
def get_tasks(cls, predicate = default_predicate):
    """Walk *cls* and its nested classes, building a name -> Task mapping.

    Classes with a truthy `ignore` attribute or rejected by *predicate* are
    skipped.  Nested classes become both children and dependencies of their
    container; an optional `deps()` classmethod contributes extra deps.
    Finally each task's `waiters` set is filled with its reverse dependencies.
    """
    tasks = {}
    def recursive_get_tasks(cls):
        # Memoized on qualified name so a class reachable twice is built once.
        task = tasks.get(cls.__qualname__)
        if task:
            return task
        else:
            if hasattr(cls, 'ignore') and cls.ignore:
                return None
            if not predicate(cls):
                return None
            task = Task(cls)
            tasks[task.name] = task
            # Nested classes are simultaneously children and dependencies.
            for member in dir_classes(cls):
                child_task = recursive_get_tasks(member)
                if child_task:
                    task.children.add(child_task.name)
                    task.deps.add(child_task.name)
            # Explicitly declared dependencies via a deps() hook.
            if hasattr(cls, 'deps'):
                deps = cls.deps()
                for dep_cls in deps:
                    dep_task = recursive_get_tasks(dep_cls)
                    if dep_task:
                        task.deps.add(dep_task.name)
            return task
    def update_waiters(tasks):
        # Invert the deps relation: each dependency learns who waits on it.
        for task in tasks.values():
            for dep_name in task.deps:
                dep_task = tasks[dep_name]
                dep_task.waiters.add(task.name)
    recursive_get_tasks(cls)
    update_waiters(tasks)
    return tasks
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | modules/qplan/loader.py | fifoforlifo/pyqplan |
from Crypto.Cipher import AES
from django import forms
from django.forms import ValidationError
from .utils import get_decode_key
class DecodeForm(forms.Form):
    """Form validating an AES decode key (hex string) and a base64 message.

    NOTE(review): str.decode('hex') and str.decode('base64') are Python 2
    idioms; this module will not run unchanged on Python 3 -- verify the
    project's target interpreter.
    """
    optional_decode_key = forms.CharField(required=False)
    message = forms.CharField(widget=forms.Textarea, required=True)

    def clean_optional_decode_key(self):
        """Validate the optional 64-hex-digit key and return it as raw bytes."""
        optional_decode_key = self.cleaned_data['optional_decode_key'].strip()
        if optional_decode_key:
            if len(optional_decode_key) != 64:
                raise ValidationError('Invalid length for decode key !')
            try:
                decode_key = optional_decode_key.decode('hex')
            except TypeError as e:
                raise ValidationError('Cannot convert to binary: %r' % e.msg)
        # NOTE(review): when the key field is empty, decode_key is never
        # assigned before this return -- confirm intended behavior.
        return decode_key

    def clean_message(self):
        """Base64-decode the message and check AES block-size constraints."""
        message = self.cleaned_data['message']
        try:
            message = message.decode('base64')
        except TypeError as e:
            raise ValidationError('Cannot convert to binary: %r' % e.msg)
        if len(message) % 16:
            raise ValidationError('Wrong block size for message !')
        if len(message) <= 16:
            raise ValidationError('Message too short or missing IV !')
        return message
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | src/monkey_team/forms.py | ionelmc/django-monkey-team |
import csv
def ClassFactory(class_name, dictionary):
    """Create a new class named *class_name* whose attributes come from *dictionary*."""
    return type(class_name, (object,), dictionary)
class CsvReader:
    """Reads a CSV file into a list of row dictionaries keyed by header."""

    def __init__(self, filepath):
        """Load every row of *filepath* into self.data.

        Improvements over the original: removed a stray dead `pass`
        statement and clarified the file-handle variable name.
        """
        self.data = []
        with open(filepath) as csv_file:
            for row in csv.DictReader(csv_file, delimiter=','):
                self.data.append(row)

    def return_data_object(self, class_name):
        """Build one class per row via ClassFactory; return them as a list."""
        return [ClassFactory(class_name, row) for row in self.data]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | src/CSVreader.py | Shannon-NJIT/Calculator-Individual |
"""integration tests for messages.email_ module."""
import pathlib
import pytest
import int_setup
from messages.email_ import Email
from messages._exceptions import MessageSendError
##############################################################################
# SKIP TESTS IF ENVIRONMENT NOT PREPPED
##############################################################################
#Skip all tests if not configured
pytestmark = pytest.mark.skipif(not int_setup.integration_test_configured('email'),
reason='Tester not configured for messages.email_.Email')
#############################################################################
# FIXTURES
##############################################################################
TESTDIR = pathlib.Path(__file__).absolute().parent.parent.joinpath('data')
@pytest.fixture()
def get_email():
    """Return a valid Email instance."""
    # Uses the 'integration_tester' profile; the attachment lives in tests/data.
    return Email(
        subject='[Messages] Integration Test',
        body='Conducting Integration Testing',
        profile='integration_tester',
        attachments=str(TESTDIR.joinpath('file2.png')),
        save=False)
##############################################################################
# TESTS: Email.send()
##############################################################################
def test_email_good(get_email, capsys):
    """
    GIVEN a good email instance
    WHEN sending the email
    THEN verify the send occurs without issue
    """
    e = get_email
    e.send()
    # send() reports success on stdout; capsys captures it for the assertion.
    out, err = capsys.readouterr()
    assert "Message sent" in out
def test_email_badAuth(get_email):
    """
    GIVEN a email with the wrong password
    WHEN sending the email
    THEN verify MessageSendError is raised
    """
    e = get_email
    # Corrupt the credential so the SMTP login is rejected.
    e.auth = 'baDp@ssw0rd'
    with pytest.raises(MessageSendError):
        e.send()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | tests/integration_tests/test_int_email.py | sclickk/messages |
from enum import Enum
from json import JSONEncoder
from typing import Any
from strips_hgn.features import AbstractFeatureMapper
class MetricsEncoder(JSONEncoder):
    """ For encoding evaluation metrics """

    def default(self, o: Any) -> Any:
        # Serialize enums by their underlying value; defer everything else
        # to the base encoder (which raises TypeError for unknown types).
        if not isinstance(o, Enum):
            return super().default(o)
        return o.value
class ArgsEncoder(JSONEncoder):
    """ For encoding command line arguments """

    def default(self, o: Any) -> Any:
        """Serialize feature-mapper classes by name and enums by value.

        Fix: the original implicitly returned None when *o* was a class that
        is not an AbstractFeatureMapper, silently emitting JSON null.  Such
        values are now handed to the base encoder, which raises TypeError as
        JSONEncoder.default documents.
        """
        if isinstance(o, type) and issubclass(o, AbstractFeatureMapper):
            return o.name()
        if isinstance(o, Enum):
            return o.value
        return super().default(o)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false... | 3 | src/strips_hgn/utils/json_encoders.py | yutian-zhao/STRIPS-HGN |
"""Automation using nox.
"""
import glob
import nox
nox.options.reuse_existing_virtualenvs = True
nox.options.sessions = "lint", "tests", "tests-pytest5"
locations = "pytest_test_utils", "tests.py"
@nox.session(python=["3.7", "3.8", "3.9", "3.10"])
def tests(session: nox.Session) -> None:
    """Run the test suite under coverage on every supported interpreter."""
    session.install(".[tests]")
    # `pytest --cov` will start coverage after pytest
    # so we need to use `coverage`.
    session.run("coverage", "run", "-m", "pytest")
    session.run("coverage", "report", "--show-missing", "--skip-covered")
@nox.session(python=["3.7"], name="tests-pytest5")
def tests_pytest5(session: nox.Session) -> None:
    """Re-run the tests against the oldest supported pytest (5.0.0)."""
    session.install(".[tests]")
    session.install("pytest==5.0.0")
    session.run("coverage", "run", "-m", "pytest", "tests.py")
@nox.session
def lint(session: nox.Session) -> None:
    """Run pre-commit hooks, mypy, and pylint over the project."""
    session.install("pre-commit")
    session.install("-e", ".[dev]")
    # Without posargs, run on all files and show diffs so CI failures are clear.
    if session.posargs:
        args = session.posargs + ["--all-files"]
    else:
        args = ["--all-files", "--show-diff-on-failure"]
    session.run("pre-commit", "run", *args)
    session.run("python", "-m", "mypy")
    session.run("python", "-m", "pylint", *locations)
@nox.session
def build(session: nox.Session) -> None:
    """Build sdist/wheel artifacts and verify them with twine."""
    session.install("build", "setuptools", "twine")
    session.run("python", "-m", "build")
    dists = glob.glob("dist/*")
    session.run("twine", "check", *dists, silent=True)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exc... | 3 | noxfile.py | iterative/pytest-test-utils |
class BaseDownsizing:
    """Base class for read-downsizing strategies.

    Subclasses override the downsize_* hooks; the properties below compute
    the downsized file(s) once, cache them, and swap the raw file attributes
    for their downsized versions.

    Fixes vs. the original: duplicated attribute assignments removed;
    getattr() used a bogus "._downsized_f" name (with a leading dot) so the
    cache never hit; the reverse result was bound to a local
    `self_downsized_r` typo instead of the attribute; and both pair
    properties called a nonexistent self.downsize_pair().
    """

    def __init__(self, raw_file_f, raw_file_r=None):
        # Forward-read file (always present); cache starts unset.
        self.raw_file_f = raw_file_f
        self._downsized_f = None
        if raw_file_r:
            # Reverse-read file, only for paired-end data.
            self.raw_file_r = raw_file_r
            self._downsized_r = None

    def downsize_single(self):
        """Overridden in child classes to perform specified downsizing of fragment reads"""
        return self.raw_file_f

    def downsize_pair_uncompressed(self):
        """Overridden in child classes to perform specified downsizing of paired-ends reads"""
        return self.raw_file_f, self.raw_file_r

    def downsize_pair_gzip(self):
        """Overridden in child classes to perform specified downsizing of gzip compressed paired-ends reads"""
        return self.raw_file_f, self.raw_file_r

    @property
    def downsized_pair_uncompressed(self):
        # Compute once, then serve from cache and promote results to raw files.
        if getattr(self, "_downsized_f", None) is None:
            self._downsized_f, self._downsized_r = self.downsize_pair_uncompressed()
            self.raw_file_f = self._downsized_f
            self.raw_file_r = self._downsized_r
        return self._downsized_f, self._downsized_r

    @property
    def downsized_pair_gzip(self):
        if getattr(self, "_downsized_f", None) is None:
            self._downsized_f, self._downsized_r = self.downsize_pair_gzip()
            self.raw_file_f = self._downsized_f
            self.raw_file_r = self._downsized_r
        return self._downsized_f, self._downsized_r

    @property
    def downsized_single(self):
        if getattr(self, "_downsized_f", None) is None:
            self._downsized_f = self.downsize_single()
            self.raw_file_f = self._downsized_f
        return self._downsized_f
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | moonstone/normalization/reads/base.py | motleystate/moonstone |
# -*- coding: utf-8 -*-
from zappa_boilerplate.database import db_session
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
    """Registration form with uniqueness checks for username and email."""
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=3, max=25)])
    email = StringField('Email',
                        validators=[DataRequired(), Email(), Length(min=6, max=40)])
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=6, max=40)])
    confirm = PasswordField('Verify password',
                            [DataRequired(), EqualTo('password', message='Passwords must match')])

    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        # Populated by validate(); None until validation runs.
        self.user = None

    def validate(self):
        """Run field validators, then reject already-registered usernames/emails."""
        initial_validation = super(RegisterForm, self).validate()
        if not initial_validation:
            return False
        # NOTE(review): the two lookups use different query styles
        # (db_session.query(...) vs. User.query) -- confirm both are wired up.
        user = db_session.query(User).filter_by(username=self.username.data).first()
        if user:
            self.username.errors.append("Username already registered")
            return False
        user = User.query.filter_by(email=self.email.data).first()
        if user:
            self.email.errors.append("Email already registered")
            return False
        return True
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
... | 3 | zappa_boilerplate/user/forms.py | 402900550b/dtnewman2 |
#!/usr/bin/env python
import testcases
class Solution:
    """Bounded-knapsack solver: maximize drink satisfaction within a volume budget."""

    def __init__(self, volume: int, drinks: list):
        self.volume = volume
        self.drinks = drinks
        self.drink_count = len(self.drinks)
        # opt[cap][j]: best satisfaction using drinks[j:] with `cap` volume left.
        self.opt = [[0] * (self.drink_count + 1) for _ in range(volume + 1)]

    def calculate(self) -> int:
        # Zero capacity yields zero satisfaction regardless of remaining drinks.
        for col in range(0, self.drink_count + 1):
            self.opt[0][col] = 0
        # Fill columns right-to-left: each drink decides how many copies to take.
        for j in range(self.drink_count - 1, -1, -1):
            drink = self.drinks[j]
            for cap in range(0, self.volume + 1):
                best = self.opt[cap][j + 1]
                for count in range(0, drink.max_amount + 1):
                    if cap < count * drink.volume:
                        break  # further copies cannot fit
                    candidate = (self.opt[cap - count * drink.volume][j + 1]
                                 + drink.satisfactoriness * count)
                    if candidate > best:
                        best = candidate
                self.opt[cap][j] = best
        return self.opt[self.volume][0]
if __name__ == '__main__':
vol = int(input('volume? >>> '))
sol = Solution(vol, testcases.drink_list)
print(sol.calculate())
# print(sol.opt)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | chapter-1/1-6/src/dp.py | yuetsin/beauty-of-programming |
import os
import json
Environ = os._Environ
def is_on_cloudfoundry(env: Environ=os.environ) -> bool:
    """Return True when *env* (default os.environ) contains VCAP_SERVICES."""
    return 'VCAP_SERVICES' in env
def load_cups_from_vcap_services(name: str, env: Environ=os.environ) -> None:
    '''
    Detects if VCAP_SERVICES exists in the environment; if so, parses
    it and imports all the credentials from the given custom
    user-provided service (CUPS) as strings into the environment.
    For more details on CUPS, see:
    https://docs.cloudfoundry.org/devguide/services/user-provided.html
    '''
    if not is_on_cloudfoundry(env):
        return
    vcap = json.loads(env['VCAP_SERVICES'])
    # Only the entry whose 'name' matches contributes its credentials.
    for entry in vcap.get('user-provided', []):
        if entry['name'] == name:
            for key, value in entry['credentials'].items():
                env[key] = value
def load_database_url_from_vcap_services(name: str, service: str,
                                         env: Environ=os.environ) -> None:
    """
    Sets env['DATABASE_URL'] from a service entry in VCAP_SERVICES.

    No-op when not running on Cloud Foundry.  Fix: the original annotated
    the return type as ``str`` but never returned a value on any path; the
    annotation now matches the actual behavior (None).
    """
    if not is_on_cloudfoundry(env):
        return
    # FIXME: this'll break if there are multiple databases. Not an issue right
    # now, but could be in the future. Keep an eye on it.
    vcap = json.loads(env['VCAP_SERVICES'])
    env['DATABASE_URL'] = vcap[service][0]["credentials"]["uri"]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | bugbounty/settings_utils.py | 18F/tts-bug-bounty-dashboard |
from flask import request
import python_game_code.random_functions
class room1(object):
    """First room of the game.

    NOTE(review): choices() returns self.room1_scene1, but room1_scene1 is a
    module-level class, not an attribute of room1 -- this looks like it would
    raise AttributeError at runtime; verify intended lookup.
    """
    which_room = "Finding the Sword"
    def choices(self):
        return self.room1_scene1
class room1_scene1(room1):
    """Scene 1 of room 1: the player fights the darkness being with rocks."""
    def choices(self):
        # Accepted phrasings for the rock-throwing action.
        # FIX: the original list was missing a comma after 'throws', which
        # silently fused 'throws' and 'Throw' into the single string
        # 'throwsThrow' via implicit string concatenation.
        rocks = [
            'throw rock',
            'throw rocks',
            'throw',
            'rock',
            'rocks',
            'throws',
            'Throw',
            'Throw rock',
            'Throw Rock'
        ]
        return "You enter the corridor, looking around for the sword, when a darkness being jumps out at you! Without any pieces yet, you need to defeat the being somehow without touching him. There are some rocks nearby"
        # NOTE(review): everything below is unreachable (it follows the
        # return) and right_answer() reads `answer` from a scope that never
        # defines it when unreachable; kept verbatim for visibility.
        if request.method == "POST":
            answer = request.form['answer']
        def right_answer():
            for i in rocks:
                if answer == i:
                    return "You found the sword!"
class room2(object):
    """Second room.  NOTE(review): self.room2_scene1 is not an attribute of
    this class (room2_scene1 is module-level) -- likely AttributeError; verify."""
    which_room = "Finding the Saber"
    def choices(self):
        return self.room2_scene1
class room2_scene1(room2):
    """Opening scene of room 2 (placeholder text)."""
    def choices(self):
        return "You enter room2"
class room3(object):
    """Third room.  NOTE(review): choices() returns self.room2_scene1 --
    appears to be a copy-paste slip for room3_scene1; confirm before fixing."""
    which_room = "Finding the Blaster"
    def choices(self):
        return self.room2_scene1
class room3_scene1(room3):
    """Opening scene of room 3 (placeholder text)."""
    def choices(self):
        return "You enter room3"
class room4(object):
    """Fourth room.  NOTE(review): choices() returns self.room3_scene1 --
    appears to be a copy-paste slip for room4_scene1; confirm before fixing."""
    which_room = "Finding the Wand"
    def choices(self):
        return self.room3_scene1
class room4_scene1(room4):
    """Opening scene of room 4 (placeholder text)."""
    def choices(self):
        return "You enter room4"
class room5(object):
    """Final room of the game."""
    which_room = "Saving Reality"
    def choices(self):
        return self.room5_scene1
class room5_scene1(room5):
    """Opening scene of room 5 (placeholder text)."""
    def choices(self):
        return "You enter room5"
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | thedarkness/rooms.py | AndrewScholly/FlaskTheDarkness |
"""
Utilities for loading inference data into the model
"""
# TODO refactor so xml_loader and inference_loader import from a utilities directory
from ingestion.ingest_images import load_image, load_proposal, get_example_for_uuid
from torch.utils.data import Dataset
import torch
import os
from os.path import splitext
from torch_model.train.data_layer.xml_loader import XMLLoader
from torchvision.transforms import ToTensor
from torch_model.train.data_layer.transforms import NormalizeWrapper
from collections import namedtuple
normalizer = NormalizeWrapper(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
tens = ToTensor()
Document = namedtuple("Document", ["windows", "proposals", "identifier"])
class InferenceLoader(XMLLoader):
    """
    Inference dataset object, based on XMLLoader
    """
    def __init__(self, session, ingest_objs, classes):
        """
        Init function
        :param session: DB Session
        :param ingest_objs: Database statistics object
        :param classes: List of classes
        """
        super().__init__(session, ingest_objs, classes)

    @staticmethod
    def collate(batch):
        """
        collation function to be used with this dataset class
        :param batch: list of (xml_example, db_example) pairs; must have length 1
        :return: (collated xml example, db example)
        """
        # Inference runs one document at a time; larger batches are a misuse.
        if len(batch) > 1:
            raise ValueError(f"Inference classes are only meant to be used with a batch size of 1, got {len(batch)}")
        example = [batch[0][0]]
        collated = XMLLoader.collate(example)
        return collated, batch[0][1]

    def __getitem__(self, item):
        """
        Get an item
        :param item: UUID index
        :return: XMLLoader example, plus the matching database example
        """
        example = super(InferenceLoader, self).__getitem__(item)
        uuid = self.uuids[item]
        ex_db = get_example_for_uuid(uuid, self.session)
        return example, ex_db
#
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | cosmos/torch_model/inference/data_layer/inference_loader.py | hadarohana/myCosmos |
from tensorflow import keras
import numpy as np
import pidash
import os
#import gc
PATH = os.path.dirname(__file__)
# This is a prototype implementation of the sensor AI deployment.
#This is not final code and should not be reguarded as a best practices.
# get_exposed() is a simple pixel count routine. It established the pixel count on the x and the y axis using simple n^2 logic loops
def get_exposed(y_hat):
    """Count the rows and columns of the mask that contain any exposed pixel.

    y_hat: array whose flattened layout is 3 interleaved channels per pixel;
    channel 2 (every third value starting at index 2) holds the segmentation
    mask, reshaped to 256x256.  Returns (row_count, col_count) as ints.

    Replaces the original O(n^2) Python loops with equivalent vectorized
    numpy reductions: a row/column counts as exposed when any pixel in it
    is truthy.
    """
    flat = y_hat.ravel()
    mask = np.resize(flat[2::3], (256, 256))
    h = int(np.count_nonzero(mask.any(axis=1)))
    w = int(np.count_nonzero(mask.any(axis=0)))
    return h, w
def execute(): #on_dek, meta, id):
    """Load the pretrained Unet model, run it on the staged image, and write
    the pixel height/width result to complete.txt (also printed)."""
    #gc.collect()
    #Load keras pretrained model from .h5 file
    model = keras.models.load_model(PATH + "/model/UnetM-relu_output.h5")
    # summarize model
    model.summary()
    pidash.dashboard()
    #get px height and px width from image
    pxH, pxW = run_on_dek(model)
    # NOTE(review): 3.36 and .333 look like a calibration formula for gage
    # height from the pixel aspect ratio -- confirm against the deployment.
    outputtxt = 'Height: '+ str(pxH) + ' px '+ ' H(p): ' + str((3.36 - (pxH/pxW) * .333)) + ' width: '+ str(pxW) + ' px'
    text_file = open("complete.txt", "w")
    n = text_file.write(outputtxt)
    text_file.close()
    print (outputtxt)
def run_on_dek(model):
    """Run *model* on the staged .npy image; return (pixel_height, pixel_width)
    of the thresholded (>= .995) prediction mask."""
    # Load img
    img = np.load(PATH + "/on_dek/rdy.npy")
    print("Image loaded..." + '\n\n' + "Running model...")
    pidash.dashboard()
    result = model.predict(img)
    print("\n\nModel ran successfully...")
    # Binarize: only near-certain pixels count as exposed.
    result = result >=.995
    #print (result)
    px, w = get_exposed(result)
    return px, w
#execute()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"ans... | 3 | sensor_AI/run_lite.py | USGS-WiM/Gage-Cam-Sensor-AI |
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dlxapi
from dlxapi.models.custom_name import CustomName # noqa: E501
from dlxapi.rest import ApiException
class TestCustomName(unittest.TestCase):
    """CustomName unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testCustomName(self):
        """Test CustomName"""
        # FIXME: construct object with mandatory attributes with example values
        # model = dlxapi.models.custom_name.CustomName()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | python/test/test_custom_name.py | dlens/dlxapi |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["StarryBaseOp"]
import pkg_resources
from theano import gof
from ..build_utils import get_compile_args, get_cache_version
class StarryBaseOp(gof.COp):
    """Base Theano COp for starry: wires up headers, include paths, and
    compile flags shared by all starry ops.  Subclasses set func_file/func_name."""

    __props__ = ()
    # Subclasses point these at the C source file and entry symbol.
    func_file = None
    func_name = None

    def __init__(self):
        super(StarryBaseOp, self).__init__(self.func_file, self.func_name)

    def c_code_cache_version(self):
        # Bump via build_utils so cached compilations invalidate on release.
        return get_cache_version()

    def c_headers(self, compiler):
        return ["theano_helpers.h"]

    def c_header_dirs(self, compiler):
        # Bundled starry sources plus vendored Eigen and Boost.
        return [
            pkg_resources.resource_filename(__name__, "include"),
            pkg_resources.resource_filename(__name__, "starry/starry"),
            pkg_resources.resource_filename(__name__,
                                            "starry/lib/eigen_3.3.3"),
            pkg_resources.resource_filename(__name__,
                                            "starry/lib/boost_1_66_0"),
        ]

    def c_compile_args(self, compiler):
        return get_compile_args(compiler)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | exoplanet/theano_ops/starry/base_op.py | Junjun1guo/exoplanet |
import RPi.GPIO as GPIO
from Adafruit_LED_Backpack import SevenSegment
from time import sleep
def setup():
    """Initialize GPIO (BCM numbering) and both seven-segment displays;
    return (red_segment, green_segment)."""
    GPIO.setmode(GPIO.BCM)
    # I2C addresses 0x70 (red) and 0x72 (green).
    red_segment = SevenSegment.SevenSegment(address=0x70)
    green_segment = SevenSegment.SevenSegment(address=0x72)
    red_segment.begin()
    green_segment.begin()
    return red_segment, green_segment
def teardown():
    """Release all GPIO resources claimed by setup()."""
    GPIO.cleanup()
def test_all_digits(segment):
    """Cycle a digit through all four positions of *segment* with a short delay.

    NOTE(review): range(9) shows digits 0-8 only; if 9 should be displayed
    too, this is an off-by-one -- confirm intent.
    """
    segment.clear()
    for num in range(9):
        segment.set_digit(0, num)
        segment.set_digit(1, num)
        segment.set_digit(2, num)
        segment.set_digit(3, num)
        sleep(0.5)
def main():
    """Exercise both displays, guaranteeing GPIO cleanup even on failure."""
    try:
        red_segment, green_segment = setup()
        test_all_digits(red_segment)
        test_all_digits(green_segment)
    finally:
        teardown()
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | king_of_the_hill/segment_timer.py | andrewzwicky/KingOfTheHill |
from flask import url_for
from app.questionnaire.rules import evaluate_skip_conditions
from app.templating.summary.question import Question
class Block:
    """Summary view of a questionnaire block: its link and visible questions."""

    def __init__(self, block_schema, group_id, answer_store, metadata, schema, group_instance):
        self.id = block_schema['id']
        self.title = block_schema.get('title')
        self.number = block_schema.get('number')
        self.link = self._build_link(block_schema, group_id, metadata, group_instance)
        self.questions = self._build_questions(block_schema, answer_store, metadata, schema, group_instance)

    @staticmethod
    def _build_link(block_schema, group_id, metadata, group_instance):
        """URL for editing this block from the summary page."""
        return url_for('questionnaire.get_block',
                       eq_id=metadata['eq_id'],
                       form_type=metadata['form_type'],
                       collection_id=metadata['collection_exercise_sid'],
                       group_id=group_id,
                       group_instance=group_instance,
                       block_id=block_schema['id'])

    @staticmethod
    def _build_questions(block_schema, answer_store, metadata, schema, group_instance):
        """Serialize each question whose skip conditions do not apply."""
        questions = []
        for question_schema in block_schema.get('questions', []):
            is_skipped = evaluate_skip_conditions(question_schema.get('skip_conditions'), schema, metadata, answer_store)
            if not is_skipped:
                question = Question(question_schema, answer_store, metadata, schema, group_instance).serialize()
                questions.append(question)
        return questions

    def serialize(self):
        """Return this block as a plain dict for template rendering."""
        return {
            'id': self.id,
            'title': self.title,
            'number': self.number,
            'link': self.link,
            'questions': self.questions,
        }
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | app/templating/summary/block.py | uk-gov-mirror/ONSdigital.eq-survey-runner |
def validate(*validations):
    """
    Checks if all `validations` are correct before executing.

    The wrapped function runs only when every value in *validations* is
    truthy (evaluated at call time via all()).  Fixes vs. the original:
    the wrapper now propagates the function's return value (it previously
    always returned None) and preserves the wrapped function's metadata
    via functools.wraps.  When any validation fails the call is a no-op
    returning None, as before.
    """
    import functools  # local import: this module's import header is out of view

    def message_validation(func):
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            if all(validations):
                return func(*args, **kwargs)
            return None
        return func_wrapper
    return message_validation
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | bot/utils.py | lessss4/oil-and-rope |
#coding=utf-8
def MultiplePackage(N, C, weight, value, num, physic):
    '''
    Bounded ("multiple") knapsack: each item i may be taken up to num[i] times.
    (Docstring translated from the original Chinese.)
    :param N: number of item kinds (arrays are 1-indexed; index 0 is a dummy)
    :param C: total knapsack capacity
    :param weight: per-item weight, e.g. weight=[0,5,4,7,2,6]
    :param value: per-item value, e.g. value=[0,12,3,10,3,6]
    :param num: per-item count limit, e.g. num=[0,2,4,1,5,3]
    :param physic: upper bound that a cell's value may not exceed
    :return: DP table f, where f[i][j] is the best value using items 1..i in capacity j
    '''
    # f[i][j]: best value achievable with the first i items and capacity j.
    f = [[0 for col in range(C + 1)] for row in range(N + 1)]
    for i in range(1, N + 1):
        for j in range(1, C + 1):
            # Most copies of item i that fit: capacity limit vs. count limit.
            # FIX: use floor division; the original's "/" yields a float on
            # Python 3 and range() then raises TypeError.
            max_num_i = min(j // weight[i], num[i])
            f[i][j] = f[i - 1][j]
            for k in range(max_num_i + 1):
                if f[i][j] < f[i - 1][j - k * weight[i]] + k * value[i] <= physic:
                    # State transition: take k copies of item i.
                    f[i][j] = f[i - 1][j - k * weight[i]] + k * value[i]
    return f
def FindWhat(f,value,weight,i,j,item,num):
    # Backtrack through DP table f (from MultiplePackage) to recover how many
    # copies of each item were taken, writing the counts into item[] in place.
    # NOTE(review): when i==0, f[i-1] indexes f[-1] (the table's LAST row).
    # This happens to work because row 0 is the all-zero dummy item, but it
    # is fragile -- verify before reusing this backtracker elsewhere.
    if i>=0:
        if f[i][j]==f[i-1][j]:
            # Item i contributed nothing; move to the previous item.
            item[i]=0
            FindWhat(f,value,weight,i-1,j,item,num)
        elif j-weight[i]>=0:
            # Find the smallest count k consistent with the DP value, then
            # recurse on the remaining capacity.
            for k in range(num[i]+1):
                if f[i][j]==f[i-1][j-k*weight[i]]+k*value[i]:
                    item[i]=k
                    break
            FindWhat(f,value,weight,i-1,j-item[i]*weight[i],item,num)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | Competition Codes/packageFunction.py | Harrdy2018/2018-Huawei-Code-Craft |
# encoding: utf-8
import sys
from marrow.script.core import Parser
__all__ = ['Parser', 'execute', 'script', 'annotate', 'describe', 'short']
def execute(obj): # pragma: no cover
    """Run *obj* through a Parser with the process argv and exit with its status."""
    sys.exit(Parser(obj)(sys.argv[1:]))
def base(attr):
    """Return a decorator factory that records keyword metadata on functions.

    base('_x')(**kw) yields a decorator that merges kw into the dict stored
    at fn._x (creating it on first use) and returns fn unchanged.
    """
    def decorator(**metadata):
        def inner(fn):
            if not hasattr(fn, attr):
                fn.__dict__[attr] = {}
            fn.__dict__[attr].update(metadata)
            return fn
        return inner
    return decorator
script = base('_cmd_script_info')
annotate = base('_cmd_arg_type')
describe = base('_cmd_arg_doc')
short = base('_cmd_arg_abbrev')
callbacks = base('_cmd_arg_callback')
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | marrow/script/__init__.py | marrow/script |
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
from utils import assert_eq
import nvstrings
def test_cat():
    """Exercise nvstrings.cat: default/custom separators, na_rep, and others."""
    strs = nvstrings.to_device(
        ["abc", "def", None, "", "jkl", "mno", "accént"]
    )
    # With no arguments, nulls are dropped from the concatenation.
    got = strs.cat()
    expected = ["abcdefjklmnoaccént"]
    assert_eq(got, expected)
    # non-default separator
    got = strs.cat(sep=":")
    expected = ["abc:def::jkl:mno:accént"]
    assert_eq(got, expected)
    # non default separator and na_rep
    got = strs.cat(sep=":", na_rep="_")
    expected = ["abc:def:_::jkl:mno:accént"]
    assert_eq(got, expected)
    # non-null others, default separator, and na_rep
    strs2 = nvstrings.to_device(["1", "2", "3", "4", "5", "é", None])
    got = strs.cat(strs2, sep=":", na_rep="_")
    expected = ["abc:1", "def:2", "_:3", ":4", "jkl:5", "mno:é", "accént:_"]
    assert_eq(got, expected)
    # nvstrings others: without na_rep, a null on either side yields null
    strs2 = nvstrings.to_device(["1", "2", "3", None, "5", "é", ""])
    got = strs.cat(strs2)
    expected = ["abc1", "def2", None, None, "jkl5", "mnoé", "accént"]
    assert_eq(got, expected)
def test_cat_multiple():
    """nvstrings.cat() with a *list* of other columns (multi-way concatenation)."""
    strs = nvstrings.to_device(["abc", "df", None, "", "jkl", "mn", "accént"])
    strs1 = nvstrings.to_device(["1", "2", "3", "4", "5", "é", None])
    strs2 = nvstrings.to_device(["1", "2", "3", None, "5", "é", ""])
    # Without na_rep, any null in a row nulls the whole result row.
    got = strs.cat([strs1, strs2])
    expected = ["abc11", "df22", None, None, "jkl55", "mnéé", None]
    assert_eq(got, expected)
    # With sep and na_rep every row survives, nulls rendered as "_".
    got = strs.cat([strs1, strs2], sep=":", na_rep="_")
    expected = [
        "abc:1:1",
        "df:2:2",
        "_:3:3",
        ":4:_",
        "jkl:5:5",
        "mn:é:é",
        "accént:_:",
    ]
    assert_eq(got, expected)
def test_join():
    """nvstrings.join(): collapse the whole column into a single string."""
    column = nvstrings.to_device(["1", "2", "3", None, "5", "é", ""])
    # Default separator is empty; nulls are skipped.
    assert_eq(column.join(), ["1235é"])
    # Explicit separator between surviving elements.
    assert_eq(column.join(sep=":"), ["1:2:3:5:é:"])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | python/nvstrings/tests/test_combine.py | williamBlazing/cudf |
"""
create histogram table
Revision ID: a7b01b0429ff
Revises:
Create Date: 2020-07-09 10:03:15.932674
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'a7b01b0429ff'
down_revision = None  # None: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``histogram`` table, skipping creation if it already exists.

    The inspector check makes the migration idempotent for databases where
    the table was created outside Alembic.
    """
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    if 'histogram' not in tables:
        op.create_table(
            'histogram',
            sa.Column('hist_id', sa.Integer, primary_key=True),
            # Foreign keys into the image and method-definition tables.
            sa.Column('image_id', sa.Integer, sa.ForeignKey('euv_images.image_id')),
            sa.Column('meth_id', sa.Integer, sa.ForeignKey('meth_defs.meth_id')),
            sa.Column('date_obs', sa.DateTime),
            sa.Column('instrument', sa.String(10)),
            sa.Column('wavelength', sa.Integer),
            sa.Column('n_mu_bins', sa.Integer),
            sa.Column('n_intensity_bins', sa.Integer),
            sa.Column('lat_band', sa.Float),
            # Binary blobs — presumably serialized arrays; format not visible here.
            sa.Column('mu_bin_edges', sa.LargeBinary),
            sa.Column('intensity_bin_edges', sa.LargeBinary),
            sa.Column('hist', sa.LargeBinary),
        )
def downgrade():
    """Drop the ``histogram`` table.

    NOTE(review): drops unconditionally even if upgrade() skipped creation
    because the table pre-existed — confirm this is acceptable.
    """
    op.drop_table('histogram')
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | chmap/alembic/versions/a7b01b0429ff_create_histogram_table.py | predsci/CHD |
#!/usr/bin/env python3
# Packet MAC Sniffer
# Author Yehia Elghaly
import socket
import textwrap
import struct
from colorama import Fore, Back, Style
def main():
    """Capture raw Ethernet frames forever and print each header's fields.

    Uses AF_PACKET raw sockets (Linux-specific; needs elevated privileges).
    ntohs(3) selects protocol 0x0003 — conventionally "all protocols".
    """
    connection = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
    while True:
        # 65536 covers the maximum frame size; addr is unused.
        read_data, addr = connection.recvfrom(65536)
        send_mac, recv_mac, protocol, packet_data = ethernet(read_data)
        print ('\nEthernet Data:')
        print (Fore.GREEN + 'Destination: {}, Source: {}, Protocol: {}'. format (send_mac, recv_mac, protocol))
def ethernet(packet_data):
    """Parse a 14-byte Ethernet II header from a raw frame.

    Returns (dest_mac, src_mac, ethertype, payload): the two formatted MAC
    strings (destination first, per the Ethernet header layout), the
    EtherType passed through socket.htons (preserving the original's
    byte-order handling), and the bytes *after* the header.
    """
    dest_mac, src_mac, protocol = struct.unpack('!6s 6s H', packet_data[:14])
    # BUG FIX: the payload is everything after the 14-byte header; the original
    # returned the header slice packet_data[:14] a second time. main() ignores
    # this tuple element, so the fix is backward-compatible within this script.
    return read_mac_addr(dest_mac), read_mac_addr(src_mac), socket.htons(protocol), packet_data[14:]


def read_mac_addr(raw_bytes):
    """Format 6 raw bytes as an upper-case, colon-separated MAC string."""
    # FIX: parameter renamed from `bytes`, which shadowed the builtin type.
    hex_pairs = map('{:02x}'.format, raw_bytes)
    return ':'.join(hex_pairs).upper()
main() | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | Chapter 06/Packet-Sniffer-MAC.py | bpbpublications/Learn-Penetration-Testing-with-Python-3.x |
from PyQt5 import QtCore, QtWidgets, QtGui
from CharakterAssistent import ChoicePopup
class VariantPopupWrapper(object):
    """Modal dialog wrapper letting the user pick character variants.

    After construction, `self.ret` holds the dialog result code and
    `self.choices` the indices of the selected variant lists (empty unless
    the dialog was accepted).
    """

    def __init__(self, variantListCollection, windowTitle):
        super().__init__()
        self.formMain = QtWidgets.QDialog()
        # Fixed-chrome window: title bar with close button only.
        self.formMain.setWindowFlags(
            QtCore.Qt.Window |
            QtCore.Qt.CustomizeWindowHint |
            QtCore.Qt.WindowTitleHint |
            QtCore.Qt.WindowCloseButtonHint)
        self.ui = ChoicePopup.Ui_formMain()
        self.ui.setupUi(self.formMain)
        self.formMain.setWindowTitle(self.formMain.windowTitle() + " (" + windowTitle + ")")
        self.setupMainForm(variantListCollection)
        self.formMain.setWindowModality(QtCore.Qt.ApplicationModal)
        self.formMain.show()
        # Blocks until the dialog is closed.
        self.ret = self.formMain.exec_()
        self.choices = []
        if self.ret == QtWidgets.QDialog.Accepted:
            # Collect indices of all checked buttons (one at most when
            # radio buttons are used).
            for i in range(len(variantListCollection.choiceLists)):
                if self.buttons[i].isChecked():
                    self.choices.append(i)

    def setupMainForm(self, variantListCollection):
        """Populate the dialog: radio buttons for choose-one, checkboxes otherwise."""
        self.buttons = []
        self.labels = []
        for variantList in variantListCollection.choiceLists:
            if variantListCollection.chooseOne:
                button = QtWidgets.QRadioButton(variantList.name)
                # Pre-select the first entry so a choose-one dialog always has a selection.
                if variantList == variantListCollection.choiceLists[0]:
                    button.setChecked(True)
            else:
                button = QtWidgets.QCheckBox(variantList.name)
            self.buttons.append(button)
            self.ui.verticalLayout.addWidget(button)
            #labelText = ""
            #for choice in variantList.choices:
            #    choiceStr = choice.toString()
            #    if not choiceStr:
            #        continue
            #    labelText += choiceStr + ", "
            #labelText = labelText[:-2]
            #label = QtWidgets.QLabel(labelText)
            #self.labels.append(label)
            #self.ui.verticalLayout.addWidget(label)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | src/Sephrasto/Plugins/CharakterAssistent/VariantPopupWrapper.py | qeqar/Sephrasto |
import json
import click
from isic_cli.cli.context import IsicContext
@click.group(short_help='Manage authentication with the ISIC Archive.')
@click.pass_obj
def user(ctx):
    """Command group for ISIC Archive authentication (login/logout)."""
    pass
@user.command()
@click.pass_obj
def login(obj: IsicContext):
    """Login to the ISIC Archive."""
    # Already authenticated: just greet instead of re-running the OAuth flow.
    if obj.user:
        click.echo(f'Hello {obj.user["email"]}!')
    else:
        obj.oauth.login()
        click.echo('Success!')
@user.command()
@click.pass_obj
def logout(obj: IsicContext):
    """Logout of the ISIC Archive."""
    obj.oauth.logout()
@user.command(hidden=True)
@click.pass_obj
def print_token(obj: IsicContext):
    """Debug helper: dump the current OAuth token as JSON (hidden command).

    Reaches into the OAuth client's private state (_load/_session).
    """
    obj.oauth._load()
    click.echo(json.dumps(obj.oauth._session.token, indent=4))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | isic_cli/cli/user.py | ImageMarkup/isic-cli |
'''
Description:
Shuffle a set of numbers without duplicates.
Example:
// Init an array with set 1, 2, and 3.
int[] nums = {1,2,3};
Solution solution = new Solution(nums);
// Shuffle the array [1,2,3] and return its result. Any permutation of [1,2,3] must equally likely to be returned.
solution.shuffle();
// Resets the array back to its original configuration [1,2,3].
solution.reset();
// Returns the random shuffling of array [1,2,3].
solution.shuffle();
Hint #1
The solution expects that we always use the original array to shuffle() else some of the test cases fail. (Credits; @snehasingh31)
'''
from typing import List
from random import shuffle
class Solution:
    """Keep a pristine reference to *nums* so reset() can restore it, while
    shuffle() permutes a separate working copy in place."""

    def __init__(self, nums: List[int]):
        # Keep the caller's list untouched; work on a copy.
        self.origin = nums
        self.array = list(nums)

    def reset(self) -> List[int]:
        """Restore the working copy from the original sequence and return it."""
        self.array = list(self.origin)
        return self.array

    def shuffle(self) -> List[int]:
        """Shuffle the working copy in place and return it."""
        shuffle(self.array)
        return self.array
# n : the length of input array
## Time Complexity: O( n )
#
# The overhead in time is the cost of shuffle and reset, which are of O( n ).
## Space Complexity: O( n )
#
# The overhead in space is the storage for class member, self.array, which is of O( n ).
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'sequence')
def test_bench():
    """Smoke-test Solution: shuffle / reset / shuffle on [1, 2, 3].

    Output is random by design, so results are printed rather than asserted;
    any valid permutation is acceptable (reset must print [1, 2, 3]).
    """
    t = TestEntry( sequence = [1,2,3] )
    # reference output
    # this is a challenge about randomness, so any valid permutation of shuffling result is accepted.
    '''
    [1, 3, 2]
    [1, 2, 3]
    [3, 2, 1]
    '''
    obj = Solution( t.sequence )
    print( obj.shuffle() )
    print( obj.reset() )
    print( obj.shuffle() )


if __name__ == '__main__':
    test_bench()
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | Top_Interview_Question_Easy/Design/Shuffle an Array/by_random_shuffle.py | coderMaruf/leetcode-1 |
# Exercise 7.24
# Author: Noah Waterfield Price
import numpy as np
import matplotlib.pyplot as plt
import operator
class PiecewiseConstant:
    """Piecewise-constant function built from [(value, x_start), ...] plus xmax.

    Interval i spans [data[i][1], data[i+1][1]); x == xmax belongs to the
    last interval. Scalar and NumPy-array inputs are both supported.
    """

    def __init__(self, data, xmax):
        # Append a (None, xmax) sentinel so every interval has an upper bound.
        self.data = data + [(None, xmax)]

    def __call__(self, x):
        # Scalars go through the loop version; everything else is treated
        # as a NumPy array and vectorized.
        if isinstance(x, (float, int)):
            return self.piecewise(x, self.data)
        else:
            return self.piecewise_vec(x, self.data)

    def plot(self):
        """Return (x, y) point lists that reproduce the jump discontinuities exactly."""
        data = self.data
        # create lists of points to exactly reproduce discontinuities
        x = [data[0][1]]
        y = [data[0][0], data[0][0]]
        for i in range(1, len(data) - 1):
            x.append(data[i][1])
            x.append(data[i][1])
            y.append(data[i][0])
            y.append(data[i][0])
        x.append(data[-1][1])
        return x, y

    @staticmethod
    def piecewise(x, data):
        """Return the value of the interval containing scalar x (None if out of range)."""
        for i in range(len(data) - 1):
            if data[i][1] <= x < data[i + 1][1]:
                return data[i][0]
        # BUG FIX: the original's `or x == data[-1][1]` fired on the *first*
        # iteration, so f(xmax) returned the first interval's value while the
        # vectorized path assigned xmax to the last interval. Assign x == xmax
        # to the last interval, consistent with piecewise_vec.
        if x == data[-1][1]:
            return data[-2][0]

    @staticmethod
    def piecewise_vec(x, data):
        """Vectorized variant: x is a NumPy array; returns an array of values."""
        r = np.zeros(len(x))
        # BUG FIX: the original used Python 2's `xrange` — a NameError on
        # Python 3 (the rest of the file uses Python 3 print()).
        for i in range(len(data) - 1):
            cond = operator.and_(data[i][1] <= x, x < data[i + 1][1])
            cond = operator.or_(cond, x == data[-1][1])
            r[cond] = data[i][0]
        return r
# Demo: plot a three-step piecewise-constant function on [1, 4].
f = PiecewiseConstant([(0.4, 1), (0.2, 1.5), (0.1, 3)], xmax=4)
x, y = f.plot()
plt.plot(x, y)
# Pad the y-axis by 10% of the data span.
# FIX: the original bound this to the name `range`, shadowing the builtin at
# module level — any later call into PiecewiseConstant.piecewise/plot (which
# use range()) would have raised a TypeError.
y_span = max(y) - min(y)
plt.ylim([min(y) - 0.1 * y_span, max(y) + 0.1 * y_span])
plt.xlabel('x')
plt.ylabel('y')
plt.title('Piecewise constant function')
plt.show()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | ch_7/PiecewiseConstant2.py | ProhardONE/python_primer |
Inc('dfaccto/util.py', abs=True)
class _Event(ModuleContext):
    """Module context declaring the dfaccto 'event' VHDL package and Event type."""

    def __init__(self):
        ModuleContext.__init__(self)
        self._setup_packages()

    def _setup_packages(self):
        # Package rendered from the generic template into pkg/dfaccto_event.vhd.
        self.pkg = Pkg('dfaccto_event',
                       x_templates={self.File('generic/package.vhd.tpl'): self.File('pkg/dfaccto_event.vhd')})
        with self.pkg:
            self.tEvent = self.TypeEvent('Event')

    def TypeEvent(self, name, stb_bits=None, ack_bits=None):
        """Declare an event type named *name*.

        stb_bits / ack_bits: optional payload widths for the strobe/ack sides;
        None means no payload data type on that side. Returns the TypeC
        declaration wired up with the event template parts.
        """
        tlogic = Util.tlogic
        if stb_bits is not None:
            tsdata = Util.TypeUnsigned('{}Strb'.format(name), width=stb_bits)
        else:
            tsdata = None
        if ack_bits is not None:
            tadata = Util.TypeUnsigned('{}Ack'.format(name), width=ack_bits)
        else:
            tadata = None
        # NOTE(review): x_wrapigmap is deliberately None here, unlike the other
        # x_wrap* slots — confirm against the template set.
        return TypeC(name, x_is_event=True,
                     x_definition=self.Part('types/definition/event.part.tpl'),
                     x_format_ms=self.Part('types/format/event_ms.part.tpl'),
                     x_format_sm=self.Part('types/format/event_sm.part.tpl'),
                     x_wrapeport=self.Part('types/wrapeport/event.part.tpl'),
                     x_wrapeconv=self.Part('types/wrapeconv/event.part.tpl'),
                     x_wrapipmap=self.Part('types/wrapipmap/event.part.tpl'),
                     x_wrapigmap=None,
                     x_tlogic=tlogic, x_tsdata=tsdata, x_tadata=tadata,
                     x_cnull=lambda t: Con('{}Null'.format(name), t, value=Lit({'stb': False, 'ack': False})))
Event = _Event()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | cfg/dfaccto/event.py | lw0/dfaccto_lib |
#
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from build_definitions import *
class OpenLDAPDependency(Dependency):
    """Third-party build definition for the OpenLDAP client libraries."""

    def __init__(self):
        super(OpenLDAPDependency, self).__init__(
            'openldap',
            '2_4_54',
            'https://github.com/yugabyte/openldap/archive/OPENLDAP_REL_ENG_{}.tar.gz',
            BUILD_GROUP_COMMON)
        self.copy_sources = True

    def build(self, builder):
        """Configure and build the client only — all server-side features disabled."""
        server_features = ['slapd', 'bdb', 'hdb', 'mdb', 'monitor', 'relay', 'syncprov']
        configure_args = ['--disable-{}'.format(feature) for feature in server_features]
        builder.build_with_configure(builder.log_prefix(self), configure_args)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | build_definitions/openldap.py | d-uspenskiy/yugabyte-db-thirdparty |
from .. import DataWrapper
from cogdl.data import Graph
class FullBatchNodeClfDataWrapper(DataWrapper):
    """Data wrapper for full-batch node classification: train/val/test all
    receive the entire graph."""

    def __init__(self, dataset):
        super(FullBatchNodeClfDataWrapper, self).__init__(dataset)
        self.dataset = dataset

    def train_wrapper(self) -> Graph:
        # Full batch: training uses the whole graph.
        return self.dataset.data

    def val_wrapper(self):
        return self.dataset.data

    def test_wrapper(self):
        return self.dataset.data

    def pre_transform(self):
        # Mutates the dataset's graph in place by adding missing self-loops.
        self.dataset.data.add_remaining_self_loops()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answ... | 3 | cogdl/wrappers/data_wrapper/node_classification/node_classification_dw.py | li-ziang/cogdl |
# -*- coding: utf-8 -*-
""" Routes Module
Currently this module contains all of the routes for the main blueprint
"""
from flask import render_template
from flask_login import current_user, login_required
from app.main import main_bp
@main_bp.route('/')
@main_bp.route('/public')
def public():
    """Public Route

    This is the route that leads to the public main page

    Args:
        None
    Returns:
        rendered public.html template
    """
    return render_template('public.html', title='Home | CONP')
@main_bp.route('/index')
@login_required
def index():
    """ Index Route

    The route to the non-public index page

    Args:
        None
    Returns:
        The rendered index.html template for the current_user
    """
    # NOTE(review): if the user were somehow unauthenticated this falls through
    # to an implicit None; @login_required should prevent that — confirm, or
    # add an explicit redirect.
    if current_user.is_authenticated:
        return render_template('index.html', title='CONP | Home', user=current_user)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | app/main/routes.py | candicecz/conp-portal |
from errors import Request
from .mixins import Identify
class Report(Identify):
    """Action: validate a user-submitted report message, email it, and queue
    an acknowledgement for the reporting account."""

    def _validate(self, request):
        """Require a non-empty, stripped `message` field in the request."""
        super()._validate(request)
        validator = self._application.validator
        # Name-mangled attribute shared with _process below.
        self.__message = self._get(request, 'message', '').strip()
        if validator.isempty(self.__message):
            raise Request('message', self.__message)

    def _process(self, request):
        """Mail the report and push a thank-you notification to the account."""
        storage = self._application.storage
        mail = self._application.mail
        token = self._session.token
        task_id = self._task.id
        subject = f'Report from {token} about task #{task_id}'
        mail.send(subject, self.__message)
        storage.push(
            self._session.account.uuid,
            '''
                Thank you for leaving report
                We're working on your issue
            ''',
        )
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | api/source/actions/report.py | 1pkg/ReRe |
import cocotb
import logging
from cocotb.triggers import Timer
def bin2gray(num):
    """Convert a plain binary number to its Gray-code representation."""
    return num ^ (num >> 1)
def gray2bin(num):
    """Convert a Gray-code number back to plain binary.

    XORs the value with successively right-shifted copies of itself until
    the shift reaches zero.
    """
    result = num
    shift = num >> 1
    while shift:
        result ^= shift
        shift >>= 1
    return result
# Width of the DUT's binary/gray buses, in bits.
BINARY_WIDTH = 8


@cocotb.test()
async def test(dut):
    """Exhaustively check the DUT's bin->gray and gray->bin conversions
    against the software reference for every value of BINARY_WIDTH bits."""
    max_value = 2 ** BINARY_WIDTH
    values = [i for i in range(max_value)]  # NOTE(review): unused — confirm before removing
    l = logging.getLogger("cocotb")
    for i in range(max_value):
        g = bin2gray(i)
        l.info("testing BIN -> GRAY {}".format(i))
        dut.i_bin <= i;
        await Timer(2, "ns")
        assert dut.o_gray == g, "got : {}".format(dut.o_gray.value)
    for i in range(max_value):
        g = bin2gray(i)
        l.info("testing GRAY -> BIN {}".format(i))
        dut.i_gray <= g;
        await Timer(2, "ns")
        assert dut.o_bin == i, "got : {}".format(dut.o_bin.value)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"ans... | 3 | _oldfiles/src_test/gray_encoding/gray_tb.py | 4Kamei/verilog_sources |
#!/usr/bin/env python
"""
Solution to Project Euler Problem
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
We shall say that an n-digit number is pandigital if it makes use of all
the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital
and is also prime.
What is the largest n-digit pandigital prime that exists?
"""
from digits import is_pandigital
from primality import primes_upto, is_prime
def pandigital_primes(digits=7):
    """Yield the pandigital primes up to the largest `digits`-digit number."""
    upper_bound = int("9" * digits)
    for candidate in primes_upto(upper_bound):
        if is_pandigital(candidate):
            yield candidate
def test():
    """Sanity checks from the problem statement.

    No permutation of 1-3 digits can be prime (digit sums 6 and 10... the
    1..3 sum, 6, is divisible by 3, so all those permutations are), while
    2143 is the statement's example of a 4-digit pandigital prime.
    """
    assert not is_prime(123)
    assert not is_prime(132)
    assert not is_prime(213)
    assert not is_prime(231)
    assert not is_prime(312)
    assert not is_prime(321)
    assert is_prime(2143)
    assert is_pandigital(2143)
    assert 2143 in set(pandigital_primes(digits=4))
def run():
    """Print the answer: the largest pandigital prime (last element yielded)."""
    print(list(pandigital_primes())[-1])


if __name__ == "__main__":
    test()
    run()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projecteuler/euler041_pandigital_prime.py | webdevhub42/Lambda |
from discord.ext import commands
class Info(commands.CommandError):
    """Command error carrying an informational message plus display kwargs."""
    def __init__(self, message, **kwargs):
        super().__init__(message)
        self.kwargs = kwargs


# NOTE(review): shadows the builtin `Warning` within this module.
class Warning(commands.CommandError):
    """Command error carrying a warning message plus display kwargs."""
    def __init__(self, message, **kwargs):
        super().__init__(message)
        self.kwargs = kwargs


# NOTE(review): shadows the module-level name `Error` over any builtin use.
class Error(commands.CommandError):
    """Command error carrying an error message plus display kwargs."""
    def __init__(self, message, **kwargs):
        super().__init__(message)
        self.kwargs = kwargs
class LastFMError(commands.CommandError):
    """Error reported by the Last.fm API: numeric code plus message text."""
    def __init__(self, error_code, message):
        super().__init__()
        self.error_code = error_code
        self.message = message

    def __str__(self):
        # Short form: code only.
        return f"LastFM error {self.error_code}"

    def display(self):
        # Long form: code plus the API's message text.
        return f"LastFM error {self.error_code} : {self.message}"


class RendererError(commands.CommandError):
    """Error raised by the renderer component (details supplied at raise site)."""
    pass


class ServerTooBig(commands.CheckFailure):
    """Check failure: the guild's member count exceeds the supported size."""
    def __init__(self, member_count):
        super().__init__()
        self.member_count = member_count


class Blacklist(commands.CommandError):
    """Base class for all blacklist-related command refusals."""
    pass
class BlacklistedUser(Blacklist):
    """The invoking user is globally blacklisted from the bot."""
    def __init__(self):
        super().__init__()
        self.message = "You have been blacklisted from using Miso Bot"


class BlacklistedMember(Blacklist):
    """The invoking member is blacklisted by this server's moderators."""
    def __init__(self):
        super().__init__()
        self.message = "You have been blacklisted from using commands by the server moderators"


class BlacklistedGuild(Blacklist):
    """The whole server is blacklisted from the bot."""
    def __init__(self):
        super().__init__()
        self.message = "This server is blacklisted from using Miso Bot"


class BlacklistedCommand(Blacklist):
    """This specific command is disabled on this server."""
    def __init__(self):
        super().__init__()
        self.message = "This command has been disabled by the server moderators"


class BlacklistedChannel(Blacklist):
    """Command usage is disabled in this channel."""
    def __init__(self):
        super().__init__()
        self.message = "Command usage in this channel has been disabled by the server moderators"
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | modules/exceptions.py | Keyruu/miso-bot |
from flask import Flask, render_template
from db_connector.db_connector import connect_to_database, execute_query
app = Flask(__name__)
#the route is what you will type in browser
# The route path is what the browser requests; the function name is cosmetic.
@app.route('/hello')
def hello():
    """Trivial liveness check: return a greeting to the browser."""
    return "Hello world!"
@app.route('/')
def index():
    """Landing page pointing visitors at the real routes."""
    return "<i>Are you looking for /db-test or /hello ?</i>"
@app.route('/db-test')
def test_database_connection():
    """Connect with the configured credentials, select all classes, render them."""
    print("Executing a sample query on the database using the credentials from db_credentials.py")
    db_connection = connect_to_database()
    query = "SELECT * from classes;"
    result = execute_query(db_connection, query)
    return render_template('db_test.html', rows=result)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | db_connector/sample.py | DacinTitus/cs340_group15 |
import re
from vistautils.memory_amount import MemoryAmount, MemoryUnit
import pytest
# (unit, canonical suffix letter) pairs driving the parametrized parse tests.
UNIT_PARSE_TEST_PAIRS = [
    (MemoryUnit.KILOBYTES, "K"),
    (MemoryUnit.MEGABYTES, "M"),
    (MemoryUnit.GIGABYTES, "G"),
    (MemoryUnit.TERABYTES, "T"),
]


@pytest.mark.parametrize("reference_unit,string_to_parse", UNIT_PARSE_TEST_PAIRS)
def test_memory_unit(reference_unit: MemoryUnit, string_to_parse: str):
    """Each unit letter parses to the expected MemoryUnit, case-insensitively."""
    assert reference_unit == MemoryUnit.parse(string_to_parse)
    assert reference_unit == MemoryUnit.parse(string_to_parse.lower())
def test_bad_memory_unit():
    """Unknown unit strings raise RuntimeError with an explanatory message."""
    exception_pattern = re.compile("For a memory unit, expected one of.* but got .*")
    with pytest.raises(RuntimeError, match=exception_pattern):
        MemoryUnit.parse("A")
    with pytest.raises(RuntimeError, match=exception_pattern):
        MemoryUnit.parse("foo")
UNITS = [
    MemoryUnit.KILOBYTES,
    MemoryUnit.MEGABYTES,
    MemoryUnit.GIGABYTES,
    MemoryUnit.TERABYTES,
]
# (numeric value, its string form); spacing and an optional B/b suffix are
# combined with these below to cover every accepted spelling.
AMOUNTS = [(42, "42"), (1, "1")]
SPACES = ["", " "]
SUFFIXES = ["", "B", "b"]


@pytest.mark.parametrize("reference_amount,amount_string", AMOUNTS)
@pytest.mark.parametrize("reference_unit,unit_string", UNIT_PARSE_TEST_PAIRS)
@pytest.mark.parametrize("spaces", SPACES)
@pytest.mark.parametrize("suffix", SUFFIXES)
def test_memory_amount(
    reference_amount: int,
    amount_string: str,
    reference_unit: MemoryUnit,
    unit_string: str,
    spaces: str,
    suffix: str,
):
    """MemoryAmount.parse accepts '<amount>[space]<unit>[B|b]' in all combinations."""
    parsed = MemoryAmount.parse(f"{amount_string}{spaces}{unit_string}{suffix}")
    assert reference_amount == parsed.amount
    assert reference_unit == parsed.unit
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstr... | 3 | tests/test_memory_amount.py | isi-vista/vistautils |
"""
Dot product of two vectors implemented as parallel lists
"""
from operator import add, mul
from pyske.core.util import fun
__all__ = ['opt_dot_product', 'dot_product']
# ------------------- Dot Product Variant Example -------------
def dot_product(vector1, vector2):
    """
    Compute the dot product of two vectors.

    Multiplies element-wise via map2(mul, ...) then sums with reduce(add, 0).

    :param vector1: list of numbers representing a vector
    :param vector2: list of numbers representing a vector
    :return: the dot product of the two vectors
    """
    return vector1.map2(mul, vector2).reduce(add, 0)
# ------------------- Dot Product Example -------------------
def opt_dot_product(vector1, vector2, uncurry=fun.uncurry):
    """
    Compute the dot product of two vectors (zip/map/reduce variant).

    :param vector1: list of numbers representing a vector
    :param vector2: list of numbers representing a vector
    :param uncurry: (optional) adapter turning mul into a pair-taking function
    :return: the dot product of the two vectors

    Examples::

        >>> from pyske.core import PList
        >>> vector_1 = PList.init(lambda x: x, 10)
        >>> vector_2 = PList.init(lambda x: 1, 10)
        >>> opt_dot_product(vector_1, vector_2)
        45

        >>> from pyske.core import PList
        >>> vector_1 = PList.init(lambda x: x, 10)
        >>> vector_2 = PList.init(lambda x: 9 - x, 10)
        >>> opt_dot_product(vector_1, vector_2)
        120
    """
    return vector1.zip(vector2).map(uncurry(mul)).reduce(add, 0)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | pyske/examples/list/dot_product.py | YohannBaudet/PySke |
from django.http import HttpResponse
from django.shortcuts import render
from .models import Item, Category
# Create your views here.
def get_html(req):
    """Render the item form page (item.html)."""
    return render(req,"item.html")
def create_item(req):
    """Create an Item from the POST fields i_name, i_barcode and cate_id."""
    # Parse the submitted form parameters.
    form = req.POST
    item_name = form.get("i_name")
    item_barcode = form.get("i_barcode")
    category_id = int(form.get("cate_id"))
    # Insert the new item row.
    item = Item.objects.create(
        name=item_name,
        barcode=item_barcode,
        category_id=category_id
    )
    return HttpResponse("创建成功了{}".format(item.name))
def select_data(req):
    """Query items by exact name and render them (items.html).

    The commented lines are alternative queryset examples kept for reference.
    """
    # Filter by exact name.
    data = Item.objects.filter(name="陈二狗")
    # data = Item.objects.filter(name__endswith="可乐")
    # print(dir(data))
    # data = data.filter(name="百事可乐")
    # data = Item.objects.filter(id_gt=3)
    # data = Item.objects.filter(id_in=[1,3])
    # data = Item.objects.exclude(name="陈二狗")
    # data = Item.objects.all().order_by("-id")
    return render(req,"items.html",{"items":data})
def get_category(req):
    """Render all categories (cates.html)."""
    cates = Category.objects.all()
    return render(req,"cates.html",{"data":cates})
# Fetch item data by product category.
def get_item_by_c_id(req):
    """Render the items whose category matches the `c_id` GET parameter."""
    # Parse the c_id parameter of the GET request.
    c_id = int(req.GET.get("c_id"))
    # Fetch the item rows for that category.
    items = Item.objects.filter(category_id=c_id)
    return render(req,"items.html",{"items":items})
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | day02/app02/views.py | 940716tian/PythonStudy |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import subprocess
from setuptools import setup, find_packages
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DIR = os.path.join(BASE_DIR, 'amundsen_application', 'static')
def is_npm_installed() -> bool:
    """Return True when the npm CLI is runnable (`npm --version` exits 0)."""
    try:
        subprocess.check_call(['npm --version'], shell=True)
    except subprocess.CalledProcessError:
        return False
    return True
def build_js() -> None:
    """Install npm dependencies and build the frontend assets in PACKAGE_DIR.

    Failures are logged and swallowed (best effort), preserving the original
    behavior of letting a source-only install proceed.
    """
    if not is_npm_installed():
        logging.error('npm must be available')
    try:
        subprocess.check_call(['npm install'], cwd=PACKAGE_DIR, shell=True)
        subprocess.check_call(['npm run build'], cwd=PACKAGE_DIR, shell=True)
    except Exception as e:
        # FIX: logging.warn() is a deprecated alias; use logging.warning().
        logging.warning('Installation of npm dependencies failed')
        logging.warning(str(e))
# Build frontend assets as part of packaging (best effort; see build_js).
build_js()

# requirements.txt is the single source of install dependencies.
requirements_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')
with open(requirements_path) as requirements_file:
    requirements = requirements_file.readlines()

__version__ = '3.1.0'

setup(
    name='amundsen-frontend',
    version=__version__,
    description='Web UI for Amundsen',
    url='https://www.github.com/lyft/amundsenfrontendlibrary',
    maintainer='Lyft',
    maintainer_email='amundsen-dev@lyft.com',
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    dependency_links=[],
    install_requires=requirements,
    extras_require={
        'oidc': ['flaskoidc==0.1.1']
    },
    python_requires=">=3.6",
    entry_points="""
        [action_log.post_exec.plugin]
        logging_action_log=amundsen_application.log.action_log_callback:logging_action_log
    """,
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | setup.py | joshthoward/amundsenfrontendlibrary |
#encoding: utf-8
import logging
import time
# time.time() returns the timestamp in seconds since 1970-01-01 (the Unix epoch).
from .base import BasePlugin
logger = logging.getLogger(__name__)
class Heartbeat(BasePlugin):
    """Plugin emitting a heartbeat message containing the current Unix time."""

    def __init__(self, config, *args, **kwargs):
        # 'heartbeat' is the plugin name; 10 is presumably the interval in
        # seconds — confirm against BasePlugin's signature.
        super(Heartbeat, self).__init__(config, 'heartbeat', 10, *args, **kwargs)

    def make_msg(self):
        """Build the payload: {'type': 'heartbeat', 'content': {'time': <epoch secs>}}."""
        msg = {
            'type' : 'heartbeat',
            'content' : {
                'time' : time.time(),
            }
        }
        logger.debug('msg msg: %s', msg)
        return msg
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | agent/plugins/heartbeat.py | Nazicc/easyCMDB |
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import unittest
import shutil
import numpy as np
from ml.model import NumberRecognizeNN
from ml.data_processor import DataProcessor
from ml.trainer import Trainer
from ml.resource import Resource
class TestTrainer(unittest.TestCase):
    """Tests for Trainer plus an SVC baseline on the same data."""

    # Scratch directory created for the whole test class, removed afterwards.
    TEST_DIR = ""

    @classmethod
    def setUpClass(cls):
        path = os.path.join(os.path.dirname(__file__), "./test_trainer")
        if not os.path.isdir(path):
            os.mkdir(path)
        cls.TEST_DIR = path

    @classmethod
    def tearDownClass(cls):
        if os.path.isdir(cls.TEST_DIR):
            shutil.rmtree(cls.TEST_DIR)

    def test_train(self):
        """Smoke test: the model trains for a few epochs without error."""
        model = NumberRecognizeNN(Resource.INPUT_SIZE, Resource.OUTPUT_SIZE)
        r = Resource(self.TEST_DIR)
        trainer = Trainer(model, r)
        dp = DataProcessor()  # NOTE(review): unused here — confirm before removing
        data, target = r.load_training_data()
        print("Test Train the model")
        trainer.train(data, target, epoch=5)

    def test_baseline(self):
        """Fit an SVC on all-but-200 samples and report holdout accuracy."""
        from sklearn.svm import SVC
        from sklearn.metrics import accuracy_score
        r = Resource(self.TEST_DIR)
        dp = DataProcessor()
        data, target = r.load_training_data()
        dp.set_normalization_params(data)
        f_data, f_target = dp.format_x(data), dp.format_y(target)
        test_size = 200
        model = SVC()
        model.fit(f_data[:-test_size], f_target[:-test_size])
        predicted = model.predict(f_data[-test_size:])
        teacher = f_target[-test_size:]
        score = accuracy_score(teacher, predicted)
        print("Baseline score is {}".format(score))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | ml/tests/test_trainer.py | Karapyon/apli_env |
import asyncio
import logging
import os
import shutil
import unittest
import rlog
from omega.logreceivers.redis import RedisLogReceiver
from tests import init_test_env
class TestRedisLogging(unittest.IsolatedAsyncioTestCase):
    """End-to-end test: log records published to Redis are received and
    written to a size-rotated file by RedisLogReceiver."""

    async def asyncSetUp(self) -> None:
        self.cfg = init_test_env()

    async def test_redis_logging(self):
        """Five records through Redis should rotate into 3 files (max_bytes=20)."""
        # remove handlers set by config file, if there is.
        root = logging.getLogger()
        root.handlers.clear()

        channel = "test_redis_logging"
        redis_logger = logging.getLogger("test_redis")
        fmt = "%(asctime)s %(levelname)-1.1s %(process)d %(name)s:%(funcName)s:%(lineno)s | %(message)s"
        # Publisher side: a rlog handler pushing records to the Redis channel.
        handler = rlog.RedisHandler(
            channel=channel,
            level=logging.DEBUG,
            host="localhost",
            port="6379",
            formatter=logging.Formatter(fmt),
        )
        redis_logger.addHandler(handler)

        # Fresh output directory so the rotation count below is deterministic.
        _dir = "/tmp/omega/test_redis_logging"
        shutil.rmtree(_dir, ignore_errors=True)

        # Subscriber side: receives from the channel and writes rotated files.
        receiver = RedisLogReceiver(
            dsn="redis://localhost:6379",
            channel_name=channel,
            filename=f"{_dir}/omega.log",
            max_bytes=20,
            backup_count=2,
        )
        await receiver.start()

        for i in range(5):
            redis_logger.info("this is %sth test log", i)

        # Give the receiver time to drain the channel before stopping.
        await asyncio.sleep(0.5)
        await receiver.stop()
        # omega.log + 2 backups (backup_count=2, tiny max_bytes forces rotation).
        self.assertEqual(3, len(os.listdir(_dir)))
        with open(f"{_dir}/omega.log.2", "r", encoding="utf-8") as f:
            content = f.readlines()[0]
            msg = content.split("|")[1]
            self.assertEqual(" this is 2th test log\n", msg)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | tests/logreceivers/test_redis_logging.py | zillionare/zeta |
import numpy as np
def clean_data(df, out_df_dir=""):
    """Drop every column that contains missing values.

    The frame is modified in place and the same object is returned.

    :param df: pandas DataFrame to clean (mutated in place)
    :param out_df_dir: optional path; when non-empty, the cleaned frame
        is also written there as CSV
    :returns: the cleaned DataFrame (identical object to *df*)
    """
    df.dropna(axis=1, inplace=True)
    if out_df_dir:
        df.to_csv(out_df_dir)
    return df
# Calculate log change of daily price
def log_change(series):
    """Log return between the first and second element of *series*.

    :param series: two-element sequence of prices (e.g. a rolling window)
    :returns: log(series[1] / series[0])
    """
    ratio = series[1] / series[0]
    return np.log(ratio)
# Calculate correaltion
def calculate_cor(df, start, end):
    """Pearson correlation matrix of daily log returns over df[start:end].

    Each column is turned into a series of pairwise log changes via a
    2-row rolling window, then the columns are correlated.

    :param df: price DataFrame (one column per asset)
    :param start: slice start for the row range
    :param end: slice end for the row range
    :returns: DataFrame of pairwise Pearson correlations
    """
    log_returns = df[start:end].rolling(
        window=2,
        min_periods=2
    ).apply(
        log_change,
        raw=True
    )
    return log_returns.corr(method="pearson")
# Calculate profit
def take_profit(price, start, end):
    """Fractional profit from buying at index *start*, selling at *end*.

    :param price: pandas Series of prices
    :param start: positional index of the buy
    :param end: positional index of the sell
    :returns: (sell / buy) - 1
    """
    buy = price.iloc[start]
    sell = price.iloc[end]
    return sell / buy - 1
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | utils.py | Duy-Vu/stock-network |
"""
Conversion functions for the NATO Phonetic Alphabet.
"""
import re
# To save a lot of typing the code words are presented here
# as a dict, but feel free to change this if you'd like.
# Maps each letter and digit to its NATO code word.  Note that 3 and 9
# use the radiotelephony spellings "TREE" and "NINER" (not typos).
ALPHANUM_TO_NATO = {
    "A": "ALFA",
    "B": "BRAVO",
    "C": "CHARLIE",
    "D": "DELTA",
    "E": "ECHO",
    "F": "FOXTROT",
    "G": "GOLF",
    "H": "HOTEL",
    "I": "INDIA",
    "J": "JULIETT",
    "K": "KILO",
    "L": "LIMA",
    "M": "MIKE",
    "N": "NOVEMBER",
    "O": "OSCAR",
    "P": "PAPA",
    "Q": "QUEBEC",
    "R": "ROMEO",
    "S": "SIERRA",
    "T": "TANGO",
    "U": "UNIFORM",
    "V": "VICTOR",
    "W": "WHISKEY",
    "X": "XRAY",
    "Y": "YANKEE",
    "Z": "ZULU",
    "0": "ZERO",
    "1": "ONE",
    "2": "TWO",
    "3": "TREE",
    "4": "FOUR",
    "5": "FIVE",
    "6": "SIX",
    "7": "SEVEN",
    "8": "EIGHT",
    "9": "NINER",
}
# Inverse lookup: code word -> character.
NATO_TO_ALPHANUM = {v: k for k, v in ALPHANUM_TO_NATO.items()}
# Matches any single encodable character (input is upper-cased first).
ALPHANUM_RE = re.compile(r"[{}]".format("".join(ALPHANUM_TO_NATO.keys())))
# Matches any known code word (alternation over all values).
NATO_RE = re.compile(r"{0}".format("|".join(ALPHANUM_TO_NATO.values())))
def transmit(message: str) -> str:
    """
    Convert a message to a NATO code word transmission.

    Characters with no code word are silently dropped.
    """
    code_words = (
        ALPHANUM_TO_NATO[char]
        for char in ALPHANUM_RE.findall(message.upper())
    )
    return " ".join(code_words)
def receive(transmission: str) -> str:
    """
    Convert a NATO code word transmission back to a message.

    Tokens that are not known code words are silently ignored.
    """
    # The unused local `pattern` (a rebuild of NATO_RE's source) was
    # dead code and has been removed; the precompiled NATO_RE is used.
    return "".join(
        NATO_TO_ALPHANUM[word] for word in NATO_RE.findall(transmission)
    )
def transmit_encoded(plaintext: str) -> str:
    """
    Encode a message and transmit as NATO code words.
    """
    pass  # <- implement your function (exercise stub)
def receive_encoded(ciphertext: str) -> str:
    """
    Receive an encoded message via NATO code word transmission.
    """
    pass  # <- implement your function (exercise stub)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer... | 3 | exercises/python-2-b/.meta/3/python_2_b.py | ee7/exercism-research_experiment_1 |
from django.template.backends import django
from django.shortcuts import render, redirect
def main_board(request):
    """Render the main board page."""
    return render(request, 'main_page.html')
def redirect_main(request):
    """Permanently redirect (HTTP 301) to the main board view.

    NOTE(review): the URL name 'main_boar_url' looks like a typo for
    'main_board_url' — confirm against the project's urlconf before
    changing it.
    """
    return redirect('main_boar_url', permanent=True)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | students/k3342/laboratory_works/Nikonchuk_Anna/Lr1/minos/minos/views.py | nikonura/ITMO_ICT_WebProgramming_2020 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from set_path import set_path
stockrsm_path = set_path()
# PYTHON_ARGCOMPLETE_OK
import argcomplete, argparse
import subprocess
import os
import sys
from argcomplete.completers import ChoicesCompleter
from argcomplete.completers import EnvironCompleter
# Set path and import SibylStart
class SmartFormatter(argparse.HelpFormatter):
    """Help formatter that keeps explicit newlines in 'R|'-prefixed text.

    Help text starting with 'R|' is split on its own newlines ("raw"
    mode); anything else falls back to argparse's default line wrapping.
    """

    def _split_lines(self, text, width):
        if text.startswith('R|'):
            # Raw mode: honor the author's own line breaks.
            return text[2:].splitlines()
        return super()._split_lines(text, width)
def main():
    """Run one tcp-client trial test selected via --scenario.

    Side effects: sets $STOCKRSMPATH, rewrites sys.argv so that
    twisted.trial sees only the chosen test id, and prepends the
    current working directory to sys.path.
    """
    trial_class = 'c2w.test.protocol.tcp_client_test.c2wTcpChatClientTestCase'
    parser = argparse.ArgumentParser(description='Trial tcp client test',
                                     formatter_class=SmartFormatter)
    # Available scenarios are listed one per line in a data file.
    with open(os.path.join(stockrsm_path, "data",
                           "c2w",
                           "test",
                           "tcp_client_tests_list.txt")) as tests_list:
        tests = list(l.rstrip('\n') for l in tests_list.readlines())
    if not tests:
        print("Sorry, no test available for the moment")
        exit()
    # stupid hack for [""] + tests for getting the alignment of values.
    # To be modified.
    parser.add_argument("--scenario",
                        help='''R|Scenario name. Allowed values are
''' + '\n'.join([""] + tests,)
                        ).completer = ChoicesCompleter(tuple(tests))
    argcomplete.autocomplete(parser)
    options = parser.parse_args()
    os.environ['STOCKRSMPATH'] = stockrsm_path
    # NOTE(review): drops the last two argv entries before appending the
    # trial test id — this assumes the script was invoked with exactly
    # "--scenario <name>"; confirm for other invocation forms.
    del sys.argv[-1]
    del sys.argv[-1]
    sys.argv.append(trial_class + '.test_' + options.scenario)
    sys.path.insert(0, os.path.abspath(os.getcwd()))
    from twisted.scripts.trial import run
    run()
# Script entry point.
if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | c2w/scripts/c2w_test_tcp_client.py | JieDiscovery/ChatWhileWatching |
import time, datetime
from app import db
class ServerInfo(db.Model):
    """SQLAlchemy model describing a registered game server."""

    __tablename__ = 'servers'
    # (ip, port) together uniquely identify a server.
    __table_args__ = (db.PrimaryKeyConstraint('ip', 'port', name='_ip_port_pk'),)

    ip = db.Column(db.String(128), nullable=False)
    port = db.Column(db.Integer, nullable=False)
    info = db.Column(db.String(1024), nullable=True)
    player_count = db.Column(db.Integer, nullable=False)
    player_total = db.Column(db.Integer, nullable=False)
    servermod_version = db.Column(db.String(32), nullable=True)
    pastebin_url = db.Column(db.String(32), nullable=True)
    game_version = db.Column(db.String(32), nullable=True)
    # Automatically refreshed by the database on every UPDATE.
    date_updated = db.Column(db.DateTime, default=db.func.current_timestamp(),
                             onupdate=db.func.current_timestamp())

    def __getitem__(self, item):
        """Allow dict-style read access to column attributes."""
        return getattr(self, item)

    def __setitem__(self, key, value):
        """Allow dict-style write access to column attributes.

        Uses setattr so SQLAlchemy's attribute instrumentation observes
        the change; the previous direct write into self.__dict__
        bypassed change tracking and was asymmetric with __getitem__.
        """
        setattr(self, key, value)

    @property
    def serialize(self):
        """Return a JSON-serializable dict of this server's state;
        date_updated is converted to a Unix timestamp."""
        return {
            "ip": self.ip,
            "port": self.port,
            "info": self.info,
            "player_count": self.player_count,
            "player_total": self.player_total,
            "game_version": self.game_version,
            "servermod_version": self.servermod_version,
            "pastebin_url": self.pastebin_url,
            "date_updated": time.mktime(self.date_updated.timetuple())
        }

    def prettify_seconds(self, seconds):
        """Render a second count as its largest whole unit, e.g. '3 hours'."""
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        if d:
            return "{} days".format(d)
        if h:
            return "{} hours".format(h)
        if m:
            return "{} minutes".format(m)
        return "{} seconds".format(s)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | app/modules/serverinfo/models.py | sappykun/scpsl-masterserver |
"""Aioamqp tests"""
import unittest
import socket
from aioamqp import connect
from aioamqp.protocol import OPEN
from . import testing, testcase
class AmqpConnectionTestCase(testcase.RabbitTestCase, unittest.TestCase):
    """Connection-level tests against a live RabbitMQ broker.

    Each test opens a real AMQP connection (the virtualhost and loop
    come from the RabbitTestCase fixture) and closes it on the way out.
    """

    @testing.coroutine
    def test_connect(self):
        # A successful handshake leaves the protocol OPEN and populates
        # the server-properties announced by the broker.
        _transport, proto = yield from connect(virtualhost=self.vhost, loop=self.loop)
        self.assertEqual(proto.state, OPEN)
        self.assertIsNotNone(proto.server_properties)
        yield from proto.close()

    @testing.coroutine
    def test_connect_tuning(self):
        # frame_max should be higher than 131072
        frame_max = 131072
        channel_max = 10
        heartbeat = 100
        _transport, proto = yield from connect(
            virtualhost=self.vhost,
            loop=self.loop,
            channel_max=channel_max,
            frame_max=frame_max,
            heartbeat=heartbeat,
        )
        self.assertEqual(proto.state, OPEN)
        self.assertIsNotNone(proto.server_properties)
        # The requested tuning values must round-trip unchanged.
        # (The attribute really is spelled "connection_tunning" upstream.)
        self.assertDictEqual(proto.connection_tunning, {
            'frame_max': frame_max,
            'channel_max': channel_max,
            'heartbeat': heartbeat
        })
        self.assertEqual(proto.server_channel_max, channel_max)
        self.assertEqual(proto.server_frame_max, frame_max)
        self.assertEqual(proto.server_heartbeat, heartbeat)
        yield from proto.close()

    @testing.coroutine
    def test_socket_nodelay(self):
        # The client should enable TCP_NODELAY (disable Nagle) on its socket.
        transport, proto = yield from connect(virtualhost=self.vhost, loop=self.loop)
        sock = transport.get_extra_info('socket')
        opt_val = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
        self.assertNotEqual(opt_val, 0)
        yield from proto.close()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | aioamqp/tests/test_connect.py | michael-k/aioamqp |
from disnake.ext.commands import Bot as _Bot
from loguru import logger
from .status import StatusHeartbeater
class Bot(_Bot):
    """disnake Bot subclass that runs a status heartbeater alongside the
    gateway connection and logs lifecycle events."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Background status reporter, started when the bot starts.
        self._status = StatusHeartbeater()

    async def start(self, *args, **kwargs) -> None:
        """Kick off the heartbeater, then connect to Discord."""
        self._status.run()
        await super().start(*args, **kwargs)

    async def on_connect(self) -> None:
        """Log the initial gateway connection."""
        logger.info("Connected to the Discord Gateway.")

    async def on_ready(self) -> None:
        """Log the READY event with basic session info."""
        logger.info(f"READY event received, connected as {self.user} with {len(self.guilds)} guilds.")

    def load_extension(self, ext: str) -> None:
        """Load an extension and log the fact."""
        super().load_extension(ext)
        logger.info(f"Loaded extension {ext}.")
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer"... | 3 | src/impl/bot/bot.py | vcokltfre/hbot-rewrite |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import numpy as np
import xarray as xr
def log1pexp(x: np.ndarray) -> np.ndarray:
    """
    Compute log(1 + exp(x)) in a numerically stable way,
    see https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf eqn 10

    Small arguments use log1p(exp(x)) directly; for x >= 18 the
    asymptotic form x + exp(-x) avoids overflow in exp(x).

    :param x: numpy array of numbers
    :returns: log(1 + exp(x))
    """
    out = np.zeros_like(x)
    small = x < 18
    big = x >= 18
    out[small] = np.log1p(np.exp(x[small]))
    out[big] = x[big] + np.exp(-x[big])
    return out
def log1mexpm(x: np.ndarray) -> np.ndarray:
    """
    Compute log(1 - exp(-x)) in a numerically stable way for x > 0,
    see https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf eqn 7

    Below the log(2) ~ 0.693 crossover, log(-expm1(-x)) is the accurate
    form; above it, log1p(-exp(-x)) is.

    :param x: numpy array of numbers >= 0
    :returns: log(1 - exp(-x))
    """
    result = np.zeros_like(x)
    below = x < 0.693
    above = x >= 0.693
    result[below] = np.log(-(np.expm1(-x[below])))
    result[above] = np.log1p(-(np.exp(-x[above])))
    return result
def split_train_test(
    data: xr.Dataset, coord_name: str, train_frac: float
) -> Tuple[xr.Dataset, xr.Dataset]:
    """
    Splice a dataset into two along the given coordinate
    and update n in the attributes.

    :param data: A dataset object which is to be split
    :param coord_name: The coordinate on which the data is going to be sliced
    :param train_frac: Fraction of data to be given to training
    :returns: The training and test datasets.
    """
    n_train = int(train_frac * len(data.coords[coord_name]))
    # Positional split along the chosen coordinate.
    train_split = data[{coord_name: slice(None, n_train)}]
    test_split = data[{coord_name: slice(n_train, None)}]
    # Copy the original attrs onto both halves, then fix up "n".
    train_split.attrs = data.attrs.copy()
    test_split.attrs = data.attrs.copy()
    train_split.attrs["n"] = n_train
    test_split.attrs["n"] = data.attrs["n"] - n_train
    return train_split, test_split
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | pplbench/models/utils.py | dmitryvinn/pplbench |
"""
PolygonPShapeOOP.
Wrapping a PShape inside a custom class
and demonstrating how we can have a multiple objects each
using the same PShape.
"""
from polygon import Polygon
# A list of objects
polygons = []
def setup():
    """Processing setup: build one star PShape and 25 Polygon wrappers
    that all share it."""
    size(640, 360, P2D)
    smooth()
    # Build the star shape once from its vertex list.
    star = createShape()
    star.beginShape()
    star.noStroke()
    star.fill(0, 127)
    star_vertices = [
        (0, -50), (14, -20), (47, -15), (23, 7), (29, 40),
        (0, 25), (-29, 40), (-23, 7), (-47, -15), (-14, -20),
    ]
    for vx, vy in star_vertices:
        star.vertex(vx, vy)
    star.endShape(CLOSE)
    # Pass in a reference to the same PShape for every Polygon.
    # We could make polygons with different PShapes.
    polygons.extend(Polygon(star) for _ in range(25))
def draw():
    """Processing draw loop: clear the frame, then render and advance
    every polygon."""
    background(255)
    # Display and move them all.
    for poly in polygons:
        poly.display()
        poly.move()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | mode/examples/Topics/Create Shapes/PolygonPShapeOOP2/PolygonPShapeOOP2.pyde | timgates42/processing.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.