source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
# LIBTBX_SET_DISPATCHER_NAME iotbx.pdb.sort_atoms
from __future__ import absolute_import, division, print_function
from libtbx.utils import Usage
import sys
import iotbx.pdb
import mmtbx.model
master_phil_str = """
file_name = None
.type = path
.multiple = False
.optional = False
.style = hidden
"""
def show_usage():
help_msg = """\
iotbx.pdb.sort_atoms model.pdb
Sort atoms in residues so they will be in the same order in all residues.
Also renumbers atoms (atom serial number field 7-11 columns)."""
raise Usage(help_msg)
def run(args):
if len(args) == 0:
show_usage()
return
inp_fn = args[0]
pdb_input = iotbx.pdb.input(
file_name=inp_fn,
source_info=None,
raise_sorry_if_format_error=True)
model = mmtbx.model.manager(
model_input = pdb_input)
out_fn_prefix = inp_fn
if inp_fn.endswith(".pdb") or inp_fn.endswith(".cif"):
out_fn_prefix = inp_fn[:-4]
out_fn = out_fn_prefix + "_sorted"
txt = ""
if model.input_format_was_cif():
out_fn += ".cif"
txt = model.model_as_mmcif()
else:
out_fn += ".pdb"
txt = model.model_as_pdb()
with open(out_fn, 'w') as f:
f.write(txt)
if (__name__ == "__main__"):
run(sys.argv[1:])
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project |
import nose
import binary_heap
BinaryHeap = binary_heap.BinaryHeap
def test_binary_heap_101():
b = BinaryHeap()
nose.tools.assert_is_instance(b, BinaryHeap)
def test_binary_heap_num_entries():
b = BinaryHeap()
nose.tools.assert_equal(b.num_entries(), 0)
def test_binary_heap_insert():
b = BinaryHeap()
b.insert("Paolo")
b.insert("CIao")
nose.tools.assert_equal(b.num_entries(), 2)
def test_binary_heap_pop():
b = BinaryHeap()
b.insert("Paolo")
b.insert("CIao")
max_el = b.pop()
nose.tools.assert_equal(b.num_entries(), 1)
nose.tools.assert_equal(max_el, "CIao")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | test/test_binary_heap.py | paolodelia99/Python-C-Algorithms |
import logging
import sys
import gym
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
requests_logger = logging.getLogger('requests')
# Set up the default handler
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
# We need to take in the gym logger explicitly since this is called
# at initialization time.
def logger_setup(gym_logger):
root_logger.addHandler(handler)
gym_logger.setLevel(logging.INFO)
# When set to INFO, this will print out the hostname of every
# connection it makes.
# requests_logger.setLevel(logging.WARN)
def undo_logger_setup():
"""Undoes the automatic logging setup done by OpenAI Gym. You should call
this function if you want to manually configure logging
yourself. Typical usage would involve putting something like the
following at the top of your script:
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr))
"""
root_logger.removeHandler(handler)
gym.logger.setLevel(logging.NOTSET)
requests_logger.setLevel(logging.NOTSET)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | gym/configuration.py | JialinMao/gym-ww |
from .plugin.core.main import startup, shutdown
# TODO: narrow down imports
from .plugin.core.panels import *
from .plugin.core.registry import LspRestartClientCommand
from .plugin.core.documents import *
from .plugin.panels import *
from .plugin.edit import *
from .plugin.completion import *
from .plugin.diagnostics import *
from .plugin.configuration import *
from .plugin.formatting import *
from .plugin.highlights import *
from .plugin.goto import *
from .plugin.hover import *
from .plugin.references import *
from .plugin.signature_help import *
from .plugin.code_actions import *
from .plugin.color import *
from .plugin.symbols import *
from .plugin.rename import *
from .plugin.execute_command import *
from .plugin.workspace_symbol import *
def plugin_loaded():
startup()
def plugin_unloaded():
shutdown()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | boot.py | dndrsn/LSP |
from django.core.management.base import BaseCommand
from django.utils import timezone
from url_migration import models
class Command(BaseCommand):
def handle(self, **options):
for rule in models.UrlRegexpMapping.objects.filter(last_usage__isnull=False):
self._remove_if_unused(rule)
for rule in models.UrlMapping.objects.filter(last_usage__isnull=False):
self._remove_if_unused(rule)
def _remove_if_unused(self, rule):
if rule.last_usage.used_date + rule.expire_after < timezone.now():
self.stdout.write('Removing expired rule %s' % str(rule))
rule.delete()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | url_migration/management/commands/remove_expired_redirects.py | riklaunim/django-url-migration |
from app.service import token_service
from app.service import umm_client
from app.service import nexus_client
from app.domain.solution import Solution
from app.domain.document import Document
def upload_document(**args):
http_request = args.get('http_request')
token = token_service.get_token(http_request)
if not token.is_valid:
raise Exception('403 Forbidden')
user_login = token.username
file_body = args.get('stream')
solution_uuid = args.get('path_arg')
res = umm_client.get_solutions(solution_uuid, token.jwt)
if res['status'] != 'ok' or res['value']['total'] < 1:
raise Exception('文档模型不存在')
solution = Solution()
solution.__dict__ = res['value']['results'][0]
if solution.authorLogin != user_login:
raise Exception('403 Forbidden')
filename = http_request.files.get('upload_document')[0].filename
short_url = solution.authorLogin + '/' + solution_uuid + '/document/' + filename
long_url = nexus_client.upload_artifact_data(short_url, file_body)
if long_url is None:
raise Exception('向Nexus上传文件出错!')
document = Document()
document.solutionUuid = solution.uuid
document.name = filename
document.url = long_url
document.fileSize = len(file_body)
umm_client.create_document(document, token.jwt)
return 0
def delete_document(**args):
token = token_service.get_token(args.get('http_request'))
if not token.is_valid:
raise Exception('403 Forbidden')
user_login = token.username
has_role = token.has_role('ROLE_MANAGER')
document = Document()
document.__dict__ = umm_client.find_document(args.get('documentId'))['value']
if user_login != document.authorLogin and not has_role:
raise Exception('403 Forbidden')
nexus_client.delete_artifact(document.url)
umm_client.delete_document(args.get('documentId'), jwt=token.jwt)
return 0
def download_document(**args):
result = nexus_client.get_artifact(args.get('url'))
return result
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | umu-python/app/service/document_service.py | corner4world/cubeai |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from s3dump.utils._text import to_native
class S3DumpError(Exception):
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
super(S3DumpError, self).__init__(message)
self.message = "%s" % to_native(message)
if orig_exc:
self.orig_exc = orig_exc
def __str__(self):
return self.message
def __repr__(self):
return self.message
class S3DumpAssertionError(S3DumpError, AssertionError):
"""Invalid assertion."""
pass
class S3DumpOptionsError(S3DumpError):
"""Bad or incomplete options passed."""
pass
class S3DumpParserError(S3DumpError):
"""Something was detected early that is wrong about a playbook or data file."""
pass
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | lib/s3dump/errors/__init__.py | Placidina/s3dump |
import sys, os, json
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
# Game loop functions
def render(game,current):
''' Displays the current room '''
print('You are in the ' + game['rooms'][current]['name'])
print(game['rooms'][current]['desc'])
def getInput():
''' Asks the user for input and returns a stripped, uppercase version of what they typed '''
response = input('What would you like to do? ').strip().upper()
return response
def update(response,game,current):
''' Process the input and update the state of the world '''
for e in game['rooms'][current]['exits']:
if response == e['verb']:
current = e['target']
return current
def main():
game = {}
with open('house.json') as json_file:
game = json.load(json_file)
current = 'START'
quit = False
while not quit:
render(game,current)
response = getInput()
current = update(response,game,current)
if response == 'QUIT':
quit = True
if __name__ == '__main__':
main() | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
... | 3 | main.py | BraffordHunter/03-Text-Adventure-2 |
def getBestBloker(si, gat):
global relations
for g in gat:
if g in relations[si]:
return [si, g]
for g in gat:
if len(relations[g]) > 0:
return [g, relations[g][0]]
return [0, 0]
def unsetter(c1, c2):
global relations
relations[c1].remove(c2)
relations[c2].remove(c1)
n, l, e = [int(i) for i in input().split()]
gat = []
relations = {}
for i in range(l):
n1, n2 = [int(j) for j in input().split()]
relations.setdefault(n1, []).append(n2)
relations.setdefault(n2, []).append(n1)
for i in range(e):
ei = int(input())
gat.append(ei)
# game loop
while True:
si = int(input())
c1, c2 = getBestBloker(si, gat)
unsetter(c1, c2)
print("{0} {1}".format(c1, c2))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | Medium/Skynet_Revolution_Episode_1.py | Thomaw/CodeinGame |
# -*- coding: utf-8 -*-
#
# osc2rtmidi/device.py
#
"""MIDI device abstraction classes."""
import logging
import time
from rtmidi.midiutil import open_midioutput
__all__ = ("RtMidiDevice",)
log = logging.getLogger(__name__)
class RtMidiDevice(object):
"""Provides a common API for different MIDI driver implementations."""
def __init__(self, name="RtMidiDevice", port=None, portname=None):
self.name = name
self.port = port
self.portname = portname
self._output = None
def __str__(self):
return self.portname
def open_output(self):
self._output, self.portname = open_midioutput(self.port, interactive=False,
client_name=self.name, use_virtual=True)
def close_output(self):
if self._output is not None:
self._output.close_port()
def send(self, events):
if self._output:
for ev in events:
self._output.send_message(ev)
def send_sysex(self, msg):
if self._output:
self._output.send_message([ord(c) for c in msg])
@classmethod
def time(cls):
return time.time() / 1000.
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | osc2rtmidi/device.py | SpotlightKid/osc2rtmidi |
from mythic_payloadtype_container.MythicCommandBase import *
from mythic_payloadtype_container.MythicRPC import *
import json
class ShellArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"command": CommandParameter(
name="command", type=ParameterType.String, description="Command to run"
)
}
async def parse_arguments(self):
if len(self.command_line) > 0:
if self.command_line[0] == "{":
self.load_args_from_json_string(self.command_line)
else:
self.add_arg("command", self.command_line)
else:
raise ValueError("Missing arguments")
class ShellCommand(CommandBase):
cmd = "shell"
needs_admin = False
help_cmd = "shell {command}"
description = "This uses the execSync() Node.js function to execute arbitrary shell commands."
version = 1
author = "@mattreduce"
attackmapping = ["T1059"]
argument_class = ShellArguments
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicRPC().execute("create_artifact", task_id=task.id,
artifact="{}".format(task.args.get_arg("command")),
artifact_type="Process Create",
)
task.display_params = task.args.get_arg("command")
return task
async def process_response(self, response: AgentResponse):
pass
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | Payload_Type/venus/mythic/agent_functions/shell.py | MythicAgents/venus |
import tensorflow as tf
# ===============================================
# Previously was snippets.py of: 3_2_RNNs
# ===============================================
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
# Get 4 copies of feeding [inputs, m_prev] through the "Sigma" diagram.
# Note that each copy has its own distinct set of weights.
lstm_matrix = self._linear1([inputs, m_prev])
i, j, f, o = tf.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Feed each of the gates through a sigmoid.
i = sigmoid(i)
f = sigmoid(f + self._forget_bias)
o = sigmoid(o)
c = f * c_prev + i * self._activation(j)
m = o * self._activation(c)
new_state = LSTMStateTuple(c, m)
return m, new_state
# ===============================================
# RNN illustration
# ===============================================
hidden_size = 32
def rnn_step(x, h_prev):
# Project inputs to each have dimension hidden_size.
combined_inputs = tf.layers.Dense(hidden_size)(tf.concat([x, h_prev], axis=1))
# Compute the next hidden state.
h = tf.tanh(combined_inputs)
return h
# ===============================================
# Bidirectional RNNs
# ===============================================
outputs_tuple, final_state_tuple = tf.nn.bidirectional_dynamic_rnn(
cell_fw=tf.nn.rnn_cell.LSTMCell(128),
cell_bw=tf.nn.rnn_cell.LSTMCell(128),
inputs=inputs,
dtype=tf.float32)
# Concatenate the forward and backward outputs.
# Shape: (batch_size, max_seq_len, 2 * state_size)
outputs = tf.concat(outputs_tuple, -1)
# ===============================================
# Stacked RNNs
# ===============================================
def lstm_cell():
return tf.nn.rnn_cell.LSTMCell(128)
cell = tf.nn.rnn_cell.MultiRNNCell([
lstm_cell() for _ in range(2)])
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | section3/snippets.py | joyjeni/-Learn-Artificial-Intelligence-with-TensorFlow |
class Test:
def __init__(self):
self.foo = 11
self._bar = 23
self.__baz = 42
def get_gg(self):
return __gg
class ExtendedTest(Test):
def __init__(self):
super().__init__()
self.foo = 'overridden foo'
self._bar = 'overridden _bar'
self.__baz = 'overridden __baz'
def main():
t2 = ExtendedTest()
print(t2.foo)
print(t2._bar)
# print(t2.__baz)
print(t2._ExtendedTest__baz)
print(t2._Test__baz)
if __name__ == "__main__":
_Test__gg = 'gg'
print(Test().get_gg())
main() | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | python/underscore.py | mythnc/lab |
from abc import ABC
class AbcFacade(ABC):
"""Any interface will expect to be able to invoke the following methods."""
def count_rows(self):
pass
def get_rows(self):
pass
def get_last_workday(self):
pass
def delete_history(self):
pass
def disconnect(self):
pass
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | time_management/facade_abc.py | artorias111/time-management |
import json
from config import db
from models import UserModel
def table_record_to_json(record):
modelClass = type(record)
columns = [record for record in filter(lambda item: not item.startswith('_'),modelClass.__dict__)]
json_value = {column_name: str(getattr(record, column_name))for column_name in columns}
return json_value
def table_to_json(table):
return { "data": [table_record_to_json(record) for record in table] } | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | util.py | codefordc/us-congress-pizza-flag-tracker |
def possible_moves(current_word, words_list):
answer_set = set()
for word in words_list:
if word.startswith(current_word):
answer_set.add(word[len(current_word)])
return answer_set
def rec(words_list, current_word=''):
if current_word in words_list:
return {current_word,}
if len(current_word) % 2 == 1: # если ходит нечётный то ему для победы нужен вариант с нечётным количеством букв
win_variants = set()
lose_variants = set()
for word in possible_moves(current_word, words_list):
res = rec(words_list, current_word + word)
if len(list(res)[0]) % 2 == 0:
win_variants |= res
else:
lose_variants |= res
else:
win_variants = set()
lose_variants = set()
for word in possible_moves(current_word, words_list):
res = rec(words_list, current_word + word)
if len(list(res)[0]) % 2 == 1:
win_variants |= res
else:
lose_variants |= res
if win_variants:
return win_variants
else:
return lose_variants
def who_has_win_strategy(words_list):
return 1 - len(list(rec(words_list))[0]) % 2
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding s... | 3 | 04-A.Kraevskiy/2021-10-09-stone-game/words.py | Stankist04/2021-11-1 |
# -*- coding:utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import random
import string
import pytest
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
browsers = {
# 'firefox': webdriver.Firefox,
# 'chrome': webdriver.Chrome,
'phantomjs': webdriver.PhantomJS,
}
def random_string(n):
return ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(n)
)
@pytest.fixture(scope='session',
params=browsers.keys())
def driver(request):
if 'DISPLAY' not in os.environ:
pytest.skip('Test requires display server (export DISPLAY)')
try:
b = browsers[request.param]()
except WebDriverException as e:
pytest.skip(e)
else:
b.set_window_size(1200, 800)
request.addfinalizer(lambda *args: b.quit())
return b
@pytest.fixture
def testpages(db):
from tests.testapp.models import TestPage
return TestPage.objects.bulk_create(
[TestPage(pk=pk, content1=random_string(50), content2=random_string(50)) for pk in range(10)]
)
@pytest.fixture
def flatpages(db):
from django.contrib.flatpages.models import FlatPage
return FlatPage.objects.bulk_create(
[FlatPage(pk=pk, title=random_string(50), url=random_string(50)) for pk in range(1)]
)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | tests/conftest.py | vanadium23/django-tinymce-lite |
#
# Copyright (c) 2020 - Neptunium Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from social_fabric.config_repo import ConfigRepo
class NetworkTemplateProcessor:
def __init__(self):
self.file_loader = FileSystemLoader(ConfigRepo.TEMPLATE_SRC_DIR)
self.env = Environment(loader=self.file_loader, undefined=StrictUndefined)
def process(self, filename, *args, **kwargs):
template = self.env.get_template(filename)
return template.render(*args, **kwargs)
if __name__ == '__main__':
config = {}
net_template_processor = NetworkTemplateProcessor()
output = net_template_processor.process('docker-compose-ca.yaml',
BCC_NETWORK_DOMAIN='orsnet',
BCC_CA_ADDR='ca.theobjects.com',
BCC_CA_PORT='7055',
BCC_CA_PUBLIC_CERT='ca.theobjects.com.cert.pem',
BCC_CA_PRIVATE_KEY='ca.theobjects.com.priv.key',
BCC_CA_ADMIN_NAME='admin', BCC_CA_ADMIN_PASSWORD='adminpw')
with open('/tmp/docker-compose.yaml', 'w') as f:
f.write(output)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | social_fabric/network_template_processor.py | social-fabric/social-fabric |
from django import forms
from .models import SalesDiagnostic, Product, Headers
from django.contrib.auth.models import User
class ReportForm(forms.Form):
choices = Product.objects.values("vendor")
VENDOR_OPTIONS = ()
for i in choices:
choice = (i['vendor'], i['vendor'])
if choice not in VENDOR_OPTIONS:
VENDOR_OPTIONS += (choice),
headData = Headers.objects.values()
OPTIONS = ()
for i in headData:
choice = (i['database_header'], i['amazon_header'])
if i['database_header'] != 'asin':
OPTIONS += (choice),
vendor = forms.ChoiceField(choices=VENDOR_OPTIONS)
report_date = forms.DateField(widget=forms.DateInput)
headers = forms.MultipleChoiceField(widget=forms.SelectMultiple, choices=OPTIONS)
def return_vend(self):
self.vendor()
def __init__(self, user, *args, **kwargs):
super(ReportForm, self).__init__(*args, **kwargs)
self.user = user
# assign a (computed, I assume) default value to the choice field
self.initial['headers'] = ['model_number', 'asin_name', 'shipped_cogs', 'shipped_units', 'sellable_oh_inven',
'aged_90_sellable', 'repl_category', 'impressions', 'clicks', 'ctr', 'cpc', 'spend', 'f_day_sales',
'f_day_orders', 'f_day_units', 'week_forecast']
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | reporting/forms.py | dgmarko/django_jdsreporting |
#!/usr/bin/env python3
import hashlib
def md5(s: str) -> str:
h = hashlib.md5()
h.update(s.encode("utf-8"))
return h.hexdigest()
def mine_coin(key: str) -> int:
count = 1
while not md5(key + str(count)).startswith("000000"):
count += 1
return count
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | 2015/day04.py | 0x4448/advent-of-code |
import os
import json
import tempfile
import shutil
from lxml import etree
from rest_framework import status
from hs_core.hydroshare import resource
from .base import HSRESTTestCase
class TestResourceMetadata(HSRESTTestCase):
def setUp(self):
super(TestResourceMetadata, self).setUp()
self.rtype = 'GenericResource'
self.title = 'My Test resource'
res = resource.create_resource(self.rtype,
self.user,
self.title)
self.pid = res.short_id
self.resources_to_delete.append(self.pid)
def test_get_sysmeta(self):
# Get the resource system metadata
sysmeta_url = "/hsapi/resource/{res_id}/sysmeta/".format(res_id=self.pid)
response = self.client.get(sysmeta_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
content = json.loads(response.content.decode())
self.assertEqual(content['resource_type'], self.rtype)
self.assertEqual(content['resource_title'], self.title)
res_tail = '/' + os.path.join('resource', self.pid) + '/'
self.assertTrue(content['resource_url'].startswith('http://'))
self.assertTrue(content['resource_url'].endswith(res_tail))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | hs_core/tests/api/rest/test_resource_meta.py | hydroshare/hydroshare |
import os
import math
import csv
import tqdm
import argparse
"""
超新星跑分程序:用于对比submit.csv与list.csv的结果,计算得分
由于训练和测试都在使用这个集合,需要注意实际测试如果与训练集分布不一致,那么分数可能稍低
用于计算部分的得分
"""
parser = argparse.ArgumentParser(description="compare with --csv")
parser.add_argument('--csv',type=str,default="test_valid.csv")
args = parser.parse_args()
submitfile = "./submit.csv"
listfile = args.csv
print("comparing %s and %s." % (submitfile,listfile))
if not os.path.exists(submitfile) or not os.path.exists(listfile):
print("%s and %s don't exist"%(submitfile,listfile))
fsubmit = open(submitfile,'r')
flist = open(listfile,'r')
reader_submit = list(csv.reader(fsubmit))[1:]
reader_list = list(csv.reader(flist))[:]
if len(reader_submit) != len(reader_list):
print("size don't match! submit:list",len(reader_submit),"!=", len(reader_list))
else:
print("submit and list size match!")
def dist(x,y,x1,y1):
if math.sqrt((int(float(x))-int(float(x1)))*(int(float(x))-int(float(x1)))+(int(float(y))-int(float(y1)))*(int(float(y))-int(float(y1)))) < 15:
return True
else:
return False
total = len(reader_list)
right = 0
def getID(name):
for i in range(len(reader_submit)):
if reader_submit[i][0] == name:
return i
return 0
for name, x , y ,_ in reader_list:
target = reader_submit[getID(name)]
if len(target) < 8:
print(name,"is not enough")
continue
else:
x1,y1 = int(target[1]),int(target[2])
x2,y2 = int(target[3]),int(target[4])
x3,y3 = int(target[5]),int(target[6])
x ,y = int(float(x)), int(float(y))
if target[7] == "1":
if dist(x,y,x1,y1) or dist(x,y,x2,y2) or dist(x,y,x3,y3):
right = right + 1
if right % 2 == 0:
print("\r real time score:%.3f"%(float(right)/float(total+1)),end="")
print("\nright:%d total:%d score:%.3f"%(right,total,float(right)/float(int(float(total))+1)))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | score.py | pprp/faster-rcnn.Supernova |
from django.shortcuts import render, redirect
import crawler
from calendarapp.models import Event
from crawler import Subject
import datetime
# Create your views here.
def connect_everytime(request):
return render(request, 'connect-everytime.html')
def post(request):
if request.method == "POST":
user_url = request.POST.get("user-code")
print("사용자 요청 URL : " + user_url)
timetable_list = crawler.run(user_url)
if timetable_list == None:
return render(request, 'error-url.html', {'user_url': user_url})
info_list = crawler.lecture_list(timetable_list)
# print(info_list)
this_monday = datetime.date(2019, 9, 2)
while this_monday < datetime.date(2019, 12, 31):
for i in timetable_list: #과목별
for j in i.dates: #날자별
d = this_monday + datetime.timedelta(days=int(j['day']))
hour, min = crawler.calc_time(int(j['start_time']))
s = datetime.time(hour, min, 0)
hour, min = crawler.calc_time(int(j['end_time']))
e = datetime.time(hour, min, 0)
start = datetime.datetime.combine(d, s)
end = datetime.datetime.combine(d, e)
Event(owner=request.user, title=i.name, place=i.place, start=start, end=end, is_from_timetable=True).save()
this_monday += datetime.timedelta(days=7)
return render(request, 'check-info.html', {'info_list': info_list})
# form = PostForm(request.POST)
# if form.is_valid():
# lotto = form.save(commit = False)
# lotto.generate()
# return redirect('connect-everytime')
else:
return render(request, "error.html")
def howto(request):
return render(request, 'connect-everytime.html') | [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"a... | 3 | everytime/views.py | SmartWebService/SmartStudyCalendar |
from rest_framework import serializers
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from .models import User
from .models import ShortenedUrl
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "email", "username", "fullname", "password")
extra_kwargs = {"password": {"write_only": True}}
def create(self, validated_data):
user = User.objects.create_user(validated_data['email'], validated_data['username'],
validated_data['fullname'], validated_data['password'])
return user
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "email", "username", "fullname")
class ShortenBodySerializer(serializers.ModelSerializer):
class Meta:
model = ShortenedUrl
fields = ("id", "original_url", "shortened_url")
def validate(self, data):
url_validator = URLValidator()
if len(data["shortened_url"]) > 10:
raise serializers.ValidationError
try:
url_validator(data["original_url"])
except ValidationError:
raise serializers.ValidationError("Invalid URL to Shorten")
return data
class ShortenUrlSerializer(serializers.ModelSerializer):
class Meta:
model = ShortenedUrl
fields = ("id", "user", "original_url", "shortened_url")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
... | 3 | url_shorten_api/core/serializers.py | dorukuzucu/url_shorten |
# Dummy test
import numpy as np
from gsea import *
from numpy.testing import assert_almost_equal
def test_rank_genes():
D = np.array([[-1,1],[1,-1]])
C = [0,1]
L,r = rank_genes(D,C)
assert_almost_equal(L, [0,1])
assert_almost_equal(r, [1,-1])
def test_enrichment_score():
L = [1,0]
r = [-1,1]
S = [0,1]
ES = enrichment_score(L,r,S)
assert_almost_equal(ES,1)
L = [0,1,2]
r = [-1,0,1]
assert_almost_equal(enrichment_score(L,r,[0]),1)
assert_almost_equal(enrichment_score(L,r,[1]),-1)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | tests/test_gsea.py | jacobkimmel/GSEA.py |
"""
This example uses a finite number of workers, rather than slamming the system with endless subprocesses.
This is more effective than endless context switching for an overloaded CPU.
"""
import asyncio
from pathlib import Path
import shutil
import sys
from typing import Iterable
import os
FFPLAY = shutil.which("ffplay")
if not FFPLAY:
raise ImportError("FFPLAY not found")
async def ffplay(queue: asyncio.Queue):
"""
Play media asynchronously.
Each task runs endlessly until .cancel()
"""
assert isinstance(FFPLAY, str)
while True:
filein = await queue.get()
cmd = [FFPLAY, "-loglevel", "warning", "-autoexit", str(filein)]
proc = await asyncio.create_subprocess_exec(*cmd)
ret = await proc.wait()
if ret != 0:
print(filein, "playback failure", file=sys.stderr)
queue.task_done()
async def main(flist: Iterable[Path]):
Ntask = os.cpu_count() # includes logical cores
if not isinstance(Ntask, int):
Ntask = 2
# %% setup queue
queue = asyncio.Queue() # type: ignore
for f in flist:
await queue.put(f)
# %% setup Tasks
if sys.version_info >= (3, 7):
tasks = [asyncio.create_task(ffplay(queue)) for i in range(Ntask)]
else:
tasks = [asyncio.ensure_future(ffplay(queue)) for i in range(Ntask)]
await queue.join()
# %% program done, teardown Tasks
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | src/asyncioffmpeg/ffplay.py | scivision/asyncio-subprocess-ffmpeg |
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
class Queue(list):
def shift(self):
if self == []:
return None
result = self[0]
del self[0]
return result
def push(self, value):
self.append(value)
class MessageQueue(Queue):
def read(Queue):
raise NotImplementedError
def write(Queue):
raise NotImplementedError
class InboundQueue(MessageQueue):
def read(Queue):
"Should be defined in this class"
class OutboundQueue(MessageQueue):
def write(Queue, *Messages):
"Should be defined in this class"
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": fa... | 3 | Sketches/MPS/Old/queue.py | sparkslabs/kamaelia_orig |
import repo
from collectors import basic
def extract_content(url, soup):
return soup.title.string # extract page's title
def store_content(url, content):
# store in a hash with the URL as the key and the title as the content
repo.set_content(url, content)
def allow_url_filter(url):
return True # allow all by default
def get_html(url):
return basic.get_html(url)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | parsers/defaults.py | ZenRows/scaling-to-distributed-crawling |
import aiohttp, asyncio
from bs4 import BeautifulSoup
import json
import time
VC_SEARCH = "https://vc.ru/search/v2/content/new"
async def parse_urls(key_word):
async with aiohttp.ClientSession() as session:
async with session.get(VC_SEARCH, params={
"query": key_word,
"target_type": 'posts',
}) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
urls = [x["href"] for x in soup.find_all("a", {"class": "content-feed__link"})]
return urls
async def get_text(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
text = " ".join(map(lambda x: x.text, soup.find("div", {"class": "l-entry__content"}).find_all("p")))
return text
async def get_all_texts(keyword):
urls = await parse_urls(keyword)
all_texts = []
for u in urls[:25]:
text = await get_text(u)
all_texts.append(text)
return all_texts
async def vc_get_data(keyword, result_file_path='result-vc.json'):
texts = await get_all_texts(keyword)
result_dict = {"company": keyword,
"texts": texts}
result_json = json.loads(json.dumps(result_dict))
return result_json
#with open(result_file_path, 'w', encoding='utf-8') as f:
# json.dump(result_json, f, ensure_ascii=False, indent=4)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(vc_get_data("сбер", "other/sber-vc.json"))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | Parsers/vcru.py | OverFitted/hacksai2021spb |
class UnoInfo:
def __init__(self):
self.dataPins = 13
self.analogInPins = 5
self.GND = 3
self.pow = [3.3, 5]
self.TX = 1
self.RX = 0
def getMainInfo(self):
return {"0": self.dataPins, "1": self.GND, "2": self.pow}
def getDigitalPins(self):
return self.dataPins
def getAnalogPins(self):
return self.analogInPins
def getAmountGND(self):
return self.GND
def getPowOut(self):
return self.pow
def getTXSlot(self):
return self.TX
def getRXSlot(self):
return self.RX
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer":... | 3 | Pyduino/Boards/Uno.py | ItzTheDodo/Pyduino |
# Aim: Mostly for phenix users and those don't like using Miniconda
# 1. wget url_to_tar_file.tar
# 2. tar -xf url_to_tar_file.tar
# 3. source amber17/ambersh
# 4. Just it
""" Usage example: python pack_non_conda.py ambertools-17.0.1-py27_1.tar.bz2
Note: You can use file pattern
This script will unpack that bz2 file, then do some editing, then pack it to ./non-conda-install folder.
This should be done after doing conda-build
"""
import os
import subprocess
from glob import glob
import argparse
# local file, in the same folder as this script
from edit_package import editing_conda_package
import update_shebang
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tarfile', nargs='?', help='targer file')
parser.add_argument(
"--output-dir",
type=str,
default='./non-conda-install',
dest="output_dir",
help="output directory")
parser.add_argument(
"--date", action="store_true", help="Add date to output tarfile")
parser.add_argument("-d", "--dry_run", action="store_true", help="dry run")
opt = parser.parse_args()
pack_non_conda_package(opt)
def pack_non_conda_package(opt):
with editing_conda_package(
opt.tarfile,
output_dir=opt.output_dir,
add_date=opt.date,
dry_run=opt.dry_run):
update_shebang.update_python_env('./bin/')
# No need to copy here since we alread done in conda build step?
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | conda_tools/pack_non_conda.py | Amber-MD/ambertools-binary-build |
from tabulate import tabulate
import re
from cesium.features.graphs import (feature_categories, dask_feature_graph,
extra_feature_docs)
def feature_graph_to_rst_table(graph, category_name):
"""Convert feature graph to Sphinx-compatible ReST table."""
header = [category_name, 'Description']
table = []
for feature_name in sorted(
graph, key=lambda s: [int(t) if t.isdigit() else t
for t in re.split('(\d+)', s)]):
description = (extra_feature_docs[feature_name] if feature_name in
extra_feature_docs else
' '.join([e.strip() for e in
graph[feature_name][0].__doc__.split('\n\n')[0]
.strip().split('\n')]))
table.append([feature_name, description])
return tabulate(table, headers=header, tablefmt='rst')
def write_feature_tables(fname):
with open(fname, 'w') as f:
f.write('==============================\n'
'Cesium Features - By Category\n'
'==============================\n\n')
dfg = dask_feature_graph
for category in feature_categories:
graph = {feature: dfg[feature]
for feature in feature_categories[category]}
f.write(feature_graph_to_rst_table(graph, category) + '\n\n')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | doc/tools/feature_table.py | acrellin/cesium |
#!/usr/bin/env python3
def countdown(n):
if n <= 0:
print()
return
print(n, end=' ')
countdown(n-1)
countdown(5)
print(list(range(5, 0, -1)))
print(list(x for x in range(5, 0, -1)))
def countdown2(n):
if n <= 0:
yield 'stop'
else:
yield n
for i in countdown2(n-1): yield i
print(list(countdown2(5)))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | PartIVExercises/11/countdown.py | eroicaleo/LearningPython |
import csv
import os
from histdata.api import download_hist_data
def mkdir_p(path):
import errno
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def download_all():
with open('pairs.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
next(reader, None) # skip the headers
for row in reader:
currency_pair_name, pair, history_first_trading_month = row
year = int(history_first_trading_month[0:4])
print(currency_pair_name)
output_folder = os.path.join('output', pair)
mkdir_p(output_folder)
try:
while True:
could_download_full_year = False
try:
print('-', download_hist_data(year=year,
pair=pair,
output_directory=output_folder,
verbose=False))
could_download_full_year = True
except AssertionError:
pass # lets download it month by month.
month = 1
while not could_download_full_year and month <= 12:
print('-', download_hist_data(year=str(year),
month=str(month),
pair=pair,
output_directory=output_folder,
verbose=False))
month += 1
year += 1
except Exception:
print('[DONE] for currency', currency_pair_name)
if __name__ == '__main__':
download_all()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | download_all_fx_data.py | feilongbk/FX-1-Minute-Data |
#!/usr/bin/env python
# file name: google_search.py
# created by: Ventura Del Monte
# purpose: Google Search Implementation
# last edited by: Ventura Del Monte 04-10-2014
from internal_browser import *
from bs4 import BeautifulSoup
import urlparse
import re
class GoogleSearch(InternalBrowser):
# base_url = "https://www.google.it/search?q="
def __init__(self):
InternalBrowser.__init__(self, usrAgent = ['windows7', 'firefox'])
self.open("www.google.com")
def search(self, query):
data = self.queryForm('gbqf', 'q', query)
html = BeautifulSoup(data)
ret = []
#for li in html.findAll(attrs = {'class' : re.compile("rc")}):
for a in html.select('h3 > a'):
href = a['href']
ret.append((href, urlparse.urlparse(href)))
return ret
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | analyzer/google_search.py | Gr1ph00n/staticwebanalyzer |
"""Create an animation asset."""
import bpy
from avalon import api
from avalon.blender import lib, ops
from avalon.blender.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin
class CreateAnimation(plugin.Creator):
"""Animation output for character rigs"""
name = "animationMain"
label = "Animation"
family = "animation"
icon = "male"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def _process(self):
# Get Instance Containter or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
# name = self.name
# if not name:
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
# asset_group = bpy.data.objects.new(name=name, object_data=None)
# asset_group.empty_display_type = 'SINGLE_ARROW'
asset_group = bpy.data.collections.new(name=name)
instances.children.link(asset_group)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
selected = lib.get_selection()
for obj in selected:
asset_group.objects.link(obj)
elif (self.options or {}).get("asset_group"):
obj = (self.options or {}).get("asset_group")
asset_group.objects.link(obj)
return asset_group
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | openpype/hosts/blender/plugins/create/create_animation.py | philipluk/OpenPype |
import asyncio
import importlib.resources
import aioconsole
from playsound import playsound
import niescraper.resources
async def play_alarm():
with importlib.resources.path(niescraper.resources, 'alarm.mp3') as alarm_file:
while asyncio.get_running_loop().is_running():
playsound(alarm_file, False)
await asyncio.sleep(0.5)
async def play_alarm_until_input_async():
alarm_task = asyncio.create_task(play_alarm())
await aioconsole.ainput("Please press Enter to acknowledge alarm...")
alarm_task.cancel()
def play_alarm_until_input():
asyncio.run(play_alarm_until_input_async())
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | niescraper/alarm.py | elKei24/niescraper |
import os
import tempfile
import pytest
import mlagents.trainers.tensorflow_to_barracuda as tf2bc
from mlagents.trainers.tests.test_nn_policy import create_policy_mock
from mlagents.trainers.settings import TrainerSettings
from mlagents.tf_utils import tf
from mlagents.model_serialization import SerializationSettings, export_policy_model
def test_barracuda_converter():
path_prefix = os.path.dirname(os.path.abspath(__file__))
tmpfile = os.path.join(
tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()) + ".nn"
)
# make sure there are no left-over files
if os.path.isfile(tmpfile):
os.remove(tmpfile)
tf2bc.convert(path_prefix + "/BasicLearning.pb", tmpfile)
# test if file exists after conversion
assert os.path.isfile(tmpfile)
# currently converter produces small output file even if input file is empty
# 100 bytes is high enough to prove that conversion was successful
assert os.path.getsize(tmpfile) > 100
# cleanup
os.remove(tmpfile)
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])
@pytest.mark.parametrize("visual", [True, False], ids=["visual", "vector"])
@pytest.mark.parametrize("rnn", [True, False], ids=["rnn", "no_rnn"])
def test_policy_conversion(tmpdir, rnn, visual, discrete):
tf.reset_default_graph()
dummy_config = TrainerSettings(output_path=os.path.join(tmpdir, "test"))
policy = create_policy_mock(
dummy_config, use_rnn=rnn, use_discrete=discrete, use_visual=visual
)
policy.save_model(1000)
settings = SerializationSettings(
policy.model_path, os.path.join(tmpdir, policy.brain.brain_name)
)
export_policy_model(settings, policy.graph, policy.sess)
# These checks taken from test_barracuda_converter
assert os.path.isfile(os.path.join(tmpdir, "test.nn"))
assert os.path.getsize(os.path.join(tmpdir, "test.nn")) > 100
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | ml-agents/mlagents/trainers/tests/test_barracuda_converter.py | bobcy2015/ml-agents |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
"""Serializes a user profile object"""
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name' , 'password')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
"""Create and return a new user"""
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
def update(self, instance, validated_data):
"""Handle updating user account"""
if 'password' in validated_data:
password = validated_data.pop('password')
instance.set_password(password)
return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializes profile feed items"""
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on')
extra_kwargs = {'user_profile': {'read_only': True}} | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | profiles_api/serializers.py | mcabrejos24/profiles-rest-api |
_TF_INCLUDE_PATH = "TF_INCLUDE_PATH"
_TF_LIB_PATH = "TF_LIB_PATH"
def _get_env_var_with_default(repository_ctx, env_var):
"""Returns evironment variable value."""
if env_var in repository_ctx.os.environ:
value = repository_ctx.os.environ[env_var]
return value
else:
fail("Environment variable '%s' was not set." % env_var)
def _get_tf_conf(repository_ctx):
"""Returns structure containing all required information about tensorflow
configuration on host platform.
"""
include_path = _get_env_var_with_default(repository_ctx, _TF_INCLUDE_PATH)
lib_path = _get_env_var_with_default(repository_ctx, _TF_LIB_PATH)
return struct(
include_path = include_path,
lib_path = lib_path
)
def _tensorflow_autoconf_impl(repository_ctx):
"""Implementation of tensorflow autoconf. rule."""
tf_conf = _get_tf_conf(repository_ctx)
print("Using %s=%s" % (_TF_INCLUDE_PATH, tf_conf.include_path))
print("Using %s=%s" % (_TF_LIB_PATH, tf_conf.lib_path))
repository_ctx.symlink(tf_conf.include_path, 'include')
repository_ctx.symlink(tf_conf.lib_path, 'lib')
repository_ctx.template('BUILD', Label("//third_party/tensorflow:tensorflow.BUILD"))
tensorflow_configure = repository_rule(
implementation = _tensorflow_autoconf_impl,
environ = [
_TF_INCLUDE_PATH,
_TF_LIB_PATH
]
) | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | third_party/tensorflow/tensorflow_configure.bzl | waveflow-team/waveflow |
"""App drf url tests.
"""
from unittest import mock
import pytest
from django.urls import resolve, reverse
from .factories import ProjectFactory
pytestmark = pytest.mark.django_db
@pytest.mark.fast
@mock.patch(
"vision_on_edge.azure_projects.models.Project.validate",
mock.MagicMock(return_value=True),
)
def test_project_detail():
"""test_project_detail.
Args:
project (Project): project
"""
project = ProjectFactory()
assert (
reverse("api:project-detail", kwargs={"pk": project.id})
== f"/api/projects/{project.id}"
)
assert resolve(f"/api/projects/{project.id}").view_name == "api:project-detail"
@pytest.mark.fast
def test_project_list():
"""test_project_list."""
assert reverse("api:project-list") == "/api/projects"
assert resolve("/api/projects").view_name == "api:project-list"
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/tests/test_drf_urls.py | kaka-lin/azure-intelligent-edge-patterns |
import unittest
from python_oop.testing.exercise.vehicle.project.vehicle import Vehicle
# from project.vehicle import Vehicle
class VehicleTest(unittest.TestCase):
def setUp(self):
self.vehicle = Vehicle(50.0, 300.0)
def test_vehicle__init_method(self):
self.assertEqual(50.0, self.vehicle.fuel)
self.assertEqual(50.0, self.vehicle.capacity)
self.assertEqual(300.0, self.vehicle.horse_power)
self.assertEqual(self.vehicle.DEFAULT_FUEL_CONSUMPTION, self.vehicle.fuel_consumption)
def test_vehicle__fuel_capacity_if_fuel_changed(self):
self.assertEqual(50.0, self.vehicle.capacity)
self.vehicle.fuel = 20.0
self.assertEqual(50.0, self.vehicle.capacity)
def test_vehicle__str_method(self):
expected_result = f"The vehicle has {self.vehicle.horse_power} " \
f"horse power with {self.vehicle.fuel} fuel left and {self.vehicle.fuel_consumption} fuel consumption"
actual_result = self.vehicle.__str__()
self.assertEqual(expected_result, actual_result)
def test_vehicle__drive_method_success(self):
self.vehicle.drive(5)
self.assertEqual(43.75, self.vehicle.fuel)
def test_vehicle__drive_method__expect_exception(self):
expected_result = "Not enough fuel"
with self.assertRaises(Exception) as context:
self.vehicle.drive(100)
self.assertEqual(expected_result, str(context.exception))
def test_vehicle__refuel_method_success(self):
self.vehicle.drive(5)
self.vehicle.refuel(6.25)
self.assertEqual(50.0, self.vehicle.fuel)
def test_vehicle__refuel_method__expect_exception(self):
expected_result = "Too much fuel"
with self.assertRaises(Exception) as context:
self.vehicle.refuel(100)
self.assertEqual(expected_result, str(context.exception))
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP |
#! /usr/bin/env python
# Bruce Schneier algorithm
def mpow(a, b, m):
if a < 0:
a += m
c = 1
while b >= 1:
if b % 2 == 1:
c = (a * c) % m
a = pow(a, 2) % m
b = b // 2
return c
# extended Euclidean algorithm
def minv(a, m):
b = m
u = 1
v = 0
while b != 0:
t = a // b
a = a - t * b
u = u - t * v
a, b = b, a
u, v = v, u
return u + m if u < 0 else u
def min_residue(a, m):
b = a % m
c = b - m
if b >= abs(c):
return c
else:
return b
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | eclib/modutils.py | KaoruTeranishi/EncryptedControl |
import os
import numpy as np
from keras import backend as K
from keras.losses import mean_absolute_error
import utils
from model import wdsr_b
def psnr(hr, sr, max_val=2):
mse = K.mean(K.square(hr - sr))
return 10.0 / np.log(10) * K.log(max_val ** 2 / mse)
def data_generator(path, batch_size=8, input_shape=96, scale=2):
'''data generator for fit_generator'''
fns = os.listdir(path)
n = len(fns)
i = 0
while True:
lrs, hrs = [], []
for b in range(batch_size):
if i == 0:
np.random.shuffle(fns)
fn = fns[i]
fn = os.path.join(path, fn)
lr, hr = utils.pair(fn, input_shape, scale)
lr = utils.normalization(lr)
hr = utils.normalization(hr)
lrs.append(lr)
hrs.append(hr)
i = (i + 1) % n
lrs = np.array(lrs)
hrs = np.array(hrs)
yield lrs, hrs
model = wdsr_b()
model.compile(optimizer='adam',
loss=mean_absolute_error, metrics=[psnr])
model.fit_generator(data_generator('./datasets/train/'),
steps_per_epoch=50,
epochs=1250)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | src/train.py | zhaipro/keras-wdsr |
# Example module for Online Python Tutor
# Philip Guo
# 2013-08-03
# To get the Online Python Tutor backend to import this custom module,
# add its filename ('htmlexample_module') to the CUSTOM_MODULE_IMPORTS
# tuple in pg_logger.py
# To see an example of this module at work, write the following code in
# http://pythontutor.com/visualize.html
'''
from htmlexample_module import ColorTable
t = ColorTable(3, 4)
t.set_color(0, 0, 'red')
t.render_HTML()
t.set_color(1, 1, 'green')
t.render_HTML()
t.set_color(2, 2, 'blue')
t.render_HTML()
for i in range(3):
for j in range(4):
t.set_color(i, j, 'gray')
t.render_HTML()
'''
# defines a simple table where you can set colors for individual rows and columns
class ColorTable:
def __init__(self, num_rows, num_columns):
self.num_rows = num_rows
self.num_columns = num_columns
# create a 2D matrix of empty strings
self.table = []
for i in range(self.num_rows):
new_lst = ['' for e in range(self.num_columns)]
self.table.append(new_lst)
# color must be a legal HTML color string
def set_color(self, row, column, color):
assert 0 <= row < self.num_rows
assert 0 <= column < self.num_columns
self.table[row][column] = color
# call this function whenever you want to render this table in HTML
def render_HTML(self):
# incrementally build up an HTML table string
html_string = '<table>'
for i in range(self.num_rows):
html_string += '<tr>'
for j in range(self.num_columns):
color = self.table[i][j]
if not color:
color = "white"
html_string += '''<td style="width: 30px; height: 30px; border: 1px solid black;
background-color: %s;"></td>''' % color
html_string += '</tr>'
html_string += '</table>'
# then call the magic setHTML function
setHTML(html_string)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | v3/htmlexample_module.py | ambadhan/OnlinePythonTutor |
import unittest, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
# test some random csv data, and some lineend combinations
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
# believe the interesting thing is the NaN in the csv
def test_A_parse3(self):
parseResult = h2i.import_parse(bucket='smalldata', path='parse3.csv', schema='put')
h2o_cmd.runRF(parseResult=parseResult, trees=37, timeoutSecs=10)
if __name__ == '__main__':
h2o.unit_main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | py/testdir_single_jvm/notest_parse3.py | vkuznet/h2o |
import motor.motor_asyncio
from fastapi import FastAPI
from fastapi_users import FastAPIUsers, models
from fastapi_users.authentication import JWTAuthentication
from fastapi_users.db import MongoDBUserDatabase
DATABASE_URL = "mongodb://localhost:27017"
SECRET = "SECRET"
class User(models.BaseUser):
pass
class UserCreate(User, models.BaseUserCreate):
pass
class UserUpdate(User, models.BaseUserUpdate):
pass
class UserDB(User, models.BaseUserDB):
pass
client = motor.motor_asyncio.AsyncIOMotorClient(DATABASE_URL)
db = client["database_name"]
collection = db["users"]
user_db = MongoDBUserDatabase(UserDB, collection)
auth_backends = [
JWTAuthentication(secret=SECRET, lifetime_seconds=3600),
]
app = FastAPI()
fastapi_users = FastAPIUsers(
user_db, auth_backends, User, UserCreate, UserUpdate, UserDB, SECRET,
)
app.include_router(fastapi_users.router, prefix="/users", tags=["users"])
@fastapi_users.on_after_register()
def on_after_register(user: User):
print(f"User {user.id} has registered.")
@fastapi_users.on_after_forgot_password()
def on_after_forgot_password(user: User, token: str):
print(f"User {user.id} has forgot their password. Reset token: {token}")
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | docs/src/full_mongodb.py | rnd42/fastapi-users |
# -*- coding: utf-8 -*-
from os.path import dirname, abspath
import sys
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import utils.config_loader as config
import utils.config_loader as config
import utils.tools as tools
import torch
import shutil
versions = ['sl', 'alpha']
para_org = True
for vv in versions:
if config.meta_model_name.endswith(vv):
para_org = False
def sort_sid2score(sid2score):
sid_score_list = sorted(sid2score.items(), key=lambda item: item[1], reverse=True)
return sid_score_list
def get_rank_records(sid_score_list, sents=None, flat_sents=False):
"""
optional: display sentence in record
:param sid_score_list:
:param sents:
:param flat_sents: if True, iterate sent directly; if False, need use sid to get doc_idx and sent_idx.
:return:
"""
rank_records = []
for sid, score in sid_score_list:
items = [sid, str(score)]
if sents:
if flat_sents:
sent = sents[len(rank_records)] # the current point
else:
doc_idx, sent_idx = tools.get_sent_info(sid)
sent = sents[doc_idx][sent_idx]
items.append(sent)
record = '\t'.join(items)
rank_records.append(record)
return rank_records
def dump_rank_records(rank_records, out_fp, with_rank_idx):
"""
each line is
ranking sid score
sid: config.SEP.join((doc_idx, para_idx, sent_idx))
:param sid_score_list:
:param out_fp:
:return:
"""
lines = rank_records
if with_rank_idx:
lines = ['\t'.join((str(rank), record)) for rank, record in enumerate(rank_records)]
with open(out_fp, mode='a', encoding='utf-8') as f:
f.write('\n'.join(lines))
return len(lines)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | src/summ/rank_sent.py | yumoxu/querysum |
# https://app.codesignal.com/company-challenges/mz/zCYv3tuxRE4JajQNY
def questEfficiencyItem(hours, points, time_for_quests):
# Time is short, you want to complete as many quests as possible
# but it's difficult to do so. So we want to maximize the points
# we can obtain with quests in a given limited time.
# hours: hours it takes to complete a quest
# points: points each quest gives you
# time_for_quests: the limit of time to do stuff.
# Recursively, at each position, decide whether to take this quest
# or not. This 'iteration' can be done since the order of the quests
# doesn't matter so you can check from left to right whether to take
# each one or not, generating unique combinations.
def recursive(idx, score_acum, time_left):
# Time ran out, acum with last step is invalid.
if time_left < 0:
return 0
# Time was precise, return until here.
if time_left == 0:
return score_acum
# Ran out of quests to
if idx == len(hours):
return score_acum
score = 0
hours_idx = hours[idx]
points_idx = points[idx]
# At each position decide whats better, whether to consume it or
# advance to the next without consuming current.
res_1 = recursive(idx + 1, score_acum + points_idx, time_left - hours_idx)
res_2 = recursive(idx + 1, score_acum, time_left)
return max(res_1, res_2)
# Start with 0 accumulated points and all the time left.
return recursive(0, 0, time_for_quests)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | CodeSignal/Challenges/MZ/06_Quest_Efficiency_Item.py | Zubieta/CPP |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="admin@londonappdev.com", password="password123"
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="test@gmail.com", password="password123", name="Full Name Jr."
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse("admin:core_user_changelist")
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
self.assertEqual(url, "/admin/core/user/")
def test_user_change_page(self):
"""Test that the user page works"""
url = reverse("admin:core_user_change", args=[self.user.id])
# /admin/core/user/{id}
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse("admin:core_user_add")
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | app/core/tests/test_admin.py | EvandroLippert/recipe-app-api |
import json
from allensdk.core.brain_observatory_cache import BrainObservatoryCache
def compress_roi(roi):
mask = []
for i, row in enumerate(roi):
for j, flag in enumerate(row):
if flag:
mask.append((i, j))
return mask
def sample(signal, n):
size = int(len(signal) / n)
extra = len(signal) % size
sampled = []
for i in range(n):
sampled.append(sum(signal[i*size:(i+1)*size]) / size)
if extra > 0:
sampled.append(sum(signal[n*size:]) / extra)
return sampled
# Create a brain observatory.
manifest_file = './brain_observatory_manifest.json'
boc = BrainObservatoryCache(manifest_file=manifest_file)
# Select a visual area and a Cre line.
visual_area = 'VISal'
cre_line ='Cux2-CreERT2'
# Extract experiments.
exps = boc.get_experiment_containers(targeted_structures=[visual_area], cre_lines=[cre_line])
# Select one experiment.
experiment_container_id = 511510736
# Extract a session.
session_id = boc.get_ophys_experiments(experiment_container_ids=[experiment_container_id], stimuli=['natural_scenes'])[0]['id']
# Extract the full dataset for that session.
dataset = boc.get_ophys_experiment_data(ophys_experiment_id=session_id)
# Pull out the max intensity projection.
mip = dataset.get_max_projection()
# Pull out the ROI masks.
rois = dataset.get_roi_mask_array()
# Get timestamps and Dff data.
ts, dff = dataset.get_dff_traces()
# Pull out the stimulus epoch data.
stim_epoch = dataset.get_stimulus_epoch_table()
# Dump all the data out into data files.
with open('mip.json', 'w') as f:
f.write(json.dumps(mip.tolist()))
with open('rois.json', 'w') as f:
f.write(json.dumps(list(map(compress_roi, rois.tolist()))))
with open('dff.json', 'w') as f:
# f.write(json.dumps(dff.tolist()))
f.write(json.dumps(list(map(lambda x: sample(x, 2000), dff.tolist()))))
with open('stim_epoch.json', 'w') as f:
f.write(stim_epoch.to_json())
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | data/getdata.py | arclamp/roi-thumbnail |
# push: O(1)
# pop: O(1)
# top: O(1)
# getMin: O(1)
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.min = float('inf')
def push(self, x: int) -> None:
if x<self.min:
self.min = x
self.stack.append([x,self.min])
def pop(self) -> None:
self.stack.pop()
if self.stack:
self.min = self.stack[-1][1]
else:
self.min = float('inf')
def top(self) -> int:
return self.stack[-1][0]
def getMin(self) -> int:
return self.min
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | 30DayChallenge_April/april_10_minstack.py | cmattey/leetcode_problems |
# -*- coding: utf-8 -*-
from pytest import raises
from mock import Mock
from pyee import EventEmitter, PatternException
def test_is_pattern():
ee = EventEmitter()
assert ee._isPattern('a/#')
assert ee._isPattern('a/+/+')
assert not ee._isPattern('a/#/+')
assert not ee._isPattern('a/c+/c')
assert not ee._isPattern('a/#/c')
def test_pattern_matching():
"""Test that patterns are correctly interpreted"""
ee = EventEmitter()
assert ee._matches('#', 'a/b/c')
assert ee._matches('+/b/c', 'a/b/c')
assert ee._matches('a/#', 'a/b/c')
assert not ee._matches('a/#', 'c/a/b/c')
with raises(PatternException) as e:
ee._matches('#/b/c', 'c')
assert not ee._matches('a/+/d/e', 'a/b/c/d/e')
def test_matching_topic():
"""Test that a pattern can be passed as an event"""
ee = EventEmitter()
call_me = Mock()
@ee.on('event/+/ok', call_me)
def event_handler(data, **kwargs):
call_me()
ee.emit('event/first/ok')
ee.emit('event/second/ok')
ee.emit('event/first/ok2')
assert call_me.call_count == 3
def test_shorter_pattern():
"""Tests correct behaviour with shorter patterns"""
ee = EventEmitter()
call_me = Mock()
@ee.on('#')
def event_handler(ev):
call_me()
ee.emit('a/b/c')
ee.emit('cool')
assert call_me.call_count == 2
def test_longer_pattern():
"""Tests correct behaviour with longer patterns"""
ee = EventEmitter()
call_me = Mock()
@ee.on('a/b/#')
def event_handler(ev):
call_me()
ee.emit('c')
ee.emit('c')
call_me.assert_not_called()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | tests/test_matching.py | joliveros/pyee-topics |
import time
from M4i6622 import *
from Functions.functions import *
#4 functions to be used
def f0(x):
return sin(x)#sin_for_time(60000000, 40000000, 20000,10000, x)
def f1(x):
return sin(x)
def f2(x):
return sin(x,f=1000)
def f3(x):
return x
t0 = time.perf_counter()
M4i = M4i6622(channelNum=3,sampleRate=625,clockOut=True,referenceClock=False)
r = M4i.setSoftwareBuffer()
M4i.setupCard( (f0,f1,f2) )
tf = time.perf_counter() - t0
print("Done")
print("Time elapsed: {0: 10f} s".format(tf))
M4i.startCard()
r = M4i.stop()
print("Card has been stopped with error code: ",str(r))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | testing.py | vuthalab/spectrum-awg |
from PIL import Image
import os
import pathlib
from .constants import *
def calcDP(px):
index = -1
if px in SIZES_PX:
index = SIZES_PX.index(px)
else:
for item in SIZES_PX:
if item > px:
index = SIZES_PX.index(item)
break
if index == -1:
index = len(SIZES_PX) - 1
return px / (SIZES_DPI[index] / 160)
def calcNewSizes(dp):
res = []
for item in SCALING:
res.append(int(item * dp))
return res
def getImageFilesList():
files = []
for file in os.listdir("."):
if isImage(file):
files.append(file)
return files
def isImage(name):
if os.path.isfile(name):
file_parts = pathlib.Path(name)
if file_parts.suffix.removeprefix(".") in SUPPORTED_FORMATS:
return True
return False
def checkImageNameForSave(name: str):
# in android drawables images can't start with numbers
# and is better they haven't capitalize words
name = name.lower()
if name[0].isnumeric():
name = '_' + name
return name
def createNewImage(img_path):
img = Image.open(img_path)
x, y = img.size
ratio = x / y
new_xs = calcNewSizes(calcDP(x))
new_ys = []
for item in new_xs:
new_ys.append(int(item / ratio))
for i in range(len(SCALING)):
root = "Drawable"
folder = "drawable-" + DRAWABLE_SIZE_NAMES[i]
name = checkImageNameForSave(img_path)
path = os.path.join(root, folder, name)
try:
pathlib.Path(os.path.join(root, folder)).mkdir(parents=True, exist_ok=True)
new_img = img.resize((new_xs[i], new_ys[i]))
new_img.save(checkImageNameForSave(path))
except Exception as ex:
print(ex)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | drimg/functions.py | hojjat-faryabi/drawable_image |
import argparse
from train import start_training
import cv2
from skimage import feature
import numpy as np
import dlib
import tensorflow as tf
import keras
def get_cmd_args():
""" Parse user command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-d","--dataset_dir",default="dataset",type=str)
parser.add_argument("-e","--epoch",default=10,type=int)
parser.add_argument("-b","--batch",default=100,type=int)
parser.add_argument("-s","--step",default=1000,type=int)
parser.add_argument("-l","--lr",default=1e-4,type=float)
parser.add_argument("-i","--input_shape",nargs=3,type=int,default=[48,48,1])
parser.add_argument("-m","--model_output",type=str,default="model")
parser.add_argument("-f","--features",type=str,default="all")
args = parser.parse_args()
return args
def main():
"""Start of training program.
"""
np.random.seed(1)
tf.set_random_seed(2)
args = get_cmd_args()
if args.input_shape[2]!=1:
raise Exception("Currenly tested for only gray scale images. input_shape should be [height,width,1]")
start_training(args)
if __name__ == '__main__':
main() | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | train/__main__.py | mitiku1/Emopy-Multi-Input- |
#!/usr/bin/env python
# coding=utf-8
import socket
import requests
requests.packages.urllib3.disable_warnings()
from lib.common import save_user_script_result
def do_check(self, url):
if url != '/':
return
ip = self.host.split(':')[0]
ports_open = is_port_open(ip)
headers = {
"User-Agent": "BugScan plugins http_proxy v0.1",
"Connection": "close"
}
for port in ports_open:
proxy_url = "http://{}:{}".format(ip, port)
proxy = {"http": proxy_url, "https": proxy_url}
try:
_ = requests.get('http://weibo.com/robots.txt', headers=headers, proxies=proxy, timeout=10.0)
code = _.status_code
html = _.text
if code == 200 and html.find("http://weibo.com/sitemap.xml") >= 0:
save_user_script_result(self, '', '%s:%s' % (ip, port), 'HTTP Proxy Found')
except Exception as e:
pass
def is_port_open(arg):
ports_open = []
for port in [80, 8080, 8088, 8888]:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3.0)
if s.connect_ex((arg, port)) == 0:
ports_open.append(port)
except Exception as e:
pass
finally:
s.close()
return ports_open
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | scripts/http_proxy.py | aStrowxyu/bbscan |
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from .models import Category, Comment, Genre, Review, Title
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = (
"name",
"slug",
)
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = (
"name",
"slug",
)
class TitleReadSerializer(serializers.ModelSerializer):
genre = GenreSerializer(many=True, read_only=True)
category = CategorySerializer(read_only=True)
class Meta:
model = Title
fields = "__all__"
class TitleCreateSerializer(serializers.ModelSerializer):
genre = serializers.SlugRelatedField(
slug_field="slug", many=True, queryset=Genre.objects.all()
)
category = serializers.SlugRelatedField(
slug_field="slug", queryset=Category.objects.all()
)
class Meta:
model = Title
fields = "__all__"
class ReviewSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
slug_field="username", read_only=True
)
class Meta:
model = Review
exclude = ("title",)
def validate(self, attrs):
if (
Review.objects.filter(
author=self.context["request"].user, title=self.get_title()
).exists()
and self.context["request"].method != "PATCH"
):
raise serializers.ValidationError("Вы уже оставили отзыв")
return attrs
def get_title(self):
title = get_object_or_404(
Title, id=self.context.get("view").kwargs.get("title_id")
)
return title
class CommentSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
slug_field="username", read_only=True
)
class Meta:
model = Comment
exclude = ("review",)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | api/serializers.py | Vadim3x4/yamdb_final |
import textwrap
def test_day01():
"""Tests that the simple test case for 2020 day 01 works"""
from advent_of_code.y2020.day01 import report_repair
data = textwrap.dedent("""
1721
979
366
299
675
1456
""")
data = data.strip().split("\n")
result = report_repair(data)
assert isinstance(result, tuple)
assert result[0] == 514579, "Part 1 is wrong"
assert result[1] == 241861950, "Part 2 is wrong"
def test_day01_alt():
"""Tests that the simple test case for 2020 day 01 works (using
itertools.combination)"""
from advent_of_code.y2020.day01 import report_repair_itertools
data = textwrap.dedent("""
1721
979
366
299
675
1456
""")
data = data.strip().split("\n")
result = report_repair_itertools(data)
assert isinstance(result, tuple)
assert result[0] == 514579, "Part 1 is wrong"
assert result[1] == 241861950, "Part 2 is wrong"
def test_day02():
"""Tests that the simple test case for 2020 day 02 works"""
from advent_of_code.y2020.day02 import password_philosophy
data = textwrap.dedent("""
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
""")
data = data.strip().split("\n")
result = password_philosophy(data)
assert isinstance(result, tuple)
assert result[0] == 2, "Part 1 is wrong"
assert result[1] == 1, "Part 2 is Wrong"
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | python/tests/test_2020.py | stonecharioteer/advent-of-code |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Keyword', fields ['kw_text']
db.create_unique(u'okhelptexts_keyword', ['kw_text'])
def backwards(self, orm):
# Removing unique constraint on 'Keyword', fields ['kw_text']
db.delete_unique(u'okhelptexts_keyword', ['kw_text'])
models = {
u'okhelptexts.helptext': {
'Meta': {'object_name': 'Helptext'},
'fulltext': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moreinfo': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'okhelptexts.keyword': {
'Meta': {'object_name': 'Keyword'},
'helptext': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['okhelptexts.Helptext']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kw_text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['okhelptexts'] | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | okhelptexts/migrations/0003_auto__add_unique_keyword_kw_text.py | MeirKriheli/Open-Knesset |
from flask import Flask, render_template
from Blueprints.Browser.route import BrowserBP
app = Flask(__name__)
app.secret_key = b'YOUR_SUPER_SECRET_KEY'
app.register_blueprint(BrowserBP)
@app.route('/')
def index():
return render_template('Homepage.html')
@app.route('/contact')
def contactme():
return render_template('Contact.html')
if __name__ == '__main__':
app.run(debug=True)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | app.py | mass1ve-err0r/HeadsInTheCloud |
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from Histogrammer import HistReader, HistCollector
from drawing.dist_multicomp_v2 import dist_multicomp_v2
class GStarCorrectionReader(HistReader):
def begin(self, event):
self.histograms.begin(event, [event.config.dataset.parent], {})
class GStarCorrectionCollector(HistCollector):
def draw(self, histograms):
df = histograms.histograms
binning = histograms.binning
all_indices = list(df.index.names)
# draw nominal only
df = df[df.index.get_level_values("weight").isin(["nominal"])]
df.index.names = [n if n != "process" else "key" for n in df.index.names]
all_indices[all_indices.index("process")] = "key"
df_z = df[df.index.get_level_values("key").isin(["ZJetsToLL"])].reset_index("key", drop=True)
df_g = df[df.index.get_level_values("key").isin(["GStarJetsToLL"])].reset_index("key", drop=True)
df_z, df_g = df_z.align(df_g, fill_value=0.)
df_zg = (df_z + df_g)
df_zg["key"] = "Z+GStarJetsToLL"
df_zg = df_zg.set_index("key", append=True).reorder_levels(all_indices)
df = df.append(df_zg)
args = []
for (d, r, w, n), df_group in df.groupby(["dataset", "region", "weight", "name"]):
path = os.path.join(self.outdir, "plots", d, r)
if not os.path.exists(path):
os.makedirs(path)
filepath = os.path.abspath(os.path.join(path, n))
if w != "": filepath += "_" + w
bins = binning[n][0]
with open(filepath+".pkl", 'w') as f:
pickle.dump((df_group, bins, filepath, self.cfg), f)
args.append((dist_multicomp_v2, (df_group, bins, filepath, self.cfg)))
return args
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
}... | 3 | sequence/Collectors/GStarCorrection.py | albertdow/zinv-analysis |
"""Utilities for running custom scripts
"""
from argparse import Namespace
from core.constructs.workspace import Workspace
from core.constructs.output_manager import OutputManager
def execute_run_cli(args) -> None:
ws = Workspace.instance()
output_manager = OutputManager()
run_command(ws, output_manager, args)
def run_command(
workspace: Workspace, output: OutputManager, cli_args: Namespace
) -> None:
"""Attempts to find and run a user defined command.
format:
cdev run <sub_command> <args>
Args:
workspace (Workspace): Workspace to execute the process within.
output (OutputManager): Output manager for sending messages to the console.
cli_args (Namespace): Arguments for the command.
"""
# Convert namespace into dict
params = vars(cli_args)
# This is the command to run... It can be a single command or a path to the command where the path is '.' delimitated
sub_command = params.get("subcommand")
command_args = params.get("args") if params.get("args") else []
try:
workspace.execute_command(sub_command, command_args)
except Exception as e:
print(e)
return
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | src/core/commands/run.py | cdev-framework/cdev-sdk |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.domreg.lt/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisDomregLtStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.domreg.lt/status_available.txt"
host = "whois.domreg.lt"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'available')
def test_available(self):
eq_(self.record.available, True)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_updated_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | test/record/parser/test_response_whois_domreg_lt_status_available.py | huyphan/pyyawhois |
import os
def encode_path(path):
if not path:
return path
if os.name == "nt":
if os.path.isabs(path):
drive, rest = os.path.splitdrive(path)
return "/" + drive[:-1].upper() + rest.replace("\\", "/")
else:
return path.replace("\\", "/")
else:
return path
def decode_path(path):
if not path:
return path
if os.name == "nt":
if path.startswith("/"):
path = path[1:]
iof = path.find("/")
if iof == -1:
drive = path
rest = ""
else:
drive = path[:iof]
rest = path[iof:]
return (drive + ":" + rest).replace("/", "\\")
else:
return path.replace("/", "\\")
else:
return path
def same_paths(path1, path2):
if not path1 or not path2:
return False
path1_normalized = os.path.normcase(os.path.realpath(path1))
path2_normalized = os.path.normcase(os.path.realpath(path2))
return path1_normalized == path2_normalized
def is_subpath(root, wannabe):
if not root or not wannabe:
return False
root = os.path.normcase(os.path.realpath(root))
wannabe = os.path.normcase(os.path.realpath(wannabe))
return wannabe.startswith(root)
def relative_path(root, wannabe):
if not root or not wannabe:
return None
if not is_subpath(root, wannabe):
return None
root = os.path.normcase(os.path.realpath(root))
wannabe = os.path.normcase(os.path.realpath(wannabe))
return wannabe[len(root) + 1:]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | paths.py | backchatio/sublime-ensime |
# Copyright (c) 2020 Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from bark.world.opendrive import *
from bark.world import *
from bark.geometry import *
from bark.runtime import PyRuntime
class Runtime(PyRuntime):
def __init__(self,
step_time,
viewer,
scenario_generator=None,
render=False):
self._step_time = step_time
self._viewer = viewer
self._scenario_generator = scenario_generator
self._scenario_idx = None
self._scenario = None
self._render = render
self._reset_has_been_called = False
def reset(self, scenario=None):
if scenario:
self._scenario = scenario
else:
self._scenario, self._scenario_idx = \
self._scenario_generator.get_next_scenario()
self._world = self._scenario.GetWorldState()
self._reset_has_been_called = True
self._viewer.Reset()
def step(self):
assert(self._reset_has_been_called==True)
self._world.Step(self._step_time)
if self._render:
self.render()
def render(self):
# self._viewer.clear()
self._viewer.drawWorld(
self._world,
self._scenario._eval_agent_ids,
scenario_idx=self._scenario_idx)
self._viewer.clear()
def run(self, steps):
for step_count in range(steps):
self.Step() | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | modules/runtime/runtime.py | Lizhu-Chen/bark |
import sys
import json
import requests
from flask import Flask
from flask import request
from tracing import init_tracer, flask_to_scope
import opentracing
from opentracing.ext import tags
from opentracing_instrumentation.client_hooks import install_all_patches
from flask_opentracing import FlaskTracer
from flask_cors import CORS, cross_origin
app = Flask('newsletter')
init_tracer('newsletter')
install_all_patches()
CORS(app)
flask_tracer = FlaskTracer(opentracing.tracer, True, app)
@app.route("/sayHello/<name>")
@cross_origin()
def say_hello(name):
with flask_to_scope(flask_tracer, request) as scope:
person = get_person(name)
resp = format_greeting(person)
opentracing.tracer.active_span.set_tag('response', resp)
return resp
def get_person(name):
with opentracing.tracer.start_active_span(
'get-person',
) as scope:
url = 'http://localhost:3001/getPerson/%s' % name
res = _get(url)
person = json.loads(res)
scope.span.log_kv({
'name': person['name'],
'title': person['title'],
'description': person['description'],
})
return person
def format_greeting(person):
with opentracing.tracer.start_active_span(
'format-greeting',
):
url = 'http://localhost:3002/formatGreeting'
return _get(url, params=person)
def _get(url, params=None):
r = requests.get(url, params=params)
assert r.status_code == 200
return r.text
if __name__ == "__main__":
app.run(port=3000)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | newsletter/src/newsletter.py | emilybache/BeeFriendly |
import datetime
import unittest
from time import sleep
from unittest import TestCase
from pbx_gs_python_utils.utils.Dev import Dev
from gw_bot.elastic.Save_To_ELK import Save_To_ELK
from gw_bot.helpers.Test_Helper import Test_Helper
class Test_Save_To_ELK(Test_Helper):
def setUp(self):
super().setUp()
self.save_to_elk = Save_To_ELK()
self.elastic = self.save_to_elk.elastic
self.doc_type = 'unit-test'
def test___init__(self):
self.elastic.create_index()
assert 'save_to_elk' in self.elastic.index_list()
def test_add_document(self):
test_doc = { "answer" : 42, "source" : "from_unit_test", "now": str(datetime.datetime.utcnow())}
doc_type = 'unit-test'
response = self.save_to_elk.add_document(doc_type, test_doc)
#Dev.pprint(data)
#Dev.pprint(response)
return test_doc
def test_get_most_recent_version_of_document(self):
test_doc = self.test_add_document()
sleep(1) # give ES time to index it
lucene_query = "doc_data.source:from_unit_test"
match = self.save_to_elk.get_most_recent_version_of_document(lucene_query)
assert match == test_doc
def test_find_document_by_type(self):
data = self.save_to_elk.find_documents_of_type(self.doc_type)
#Dev.pprint(data)
assert len(data) > 0
@unittest.SkipTest
def test_delete_documents_with_id(self):
id = "9QckDmcByR-UEnoIswj8"
result = self.save_to_elk.delete_documents_with_id(id)
Dev.pprint(result)
def test_delete_documents_with_type(self):
self.save_to_elk.delete_documents_with_type(self.doc_type)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer":... | 3 | tests/unit/elastic/test_Save_To_ELK.py | atykhonov/GW-Bot |
from test.parser.pattern.nodes.base import PatternTestBaseClass
from programy.parser.exceptions import ParserException
from programy.parser.pattern.nodes.that import PatternThatNode
from programy.parser.pattern.nodes.root import PatternRootNode
class PatternThatNodeTests(PatternTestBaseClass):
def test_init(self):
node = PatternThatNode()
self.assertIsNotNone(node)
self.assertFalse(node.is_root())
self.assertFalse(node.is_priority())
self.assertFalse(node.is_wildcard())
self.assertFalse(node.is_zero_or_more())
self.assertFalse(node.is_one_or_more())
self.assertFalse(node.is_set())
self.assertFalse(node.is_bot())
self.assertFalse(node.is_template())
self.assertTrue(node.is_that())
self.assertFalse(node.is_topic())
self.assertFalse(node.is_wildcard())
self.assertIsNotNone(node.children)
self.assertFalse(node.has_children())
self.assertTrue(node.equivalent(PatternThatNode()))
self.assertEqual(node.to_string(), "THAT [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)]")
def test_that_to_root(self):
node1 = PatternRootNode()
node2 = PatternThatNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add that node to root node")
def test_multiple_thats(self):
node1 = PatternThatNode()
node2 = PatternThatNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add that node to that node")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | src/test/parser/pattern/nodes/test_that.py | hiitsme123/python |
from django.core.files.storage import Storage
from django.conf import settings
class FastDFSStorage(Storage):
"""自定义文件存储系统,修改存储的方案"""
def __init__(self, fdfs_base_url=None):
"""
构造方法,可以不带参数,也可以携带参数
:param base_url: 存储服务器的位置
"""
self.fdfs_base_url = fdfs_base_url or settings.FDFS_BASE_URL
# self.fdfs_base_url = settings.FDFS_BASE_URL
def _open(self, name, mode='rb'):
"""
用于打开文件
:param name: 要打开的文件的名字
:param mode: 打开文件方式
:return: None
"""
pass
def _save(self, name, content):
"""
用于保存文件
:param name: 要保存的文件名字
:param content: 要保存的文件的内容
:return: None
"""
# 保存文件时使用的,此时不需要,而文档告诉说明必须实现,所以pass
pass
def url(self, name):
"""
返回name所指文件的绝对URL
:param name: 要读取文件的引用:group1/M00/00/00/wKhnnlxw_gmAcoWmAAEXU5wmjPs35.jpeg
:return: http://192.168.103.158:8888/group1/M00/00/00/wKhnnlxw_gmAcoWmAAEXU5wmjPs35.jpeg
"""
# return 'http://192.168.103.158:8888/' + name
return self.fdfs_base_url + name
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | meiduo_mall/meiduo_mall/utils/fastdfs/fdfs_storage.py | Noah-Smith-wgp/meiduo_project |
from flask import render_template,redirect,url_for, flash,request
from . import auth
from flask import render_template,redirect,url_for
from ..models import User
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "watchlist login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',registration_form = form)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | app/auth/views.py | Dnmrk4/watchlist |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This is rumdom run node.
subscribe No topcs.
Publish 'cmd_vel' topic.
mainly use for simple sample program
by Takuya Yamaguhi.
'''
import rospy
import random
from geometry_msgs.msg import Twist
class RandomBot():
def __init__(self, bot_name="NoName"):
# bot name
self.name = bot_name
# velocity publisher
self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)
def calcTwist(self):
value = random.randint(1,1000)
x = 0.2
th = 0
twist = Twist()
twist.linear.x = x; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th
return twist
def strategy(self):
r = rospy.Rate(1) # change speed 1fps
target_speed = 0
target_turn = 0
control_speed = 0
control_turn = 0
while not rospy.is_shutdown():
twist = self.calcTwist()
print(twist)
self.vel_pub.publish(twist)
r.sleep()
if __name__ == '__main__':
rospy.init_node('random_run')
bot = RandomBot('Random')
bot.strategy()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | burger_war_dev/scripts/randomRun.py | Satori063/burger_war_dev |
"""
localhost
---------
"""
import socket, os
def get_localhostname():
if os.environ.get("DOC", False) == True:
return socket.gethostname()
else:
return "sphinx-doc"
def get_ip_adress():
if os.environ.get("DOC", False) == True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
except Exception:
pass
return "123.4.567.890"
localhostname: str #: the name of the local machine
localhostname = get_localhostname()
localhost: str #: the localhost
localhost = "127.0.0.1"
localip: str #: the local ip address
localip = get_ip_adress()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | liesl/streams/__init__.py | jasmainak/pyliesl |
from flask import jsonify
class CustomResponse :
def __init__(self, statuscode, data):
self.statuscode = {"status":statuscode, "response_body" : data}
self.response = {"status":statuscode, "data" : data}
self.data_out = data
def getres(self):
return jsonify(self.statuscode)
def jsonify_res(self):
return jsonify(self.response)
def jsonify_data(self):
return jsonify(self.data_out)
def getresjson(self):
return self.statuscode
def get_res_json(self):
return self.response
def get_res_json_data(self):
return self.data_out
@staticmethod
def jsonify(request):
return jsonify(request) | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | anuvaad-etl/anuvaad-nmt-models-fetch/src/models/response.py | ManavTriesStuff/anuvaad |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successfully(self):
email = 'test@test.com'
password = '12345'
user = get_user_model().objects.create_user(
email=email,
password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, '1234')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='1234')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(email='test@test.com',
password='123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api |
from decrypt_file import decrypt
from get_commands import fetch_commands
import netmiko
import os
import concurrent.futures
hosts = decrypt(f'{os.getcwd()}/device_json.gpg')
def send_commands(connection, host, commands):
connection.send_config_set(commands)
return
def run(ip_address):
for device in hosts:
device_info = {
"username": hosts[device][0],
"port": 22,
"device_type": hosts[device][-2],
"host": ip_address,
"verbose": True,
"password": hosts[device][1]
}
connect = netmiko.ConnectHandler(**device_info)
commands = fetch_commands(hosts[device][-1])
send_commands(connect, device_info['host'], commands)
return
if __name__ == '__main__':
with concurrent.futures.ThreadPoolExecutor() as executor:
host_addresses = [hosts[ip][2] for ip in hosts]
executor.map(run, host_addresses)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | main.py | flatman123/device_auto_config_v0.0.1 |
from django.db import models
from .utils import unique_id, get_object_or_none
class BaseModel(models.Model):
"""Base model"""
id = models.CharField(max_length=22, editable=False, primary_key=True)
def __make_id(self):
uid = unique_id()[::2] # Limited to 11 characters
obj = get_object_or_none(self.__class__, pk=uid)
self.id = uid if not obj else ''
def save(self, *args, **kwargs):
while not self.id: self.__make_id()
super(BaseModel, self).save(*args, **kwargs)
class Meta:
abstract = True
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | common/models.py | ssa17021992/djrest |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ai.h2o.sparkling.ml.params.H2OTypeConverters import H2OTypeConverters
from pyspark.ml.param import *
class HasInitialBiases(Params):
initialBiases = Param(
Params._dummy(),
"initialBiases",
"A array of weight vectors to be used for bias initialization of every network layer. "
"If this parameter is set, the parameter 'initialWeights' has to be set as well.",
H2OTypeConverters.toNullableListDenseVector())
def getInitialBiases(self):
return self.getOrDefault(self.initialBiases)
def setInitialBiases(self, value):
return self._set(initialBiases=value)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | py/src/ai/h2o/sparkling/ml/params/HasInitialBiases.py | krmartin/sparkling-water |
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.minigame.Distributed7StudTable
from pirates.minigame import PlayingCardGlobals
from pirates.minigame import DistributedPokerTable
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import Point3, Vec3
from pirates.piratesbase import PLocalizer
class Distributed7StudTable(DistributedPokerTable.DistributedPokerTable):
__module__ = __name__
def __init__(self, cr):
DistributedPokerTable.DistributedPokerTable.__init__(self, cr, '7stud', numRounds=6)
self.maxCommunityCards = 0
self.maxHandCards = 7
self.gameType = 1
def getGameType(self):
return PlayingCardGlobals.SevenStud
def getInteractText(self):
return PLocalizer.InteractTable7StudPoker
def getSitDownText(self):
return PLocalizer.PokerSitDown7StudPoker
def dealerAnim(self, round):
deals = Sequence()
if round == 0:
if self.isLocalAvatarSeated():
self.gui.disableAction()
self.gui.clearTable()
for card in self.PocketCards:
card.hide()
if round == 1:
deals.append(self.dealPlayerCards(numCards=3))
if round in [2, 3, 4, 5]:
deals.append(self.dealPlayerCards(numCards=1))
return deals
def checkForVisiblePair(self):
return self.sevenStudCheckForVisiblePair(self.playerHands) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | pirates/minigame/Distributed7StudTable.py | itsyaboyrocket/pirates |
class Country:
def __init__(self):
self.code = ""
self.name = ""
self.population = ""
self.continent = ""
self.surfaceArea = ""
def __str__(self):
return f"Country [code= {self.code}, name= {self.name}, continent= {self.continent}, population= {self.population}]"
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | module05-xml.processing.using.python/world/domain.py | deepcloudlabs/dcl162-2020-sep-02 |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Björn Larsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Robot(object):
def __init__(self):
pass
def setup(self):
pass
def update(self):
pass
def purge(self):
pass | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | robot/control/controller.py | avasanthc/Autonomous-Robot-Code |
import unittest
from os import path
import json
import ndex.beta.layouts as layouts
from ndex.networkn import NdexGraph
HERE = path.abspath(path.dirname(__file__))
class NetworkNConstructorTests(unittest.TestCase):
def test1(self):
with open(path.join(HERE, 'tiny_corpus.cx'),'r') as cx_file:
cx = json.load(cx_file)
G = NdexGraph(cx=cx)
self.assertEqual(len(G.edgemap),37 )
self.assertEqual(len(G.node), 37)
self.assertEqual(G.provenance['entity']['properties'][0]['name'], 'edge count')
self.assertEqual(G.provenance['entity']['properties'][0]['value'], '37')
self.assertEqual(len(G.support_map), 15)
self.assertEqual(len(G.citation_map), 1)
self.assertEqual(len(G.function_term_map),35)
self.assertEqual(len(G.node_citation_map),0)
self.assertEqual(len(G.node_support_map), 0)
self.assertEqual(len(G.node_citation_map),0)
self.assertEqual(len(G.reified_edges), 2)
self.assertEqual(len(G.edge_citation_map),37)
self.assertEqual(len(G.edge_support_map),37)
self.assertEqual(len(G.namespaces),39)
def test2(self):
with open (path.join(HERE,'filtered.cx'),'r') as cx_file:
cx=json.load(cx_file)
g = NdexGraph(cx)
layouts.apply_directed_flow_layout(g)
self.assertEqual(g.node[80]['diffusion_input'], 1.0)
if __name__ == '__main__':
unittest.main() | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | ndex/test/test_NetworkNConstructor.py | idekerlab/heat-diffusion |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import BitcoinTestFramework
class UptimeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_uptime()
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
assert self.nodes[0].uptime() >= wait_time
if __name__ == '__main__':
UptimeTest().main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | test/functional/rpc_uptime.py | HUSKI3/Neblio-Node |
import os, sys, subprocess
def writetofile(args):
with open(args[0], 'w') as f:
f.write(' '.join(args[1:]))
def writeenvtofile(args):
with open(args[0], 'w') as f:
f.write(os.environ[args[1]])
def writesubprocessenvtofile(args):
with open(args[0], 'w') as f:
p = subprocess.Popen([sys.executable, "-c",
"import os; print(os.environ['%s'])" % args[1]],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
assert p.returncode == 0
f.write(stdout.decode("utf-8"))
def convertasplode(arg):
try:
return int(arg)
except:
return (None if arg == "None" else arg)
def asplode(args):
arg0 = convertasplode(args[0])
sys.exit(arg0)
def asplode_return(args):
arg0 = convertasplode(args[0])
return arg0
def asplode_raise(args):
raise Exception(args[0])
def delayloadfn(args):
import delayload
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | tests/pycmd.py | JeremyMarshall/pymake |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class ListImageFiltersNegativeTestJSON(base.BaseV2ComputeTest):
"""Negative tests of listing images using compute images API
Negative tests of listing images using compute images API with
microversion less than 2.36.
"""
max_microversion = '2.35'
@classmethod
def skip_checks(cls):
super(ListImageFiltersNegativeTestJSON, cls).skip_checks()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(ListImageFiltersNegativeTestJSON, cls).setup_clients()
cls.client = cls.compute_images_client
@decorators.attr(type=['negative'])
@decorators.idempotent_id('391b0440-432c-4d4b-b5da-c5096aa247eb')
def test_get_nonexistent_image(self):
"""Test getting a non existent image should fail"""
nonexistent_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_image,
nonexistent_image)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | tempest/api/compute/images/test_list_image_filters_negative.py | cityofships/tempest |
import logging
from ambianic.configuration import get_root_config
from dynaconf.vendor.box.exceptions import BoxKeyError
from fastapi import HTTPException, status
from pydantic import BaseModel
log = logging.getLogger(__name__)
# Base class for pipeline input sources such as cameras and microphones
class SensorSource(BaseModel):
id: str
uri: str
type: str
live: bool = True
source_types = ["video", "audio", "image"]
def get(source_id):
"""Retrieve a source by id"""
log.info("Get source_id=%s", source_id)
try:
root_config = get_root_config()
source = root_config.sources[source_id]
except BoxKeyError:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="source id not found"
)
return source
def remove(source_id):
"""Remove source by id"""
log.info("Removing source_id=%s", source_id)
get(source_id)
root_config = get_root_config()
del root_config.sources[source_id]
def save(source: SensorSource):
"""Save source configuration information"""
log.info("Saving source_id=%s", source.id)
root_config = get_root_config()
root_config.sources[source.id] = source
return root_config.sources[source.id]
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | src/ambianic/webapp/server/config_sources.py | ivelin/ambianic-edge |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.base import Tool
from polygraphy.tools.debug.subtool import (Build, DiffTactics, Precision,
Reduce, Repeat)
class Debug(Tool):
"""
[EXPERIMENTAL] Debug model accuracy issues.
"""
def __init__(self):
super().__init__("debug")
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Debug Subtools", dest="subtool")
subparsers.required = True
SUBTOOLS = [
Build(),
Precision(),
DiffTactics(),
Reduce(),
Repeat(),
]
for subtool in SUBTOOLS:
subtool.setup_parser(subparsers)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | tools/Polygraphy/polygraphy/tools/debug/debug.py | hwkyai/TensorRT |
import os
from pathlib import Path
from torchaudio.datasets import utils as dataset_utils
from torchaudio.datasets.commonvoice import COMMONVOICE
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_asset_path,
)
class TestWalkFiles(TempDirMixin, TorchaudioTestCase):
root = None
expected = None
def _add_file(self, *parts):
path = self.get_temp_path(*parts)
self.expected.append(path)
Path(path).touch()
def setUp(self):
self.root = self.get_temp_path()
self.expected = []
# level 1
for filename in ['a.txt', 'b.txt', 'c.txt']:
self._add_file(filename)
# level 2
for dir1 in ['d1', 'd2', 'd3']:
for filename in ['d.txt', 'e.txt', 'f.txt']:
self._add_file(dir1, filename)
# level 3
for dir2 in ['d1', 'd2', 'd3']:
for filename in ['g.txt', 'h.txt', 'i.txt']:
self._add_file(dir1, dir2, filename)
print('\n'.join(self.expected))
def test_walk_files(self):
"""walk_files should traverse files in alphabetical order"""
n_ites = 0
for i, path in enumerate(dataset_utils.walk_files(self.root, '.txt', prefix=True)):
found = os.path.join(self.root, path)
assert found == self.expected[i]
n_ites += 1
assert n_ites == len(self.expected)
class TestIterator(TorchaudioTestCase):
backend = 'default'
path = get_asset_path()
def test_disckcache_iterator(self):
data = COMMONVOICE(self.path, url="tatar")
data = dataset_utils.diskcache_iterator(data)
# Save
data[0]
# Load
data[0]
def test_bg_iterator(self):
data = COMMONVOICE(self.path, url="tatar")
data = dataset_utils.bg_iterator(data, 5)
for _ in data:
pass
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | test/torchaudio_unittest/datasets/utils_test.py | adefossez/audio |
# -*- coding: utf-8 -*-
from multiplierless.csd import to_csd, to_csdfixed, to_decimal
def test_csd1():
"""[summary]"""
csdstr = "+00-00+"
csdnumber = to_decimal(csdstr)
csdnew = to_csd(csdnumber)
assert csdnew == csdstr
def test_csd2():
"""[summary]"""
csdstr = "+00-.000+"
csdnumber = to_decimal(csdstr)
csdnew = to_csd(csdnumber, places=4)
assert csdnew == csdstr
def test_csd3():
"""[summary]"""
csdstr = "+00-.000+"
csdnumber = to_decimal(csdstr)
csdnew = to_csdfixed(csdnumber, nnz=3)
assert csdnew == csdstr
def test_csd4():
"""[summary]"""
n = 545
csdstr = to_csd(n)
n2 = to_decimal(csdstr)
assert n == n2
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | tests/test_csd.py | luk036/multiplierless-py |
import os
from bson.json_util import dumps
from flask_restful import Resource
from flask import Response
from utils.cache import cache
from utils.deepzoom import get_slide
class DeepZoom(Resource):
def __init__(self, config):
"""initialize DeepZoom resource
Args:
db: mongo db connection
config: application configurations
opt: deep zoom configurations
Returns:
None
"""
self.config = config
@cache.cached()
def get(self, path):
"""
Get XML metadata fo the deep zoom image
---
tags:
- Deep Zoom
parameters:
- in: path
name: path
description: Example SLIDES/ADRC/DG_ADRC_Slides/ADRC59-164/aBeta/ADRC59-164_1A_AB.ndpi
type: string
required: true
default: ""
responses:
200:
description: XML metadata for the Deep Zoom file
404:
description: Invalid path or openslide error
"""
path = "/" + path
if not os.path.exists(path):
resp = {"status": 404, "message": "Path not found: " + path}
return Response(dumps(resp), status=404, mimetype='application/json')
slide = get_slide(path)
if slide == None:
Response("", status=404, mimetype='application/xml')
else:
return Response(slide.get_dzi(self.config['deepzoom_format']), status=200, mimetype='application/xml')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | routes/v1/DeepZoom.py | scimk/path_deepzoom |
# Monotonic array optimised solution
# Time complexity = O(n) | Space Complexity : O(1)
# method to check if the direction breaks
def directionChanged(direction, previous, current) :
difference = current - previous
if direction > 0 :
return difference < 0
return difference > 0
# function to check if the array is monotic or not
def isMonotonicArray(arr : list) -> bool :
if len(arr) <= 2 :
return True
direction = arr[1] - arr[0]
for i in range(2, len(arr)) :
if direction == 0 :
direction = arr[i] - arr[i-1]
continue
if directionChanged(direction, arr[i-1], arr[i]) :
return True
return False
# driver code
if __name__ == '__main__' :
print(isMonotonicArray([1,2,3,4,5])) | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | Arrays/Task.py | ayushkr459/Data-Structures-And-Algorithms |
import sys
import os.path
currentDir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(currentDir))
import aiohttp
import asyncio
import pypillary.request as request
import pypillary.model as model
class TestImageRequests:
def __init__(self):
with open(currentDir + "/clientInfo.txt", "r") as file:
self.clientId = file.readline().replace("\n", "")
self.clientSecret = file.readline().replace("\n", "")
def executeAsync(self, req):
async def execute(req):
async with aiohttp.ClientSession() as sess:
await req.execute(sess)
loop = asyncio.get_event_loop()
loop.run_until_complete(execute(req))
def test_ctor(self):
req = request.ImageRequest(self.clientId, self.clientSecret, "uTHY8_SvFPOXr16D5oAAUg")
assert req.requestString == "https://a.mapillary.com/v3/images/uTHY8_SvFPOXr16D5oAAUg"
def test_execute(self):
img_req = request.ImageRequest(self.clientId, self.clientSecret, "uTHY8_SvFPOXr16D5oAAUg")
self.executeAsync(img_req)
img = img_req.response
assert isinstance(img, model.Image)
assert img.key == "uTHY8_SvFPOXr16D5oAAUg"
assert img.captureDate is not None | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | tests/ImageRequestsTest.py | ShkalikovOleh/PyPillary |
from sanic.views import HTTPMethodView
from sanic.response import json, text, html
from jinja2 import Environment, PackageLoader, select_autoescape, FileSystemLoader
import os
class BaseController(HTTPMethodView):
def __init__(self):
self.json = json
self.text = text
self.html = html
self.env = Environment(
variable_start_string = '{',
variable_end_string = '}',
loader = PackageLoader('webgame', 'templates'),
autoescape = select_autoescape(['html', 'xml'])
)
self.setting = {
'template_dir': 'views'
}
def render(self, filename, **kw):
'''渲染文件模板'''
# filename = os.path.join(self.setting['template_dir'], filename)
# print(filename)
print(self.env.list_templates())
template = self.env.get_template(filename)
render = template.render(**kw)
return self.html(render)
def render_string(self, string, **kw):
'''渲染字符串模板'''
render = self.env.from_string(string).render(**kw)
return self.html(render)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | webgame/controller/BaseController.py | xiaojieluo/webgame |
#add parent dir to find package. Only needed for source code build, pip install doesn't need it.
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import gym
from pybullet_envs.bullet.cartpole_bullet import CartPoleBulletEnv
from baselines import deepq
def callback(lcl, glb):
# stop training if reward exceeds 199
is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
return is_solved
def main():
env = CartPoleBulletEnv(renders=False)
model = deepq.models.mlp([64])
act = deepq.learn(
env,
q_func=model,
lr=1e-3,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
print("Saving model to cartpole_model.pkl")
act.save("cartpole_model.pkl")
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py | frk2/bullet3 |
from unittest import TestCase
from mock import patch
from .. import constants
class mock_service_exeTestCase(TestCase):
def setUp(self):
super(mock_service_exeTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.mock_os = patch.object(constants, 'os', autospec=True).start()
def test_other(self):
self.mock_os.name = 'posix'
self.assertEqual(constants.mock_service_exe(), 'pact-mock-service')
def test_windows(self):
self.mock_os.name = 'nt'
self.assertEqual(constants.mock_service_exe(), 'pact-mock-service.bat')
class provider_verifier_exeTestCase(TestCase):
def setUp(self):
super(provider_verifier_exeTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.mock_os = patch.object(constants, 'os', autospec=True).start()
def test_other(self):
self.mock_os.name = 'posix'
self.assertEqual(
constants.provider_verifier_exe(), 'pact-provider-verifier')
def test_windows(self):
self.mock_os.name = 'nt'
self.assertEqual(
constants.provider_verifier_exe(), 'pact-provider-verifier.bat')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | pact/test/test_constants.py | dwang7/pact-python |
import pytest
from eth_utils import (
decode_hex,
)
from eth_keys import keys
from trinity.utils.chains import (
get_local_data_dir,
get_database_dir,
get_nodekey_path,
ChainConfig,
)
from trinity.utils.filesystem import (
is_same_path,
)
def test_chain_config_computed_properties():
data_dir = get_local_data_dir('muffin')
chain_config = ChainConfig(network_id=1234, data_dir=data_dir)
assert chain_config.network_id == 1234
assert chain_config.data_dir == data_dir
assert chain_config.database_dir == get_database_dir(data_dir)
assert chain_config.nodekey_path == get_nodekey_path(data_dir)
def test_chain_config_explicit_properties():
chain_config = ChainConfig(
network_id=1,
data_dir='./data-dir',
nodekey_path='./nodekey'
)
assert is_same_path(chain_config.data_dir, './data-dir')
assert is_same_path(chain_config.nodekey_path, './nodekey')
NODEKEY = '0xd18445cc77139cd8e09110e99c9384f0601bd2dfa5b230cda917df7e56b69949'
@pytest.fixture
def nodekey_bytes():
_nodekey_bytes = decode_hex(NODEKEY)
return _nodekey_bytes
@pytest.fixture
def nodekey_path(tmpdir, nodekey_bytes):
nodekey_file = tmpdir.mkdir('temp-nodekey-dir').join('nodekey')
nodekey_file.write_binary(nodekey_bytes)
return str(nodekey_file)
def test_chain_config_nodekey_loading(nodekey_bytes, nodekey_path):
chain_config = ChainConfig(
network_id=1,
nodekey_path=nodekey_path,
)
assert chain_config.nodekey.to_bytes() == nodekey_bytes
@pytest.mark.parametrize('as_bytes', (True, False))
def test_chain_config_explictely_provided_nodekey(nodekey_bytes, as_bytes):
chain_config = ChainConfig(
network_id=1,
nodekey=nodekey_bytes if as_bytes else keys.PrivateKey(nodekey_bytes),
)
assert chain_config.nodekey.to_bytes() == nodekey_bytes
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | tests/trinity/core/chains-utils/test_chain_config_object.py | theresume/py-evm |
#
from binho.errors import DriverCapabilityError
class binhoAccessory:
""" Base class for objects representing accessory boards. """
# Optional: subclasses can set this variable to override their accessory name.
# If not provided, their name will automatically be taken from their class names.
# This typically doesn't need to be overridden.
ACCESSORY_NAME = None
@classmethod
def get_name(cls):
""" Default implementation of a function that returns a class's name. """
# If we have an overridden accessory name, return it.
if cls.ACCESSORY_NAME:
return cls.ACCESSORY_NAME
# Otherwise, return the given class's name.
return cls.__name__
@classmethod
def available_accessories(cls):
""" Returns a list of available neighbors. """
return [accessory.get_name() for accessory in cls.__subclasses__()]
@classmethod
def from_name(cls, name, board, *args, **kwargs):
""" Creates a new binhoAccessory object from its name. """
target_name = name.lower()
for subclass in cls.__subclasses__():
# Grab the class's name, and check to see if it matches ours.
subclass_name = subclass.get_name()
# If this class matches our target name, this is the class we're looking for!
# Create an instance and return it.
if target_name == subclass_name.lower():
return subclass(board, *args, **kwargs)
raise DriverCapabilityError("No known driver for accessory '{}'.".format(name))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | binho/accessory.py | binhollc/binho-python-package |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundJointaccountOperationApproveResponse(AlipayResponse):
def __init__(self):
super(AlipayFundJointaccountOperationApproveResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayFundJointaccountOperationApproveResponse, self).parse_response_content(response_content)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | alipay/aop/api/response/AlipayFundJointaccountOperationApproveResponse.py | antopen/alipay-sdk-python-all |
"""
Created by Epic at 9/1/20
"""
class HTTPException(Exception):
def __init__(self, request, data):
self.request = request
self.data = data
super().__init__(data)
class Forbidden(HTTPException):
pass
class NotFound(HTTPException):
def __init__(self, request):
self.request = request
Exception.__init__(self, "The selected resource was not found")
class Unauthorized(HTTPException):
def __init__(self, request):
self.request = request
Exception.__init__(self, "You are not authorized to view this resource")
class LoginException(Exception):
pass
class InvalidToken(LoginException):
def __init__(self):
super().__init__("Invalid token provided.")
class ShardingNotSupported(LoginException):
def __init__(self):
super().__init__("SpeedCord does not support sharding at this time.")
class ConnectionsExceeded(LoginException):
def __init__(self):
super().__init__("You have exceeded your gateway connection limits")
class GatewayException(Exception):
pass
class GatewayClosed(GatewayException):
def __init__(self):
super().__init__("You can't do this as the gateway is closed.")
class GatewayUnavailable(GatewayException):
def __init__(self):
super().__init__("Can't reach the discord gateway. Have you tried checking your internet?")
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | speedcord/exceptions.py | MM-coder/speedcord |
import sys
import os
import time
from kafka import KafkaProducer
# USAGE:
# python3 ./temperature-send.py host:port kafka-topic device_id
#
# Messages are sent to the kafka broker residing at host:port with topic kafka-topic
# They are being formatted as a key-value pair consisting of device_id->temperature
#
# @author patryk.szczypien@gmail.com
def publish_message(producer_instance, topic_name, key, value):
try:
key_bytes = bytes(key, encoding='utf-8')
value_bytes = bytes(value, encoding='utf-8')
producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
producer_instance.flush()
print('Message published successfully.')
except Exception as ex:
print('Exception in publishing message')
print(str(ex))
def connect_kafka_producer(server):
_producer = None
try:
_producer = KafkaProducer(bootstrap_servers=[server], api_version=(0, 10))
except Exception as ex:
print('Exception while connecting Kafka')
print(str(ex))
finally:
return _producer
def getCPUtemperature():
res = os.popen('vcgencmd measure_temp').readline()
return(res.replace("temp=","").replace("'C\n",""))
if len(sys.argv) != 4:
print ("""
USAGE:
python3 ./temperature-send.py host:port kafka-topic device_id
""")
sys.exit()
print (getCPUtemperature());
server = sys.argv[1]
topic = sys.argv[2]
host_key = sys.argv[3]
producer = connect_kafka_producer(server)
try:
while True:
publish_message(producer, topic, host_key, getCPUtemperature())
time.sleep(5)
except KeyboardInterrupt:
print('Manual break by user')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | temperature-send.py | bredlej/alivetest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.