source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from tatau_core.models import TaskDeclaration
from tatau_core.models.task import ListEstimationAssignments
from tatau_core.utils.ipfs import Directory
class Estimator:
    """Produces train-cost estimates for a task declaration (producer side)."""
    @staticmethod
    def get_data_for_estimate(task_declaration):
        """Build the payload an estimator node needs: one dataset chunk plus the model code.

        NOTE(review): assumes the train dir listing contains at least one
        sub-directory (``dirs[0]``) — confirm upstream guarantees this.
        """
        dataset = task_declaration.dataset
        ipfs_dir = Directory(dataset.train_dir_ipfs)
        dirs, files = ipfs_dir.ls()
        return {
            'chunk_ipfs': dirs[0].multihash,
            'model_code_ipfs': task_declaration.train_model.code_ipfs,
        }
    @staticmethod
    def estimate(task_declaration: TaskDeclaration, finished_assignments: ListEstimationAssignments):
        """Estimate total TFLOPs for the task from per-chunk estimation results.

        Returns ``(estimate, failed)``: average per-chunk TFLOPs x chunk count
        x epochs, or ``(0.0, True)`` as soon as any assignment reported an error.
        """
        failed = False
        # NOTE(review): assert is stripped under `python -O`; consider an explicit raise.
        assert len(finished_assignments)
        sum_tflops = 0.0
        for estimation_assignment in finished_assignments:
            sum_tflops += estimation_assignment.estimation_result.tflops
            if estimation_assignment.estimation_result.error is not None:
                failed = True
                # Bail out on the first error; the partial sum is discarded.
                return 0.0, failed
        av_tflops = sum_tflops / len(finished_assignments)
        # One estimation chunk per top-level directory of the train dataset.
        ipfs_dir = Directory(task_declaration.dataset.train_dir_ipfs)
        dirs, files = ipfs_dir.ls()
        chunks_count = len(dirs)
        return av_tflops * chunks_count * task_declaration.epochs, failed
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | tatau_core/node/producer/estimator.py | makar21/core |
"""Run the simple SAT solver on the binary case of van der waerden's problem.
This problem asks for the smallest number n so that a binary number of n digits
that contains either j digits 0 or k digits 1, for given integers j and k.
"""
from __future__ import division
def _van_der_waerden_helper(j, n, sign):
    """Clauses forbidding length-``j`` monochromatic arithmetic progressions in 1..n.

    Each clause lists the ``j`` positions of one progression, multiplied by
    ``sign`` (+1 for color 0, -1 for color 1).
    """
    clauses = []
    largest_step = (n - 1) // (j - 1) + 1
    for step in range(1, largest_step + 1):
        # Progressions starting at `start` with this step must fit inside 1..n.
        for start in range(1, n - (j - 1) * step + 1):
            clauses.append([sign * (start + offset * step) for offset in range(j)])
    return clauses
def van_der_waerden(j, k, n):
    """Generate clauses for the van der Waerden problem.
    """
    color0 = _van_der_waerden_helper(j, n, +1)
    color1 = _van_der_waerden_helper(k, n, -1)
    return color0 + color1
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | simplesat/examples/van_der_waerden.py | msfschaffner/sat-solver |
import httplib
from django.core.urlresolvers import reverse
from oscar_testsupport.factories import create_order
from oscar_testsupport.testcases import WebTestCase
class TestAnAnonymousUser(WebTestCase):
    """Anonymous order-status page: accessible only with a valid (number, hash) pair."""
    def test_gets_a_404_when_requesting_an_unknown_order(self):
        """An order number that does not exist returns 404."""
        path = reverse('customer:anon-order', kwargs={'order_number': 1000,
                                                      'hash': '1231231232'})
        # status="*" stops WebTest raising on the expected error status.
        response = self.app.get(path, status="*")
        self.assertEqual(httplib.NOT_FOUND, response.status_code)
    def test_can_see_order_status(self):
        """A real order with its correct verification hash is viewable."""
        order = create_order()
        path = reverse('customer:anon-order',
                       kwargs={'order_number': order.number,
                               'hash': order.verification_hash()})
        response = self.app.get(path)
        self.assertEqual(httplib.OK, response.status_code)
    def test_gets_404_when_using_incorrect_hash(self):
        """A real order with a wrong hash is treated as not found (no oracle)."""
        order = create_order()
        path = reverse('customer:anon-order',
                       kwargs={'order_number': order.number,
                               'hash': 'bad'})
        response = self.app.get(path, status="*")
        self.assertEqual(httplib.NOT_FOUND, response.status_code)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | tests/functional/customer/order_status_tests.py | endgame/django-oscar |
"""Create your api serializers here."""
import numpy as np
from django.core.serializers.json import DjangoJSONEncoder
from rest_framework import serializers
class NpEncoder(DjangoJSONEncoder):
    """JSON encoder that also understands NumPy scalars and arrays."""
    def default(self, o):
        """Serialize implementation of NpEncoder serializer.

        Converts NumPy integers/floats/bools to their Python equivalents and
        ndarrays to (nested) lists; everything else is delegated to
        DjangoJSONEncoder.

        Args:
            o: The object you want to serialize.
        Returns:
            The serialized object.
        """
        if isinstance(o, np.integer):
            return int(o)
        if isinstance(o, np.floating):
            return float(o)
        # BUGFIX: np.bool_ is not a Python bool subclass and is not covered by
        # np.integer, so DjangoJSONEncoder.default() used to raise TypeError.
        if isinstance(o, np.bool_):
            return bool(o)
        if isinstance(o, np.ndarray):
            return o.tolist()
        return super().default(o)
class NotificationSerializer(serializers.Serializer):
    """Serialize Notification data."""
    # Short notification headline (max 200 chars).
    title = serializers.CharField(max_length=200)
    # Message body shown to the user (max 200 chars).
    body = serializers.CharField(max_length=200)
    # Device registration token the push notification is addressed to.
    token = serializers.CharField()
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
... | 3 | api/serializers.py | PrynsTag/oneBarangay |
import glob
import logging
from importlib import import_module
from os.path import basename, isdir, isfile
from pathlib import Path
from aiogram import Dispatcher
class ModuleManager:
    """Discovers and loads bot modules — files exposing a ``setup(dp)`` hook."""
    def __init__(self, dp: Dispatcher):
        # Dispatcher every loaded module's setup() is bound to.
        self.dp = dp
        # Project root: two levels above this file.
        self.root = Path(__file__).parent.parent
    def load_path(self, path: str):
        """Load every top-level ``.py`` file found under ``root/path``."""
        mod_paths = glob.glob(f"{self.root}/{path}/*.py")
        all_modules = [
            basename(module)[:-3]  # strip the ".py" suffix
            for module in mod_paths
            if isfile(module) and module.endswith(".py")
        ]
        for module in all_modules:
            self.load(path.replace("/", ".") + f".{module}")
    def load(self, module: str):
        """Import ``app.<module>`` and run its ``setup(dp)``.

        Any failure (missing module, missing/non-callable setup, setup raising)
        is fatal: it is logged and the whole process exits via SystemExit.
        """
        try:
            imp_module = import_module("app." + module)
        except ModuleNotFoundError:
            logging.error(f"Module <{module}> was not found.")
            raise SystemExit()
        if not hasattr(imp_module, "setup"):
            logging.error(f"Module <{module}> doesn't have <setup>.")
            raise SystemExit()
        if not callable(imp_module.setup):
            logging.error(f"Module <{module}> doesn't have callable <setup>.")
            raise SystemExit()
        try:
            imp_module.setup(self.dp)
        except Exception as error:
            logging.exception(f"An error occured in <{module}>: {error}")
            raise SystemExit()
        logging.debug(f"Module <{module}> was loaded.")
        return module
    def load_all(self, modules: list):
        """
        Iterates through modules and loads them.
        """
        for module in modules:
            # Shortcut for %module%.__init__
            if module.startswith("$"):
                self.load(f"{module[1:]}.__init__")
            elif isdir(f"{self.root}/{module}/"):
                # A directory name: load every .py file it contains.
                self.load_path(module)
            else:
                self.load(module)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | app/misc/modular.py | Cicadadenis/999 |
import pandas as pd
from plotly import graph_objects as go
from expenses_report.chart_builder import ChartBuilder
from expenses_report.config import config
from expenses_report.preprocessing.data_provider import DataProvider
from expenses_report.visualizations.i_visualization import IVisualization
class TransactionBubblesVisualization(IVisualization):
    """Bubble chart of all transactions, sized by amount relative to the category total."""
    # Kept for backward compatibility; instances get their own dict in __init__.
    _category_values = dict()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # BUGFIX: the class-level dict above was shared by every instance,
        # so data from one visualization leaked into the next. Each instance
        # now owns its store.
        self._category_values = {}
    def prepare_data(self, data: DataProvider):
        """
        Preprocesses each transaction and calculates the relative amount within its category
        """
        RATIO = 'ratio'
        df_all = data.get_all_transactions()
        for category_name in config.categories.keys():
            # .copy() BUGFIX: writing a new column on a slice of df_all raised
            # SettingWithCopyWarning and could write into df_all.
            df_category = df_all[df_all[config.CATEGORY_MAIN_COL] == category_name].copy()
            category_total = df_category[config.ABSAMOUNT_COL].sum()
            df_category.loc[:, RATIO] = df_category[config.ABSAMOUNT_COL] / category_total
            x_axis = [pd.Timestamp(dt) for dt in pd.DatetimeIndex(df_category.index).values]
            if x_axis:
                self._category_values[category_name] = (x_axis,
                                                        df_category[config.ABSAMOUNT_COL].values,
                                                        df_category[RATIO].values,
                                                        df_category[config.LABEL].values)
    def build_visualization(self) -> go.Figure:
        """Assemble the bubble chart from the data prepared in prepare_data()."""
        return ChartBuilder.create_bubble_chart(self._category_values)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | expenses_report/visualizations/transaction_bubbles_visualization.py | kircher-sw/expenses-tracker |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...chartsheet import Chartsheet
class TestInitialisation(unittest.TestCase):
    """
    Test initialisation of the Chartsheet class and call a method.
    """
    def setUp(self):
        # Route the chartsheet's XML output into an in-memory buffer.
        self.fh = StringIO()
        self.chartsheet = Chartsheet()
        self.chartsheet._set_filehandle(self.fh)
    def test_xml_declaration(self):
        """Test Chartsheet xml_declaration()"""
        self.chartsheet._xml_declaration()
        exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | xlsxwriter/test/chartsheet/test_initialisation.py | haiyangd/XlsxWriter |
from nose.tools import assert_equals
from framework.pages.loginPage import loginPage
from framework.pages.headerPage import headerPage
from framework.core.webdriverfactory import WebDriverFactory
from framework.core.configuration import webdriver_configuration
class testLogin():
    """Functional login test against the demo admin site (nose-style lifecycle)."""
    # Target site; shared by all tests in the class.
    baseUrl = "http://twiindan.pythonanywhere.com/admin"
    @classmethod
    def setup_class(self):
        # NOTE(review): the first argument of a classmethod is conventionally `cls`.
        # Create one browser instance for the whole class.
        wdf = WebDriverFactory(webdriver_configuration)
        self.driver = wdf.getWebDriverInstance()
        self.login_page = loginPage(self.driver)
    def setup(self):
        # Runs before each test: go (back) to the login page.
        self.login_page.navigate()
    def test_correct_login(self):
        """Fill valid credentials and submit.

        NOTE(review): no explicit assertion — the test only fails if an
        element lookup or the submit raises.
        """
        self.login_page.locate_elements()
        self.login_page.fillUsername('user1')
        self.login_page.fillPassword('selenium')
        self.login_page.submitClick()
    @classmethod
    def teardown_class(self):
        # Close the browser after all tests in the class have run.
        self.driver.quit()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | 04_Selenium/framework/tests/testLogin.py | twiindan/selenium_lessons |
import unittest
import warnings
from dataclasses import dataclass
from transformers.convert_slow_tokenizer import SpmConverter
from transformers.testing_utils import get_tests_dir
@dataclass
class FakeOriginalTokenizer:
    """Minimal stand-in for a slow tokenizer: SpmConverter only reads ``vocab_file``."""
    vocab_file: str
class ConvertSlowTokenizerTest(unittest.TestCase):
    """Behavior of slow->fast tokenizer conversion."""
    def test_spm_converter_bytefallback_warning(self):
        """SpmConverter warns exactly once iff the spm model enables byte fallback."""
        spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model")
        spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
        original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback)
        # No byte fallback in the model -> conversion must stay silent.
        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_without_bytefallback)
        self.assertEqual(len(w), 0)
        original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback)
        # Byte fallback enabled -> exactly one warning about the unsupported option.
        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_with_bytefallback)
        self.assertEqual(len(w), 1)
        self.assertIn(
            (
                "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
                " which is not implemented in the fast tokenizers."
            ),
            str(w[0].message),
        )
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | tests/utils/test_convert_slow_tokenizer.py | manuelciosici/transformers |
import os
from flask import Flask, send_file, abort
# Imports and stuff.
webserver = Flask(__name__)
# Defines the web server.
@webserver.route("/")
def acc_denied():
return "You cannot browse this subdomain."
# Denies access if user tries to browse the subdomain.
@webserver.route("/<path:imageid>")
def i(imageid):
imageid = imageid.rstrip('/').split('.')[0]
x = False
for file in os.listdir("./i"):
if file.split('.')[0] == imageid:
x = True
return send_file("./i/" + file)
if not x:
abort(404)
# If there is a file with the name suggested, load the file.
if __name__ == '__main__':
webserver.run(port=34)
# Starts the web server on port 34.
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | i_server.py | JakeMakesStuff/aurorame.me |
"""
Subdivide Cells
~~~~~~~~~~~~~~~
Increase the number of triangles in a single, connected triangular mesh.
The :func:`pyvista.PolyDataFilters.subdivide` filter utilitizes three different
subdivision algorithms to subdivide a mesh's cells: `butterfly`, `loop`,
or `linear`.
"""
from pyvista import examples
import pyvista as pv
###############################################################################
# First, let's load a **triangulated** mesh to subdivide. We can use the
# :func:`pyvista.DataSetFilters.triangulate` filter to ensure the mesh we are
# using is purely triangles.
mesh = examples.download_bunny_coarse().triangulate()
cpos = [(-0.02788175062966399, 0.19293295656233056, 0.4334449972621349),
(-0.053260899930287015, 0.08881197167521734, -9.016948161029588e-05),
(-0.10170607813337212, 0.9686438023715356, -0.22668272496584665)]
###############################################################################
# Now, lets do a few subdivisions with the mesh and compare the results.
# Below is a helper function to make a comparison plot of thee different
# subdivisions.
def plot_subdivisions(mesh, a, b):
    """Compare ``mesh`` against its linear/butterfly/loop subdivisions at levels ``a`` and ``b``.

    Returns a linked 3x3 pyvista Plotter: column 0 shows the original mesh,
    columns 1-2 the two subdivision levels, one algorithm per row.
    """
    display_args = dict(show_edges=True, color=True)
    p = pv.Plotter(shape=(3,3))
    # Column 0: the original mesh, repeated on every row for comparison.
    for i in range(3):
        p.subplot(i,0)
        p.add_mesh(mesh, **display_args)
        p.add_text("Original Mesh")
    def row_plot(row, subfilter):
        # One row: the same subdivision algorithm at the two requested levels.
        subs = [a, b]
        for i in range(2):
            p.subplot(row, i+1)
            p.add_mesh(mesh.subdivide(subs[i], subfilter=subfilter), **display_args)
            p.add_text(f"{subfilter} subdivision of {subs[i]}")
    row_plot(0, "linear")
    row_plot(1, "butterfly")
    row_plot(2, "loop")
    p.link_views()
    p.view_isometric()
    return p
###############################################################################
# Run the subdivisions for 1 and 3 levels.
plotter = plot_subdivisions(mesh, 1, 3)
plotter.camera_position = cpos
plotter.show()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (e... | 3 | examples/01-filter/subdivide.py | whophil/pyvista |
def selection_sort(input_list):
    """Sort ``input_list`` in place with selection sort and return it.

    Raises:
        TypeError: if ``input_list`` is not a list.
    """
    if not isinstance(input_list, list):
        raise TypeError('input needs to be a list')

    def _argmin_from(start):
        # Index of the smallest element in input_list[start:].
        best = start
        for offset, candidate in enumerate(input_list[start:]):
            if candidate < input_list[best]:
                best = start + offset
        return best

    for position, current in enumerate(input_list):
        smallest = _argmin_from(position)
        if input_list[smallest] < current:
            input_list[position], input_list[smallest] = input_list[smallest], input_list[position]
    return input_list
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exc... | 3 | pyImplementations/selection_sort/index.py | veekas/thinkDataStructures |
#version 15:38
import random
import string
#name = 'zzz'
set_off = 23
def convert(name):
    """Deterministically derive a '#rrggbb' hex color string from ``name``.

    The characters i/y/9 are filtered out because their ord() values do not
    map cleanly through this scheme (see the author's note in name_to_color).
    """
    # Replace any i/y/9 in the input with 'g' before deriving anything.
    for i in range(len(name)):
        if name[i].lower() == 'i' or name[i].lower() == 'y' or name[i].lower() == '9':
            name = list(name)
            name[i] = 'g'
            name = ''.join(name)
    indx = 0
    c = 0
    # Pad the name to at least 6 characters with letters derived from it.
    while len(name) < 6:
        if c > 16:
            return '#ffb300'  # just in case it goes into an infinate Loop (probability is very, very low)
        c += 1
        new_letter = chr(65 + (ord(name[indx]) + set_off + (indx*6) )%25)  # this keeps the char within the range of A-Z in the asci table and adds variation in case the letter is the same (indx*6)
        if new_letter.lower() != 'i' and new_letter.lower() != 'y' and new_letter != '9':
            name = name + new_letter  # add the letter
        indx = (indx+1)%len(name)
    if len(name) > 6:
        name = name[:6]  # cut name if too long
    name = list(name)  # make it a list so we can edit it more easily
    # Map each character to a single hex digit (low nibble of the 2-char hex pair).
    for i in range(len(name)):
        Integer = (ord(name[i])+set_off)%16
        Hex = Integer.to_bytes(((Integer.bit_length() + 7) // 8),"big").hex()
        # NOTE(review): Integer == 0 gives bit_length 0 -> zero-length bytes ->
        # empty Hex -> a dropped digit and a short color; presumably never hit
        # for the filtered A-Z alphabet — confirm (name_to_color guards length).
        Hex = Hex[1:]
        name[i] = Hex
    name = ''.join(name)
    color = '#' + name
    return color
def name_to_color(name):
    """Return a displayable '#rrggbb' color for ``name``.

    Falls back to '#00f7ff' whenever convert() produced a malformed or
    too-bright color — per the original author's note, tkinter crashes on
    colors that are not exactly '#' followed by 6 hex digits.
    """
    color = convert(name)
    r = int(color[1:3], 16)
    g = int(color[3:5], 16)
    b = int(color[5:7], 16)
    # BUGFIX: the original condition `r<128 or g<128 or b<128 and len(name) == 7`
    # (a) let `and` bind tighter than `or`, and (b) measured the length of the
    # *input name* instead of the generated color string. The stated intent is
    # to accept only well-formed (7-char), not-too-bright colors.
    if (r < 128 or g < 128 or b < 128) and len(color) == 7:
        return color
    else:
        return '#00f7ff'  # some ord() chars aren't convertable (i, y and 9 are filtered in convert); this also guards against any malformed color, which would crash tkinter
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | 21-fs-ias-lec/16-nicknames-forward/subChat/Colorize.py | paultroeger/BACnet |
# pylint: disable=redefined-outer-name
import asyncio
import time
import pytest
DEFAULT_MAX_LATENCY = 10 * 1000
@pytest.mark.asyncio
async def test_slow_server(host):
    """Slow echo endpoint under load: a small batch must all succeed within the
    latency budget; a large burst may be shed with 429s (microbatch back-pressure)."""
    if not pytest.enable_microbatch:
        pytest.skip()
    A, B = 0.2, 1
    data = '{"a": %s, "b": %s}' % (A, B)
    time_start = time.time()
    req_count = 10
    tasks = tuple(
        pytest.assert_request(
            "POST",
            f"http://{host}/echo_with_delay",
            headers=(("Content-Type", "application/json"),),
            data=data,
            timeout=30,
            assert_status=200,
            assert_data=data.encode(),
        )
        for i in range(req_count)
    )
    await asyncio.gather(*tasks)
    # 10 parallel slow requests must all finish within the wall-clock budget.
    assert time.time() - time_start < 12
    req_count = 100
    tasks = tuple(
        pytest.assert_request(
            "POST",
            f"http://{host}/echo_with_delay",
            headers=(("Content-Type", "application/json"),),
            data=data,
            # Under a 100-request burst the server may shed load with 429.
            assert_status=lambda i: i in (200, 429),
        )
        for i in range(req_count)
    )
    await asyncio.gather(*tasks)
@pytest.mark.asyncio
async def test_fast_server(host):
    """Fast echo endpoint: 500 parallel requests must all succeed well within budget."""
    if not pytest.enable_microbatch:
        pytest.skip()
    A, B = 0.0002, 0.01
    data = '{"a": %s, "b": %s}' % (A, B)
    time_start = time.time()
    req_count = 500
    tasks = tuple(
        pytest.assert_request(
            "POST",
            f"http://{host}/echo_with_delay",
            headers=(("Content-Type", "application/json"),),
            data=data,
            timeout=30,
            assert_status=200,
            assert_data=data.encode(),
        )
        for i in range(req_count)
    )
    await asyncio.gather(*tasks)
    # Microbatching should keep total wall-clock time well under 5s.
    assert time.time() - time_start < 5
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | tests/integration/api_server/test_microbatch.py | theopinard/BentoML |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2020 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import typing
import coroutine
from typing import Any, Generator, TextIO
def follow(fp:TextIO, target:Generator[None, str, None], from_end:bool=False) \
        -> None:
    """Tail ``fp`` forever, pushing each new line into the ``target`` coroutine.

    When ``from_end`` is true, start at the current end of file (like tail -f).
    Never returns; poll-sleeps 100ms when no new data is available.
    """
    from_end and fp.seek(0, 2)  # seek(0, 2): jump to EOF before tailing
    while True:
        line = fp.readline()
        if not line:
            time.sleep(0.1)  # no new data yet; poll again shortly
            continue
        target.send(line)
@coroutine.corouine
# NOTE(review): decorator name looks like a typo of `coroutine`; it must match
# whatever the local ``coroutine`` module actually exports — confirm.
def printer() -> Generator[None, str, None]:
    """Coroutine sink that prints every line sent into it."""
    while True:
        line = (yield)
        print(line,)
if __name__ == '__main__':
fname = sys.argv[1] if len(sys.argv) > 1 else 'cofollow.py'
with open(fname) as fp:
follow(fp, printer())
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return... | 3 | python/coroutines/cofollow.py | ASMlover/study |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Apr, 2019
@author: Nathan de Lara <ndelara@enst.fr>
"""
from typing import Optional, Union
import numpy as np
from sknetwork.utils.check import check_seeds
def stack_seeds(n_row: int, n_col: int, seeds_row: Optional[Union[np.ndarray, dict]],
                seeds_col: Optional[Union[np.ndarray, dict]] = None, default_value: float = -1) -> np.ndarray:
    """Validate row and column seeds, fill missing sides, and stack them into one vector.

    When neither side is given, rows default to 1 and columns to ``default_value``;
    when only one side is missing, it is filled with ``default_value``.
    """
    if seeds_row is None and seeds_col is None:
        seeds_row, seeds_col = np.ones(n_row), default_value * np.ones(n_col)
    else:
        if seeds_row is None:
            seeds_row = default_value * np.ones(n_row)
        if seeds_col is None:
            seeds_col = default_value * np.ones(n_col)
    return np.hstack((check_seeds(seeds_row, n_row), check_seeds(seeds_col, n_col)))
def seeds2probs(n: int, seeds: Union[dict, np.ndarray] = None) -> np.ndarray:
    """Transform seeds into probability vector.

    Parameters
    ----------
    n : int
        Total number of samples.
    seeds :
        If ``None``, the uniform distribution is used.
        Otherwise, a non-negative, non-zero vector or a dictionary must be provided.

    Returns
    -------
    probs: np.ndarray
        A probability vector.
    """
    if seeds is None:
        # Uniform distribution over the n samples.
        return np.ones(n) / n
    seeds = check_seeds(seeds, n)
    probs = np.zeros_like(seeds, dtype=float)
    positive = seeds > 0
    probs[positive] = seeds[positive]
    total: float = probs.sum()
    if total > 0:
        return probs / total
    raise ValueError('At least one seeds must have a positive probability.')
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"an... | 3 | sknetwork/utils/seeds.py | altana-tech/scikit-network |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import Any, Dict, List, Optional
from fbpcp.entity.container_instance import ContainerInstance
from fbpcp.entity.mpc_instance import (
MPCInstance,
MPCParty,
MPCInstanceStatus,
)
from fbpcs.common.entity.instance_base import InstanceBase
class PCSMPCInstance(MPCInstance, InstanceBase):
    """MPCInstance specialised for PCS, with the InstanceBase persistence mixin."""
    @classmethod
    def create_instance(
        cls,
        instance_id: str,
        game_name: str,
        mpc_party: MPCParty,
        num_workers: int,
        server_ips: Optional[List[str]] = None,
        containers: Optional[List[ContainerInstance]] = None,
        status: MPCInstanceStatus = MPCInstanceStatus.UNKNOWN,
        game_args: Optional[List[Dict[str, Any]]] = None,
    ) -> "PCSMPCInstance":
        """Build a new instance; ``containers`` is normalised to an empty list, never None."""
        return cls(
            instance_id,
            game_name,
            mpc_party,
            num_workers,
            server_ips,
            containers or [],
            status,
            game_args,
        )
    @classmethod
    def from_mpc_instance(cls, mpc_instance: MPCInstance) -> "PCSMPCInstance":
        """Copy-construct from a plain MPCInstance, field by field."""
        return cls(
            mpc_instance.instance_id,
            mpc_instance.game_name,
            mpc_instance.mpc_party,
            mpc_instance.num_workers,
            mpc_instance.server_ips,
            mpc_instance.containers,
            mpc_instance.status,
            mpc_instance.game_args,
        )
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | fbpcs/common/entity/pcs_mpc_instance.py | hche11/fbpcs |
from pathlib import Path
def dir_touch(path_file) -> None:
    """Create the directory ``path_file`` (and any missing parents); no error if it exists."""
    target = Path(path_file)
    target.mkdir(parents=True, exist_ok=True)
def file_touch(path_file) -> None:
    """Create an empty file at ``path_file``, creating any missing parent directories."""
    target = Path(path_file)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.touch()
def index_or_default(lst, val, default=-1):
    """Return the first index of ``val`` in ``lst``, or ``default`` if absent.

    Uses EAFP: the original `val in lst` + `lst.index(val)` scanned the
    sequence twice; a single index() call with ValueError handling scans once.
    """
    try:
        return lst.index(val)
    except ValueError:
        return default
def print_info(logger, message):
    """Echo ``message`` to stdout and also record it at INFO level on ``logger``."""
    print(message)
    logger.info(message)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | src/tools.py | r3w0p/memeoff |
"""Tests for `democritus_pypi` module."""
from democritus_pypi import pypi_package_data, pypi_packages_new, pypi_packages_all_names, pypi_packages_recent
def test_pypi_package_data_1():
    """pypi_package_data returns live PyPI metadata for ioc-finder (network test)."""
    results = pypi_package_data('ioc-finder')
    assert results['info']['author'] == 'Floyd Hightower'
    assert results['info']['description'].startswith('# Observable Finder')
    assert '2.1.0' in results['releases']
    # try requesting details about a package using a specific version
    results = pypi_package_data('ioc-finder', version='1.0.5')
    assert results['info']['author'] == 'Floyd Hightower'
    assert results['info']['description'].startswith('Copyright (c) 2018, Floyd Hightower')
    assert results['info']['version'] == '1.0.5'
def test_pypi_packages_new_1():
    """The 'new packages' feed yields exactly 10 entries (network test)."""
    results = pypi_packages_new()
    assert len(results) == 10
def test_pypi_packages_recent_1():
    """The 'recent updates' feed yields exactly 10 entries (network test)."""
    results = pypi_packages_recent()
    assert len(results) == 10
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | tests/test_democritus_pypi.py | democritus-project/d8s-pypi |
from deeply.datasets.util import image_mask
from tensorflow_datasets.core import (
Version,
GeneratorBasedBuilder
)
_DATASET_HOMEPAGE = "https://polyp.grand-challenge.org/CVCClinicDB/"
_DATASET_KAGGLE = "achillesrasquinha/cvcclinicdb"
_DATASET_DESCRIPTION = """
CVC-ClinicDB is a database of frames extracted from colonoscopy videos. These frames contain several examples of polyps. In addition to the frames, we provide the ground truth for the polyps. This ground truth consists of a mask corresponding to the region covered by the polyp in the image
"""
_DATASET_CITATION = """\
Bernal, J., Sánchez, F. J., Fernández-Esparrach, G., Gil, D., Rodríguez, C., & Vilariño, F. (2015). WM-DOVA maps for accurate polyp highlighting in colonoscopy: Validation vs. saliency maps from physicians. Computerized Medical Imaging and Graphics, 43, 99-111
"""
class CVCClinicDB(GeneratorBasedBuilder):
    """
    The CVC-ClinicDB Dataset.

    TFDS builder that delegates dataset info, split generation and example
    generation to the shared image+mask helpers, downloading from Kaggle.
    """
    VERSION = Version("1.0.0")
    RELEASE_NOTES = {
        "1.0.0": "Initial Release"
    }
    def _info(self, *args, **kwargs):
        # Dataset metadata (features, homepage, citation) via the shared helper.
        return image_mask._info(self,
            description = _DATASET_DESCRIPTION,
            homepage = _DATASET_HOMEPAGE,
            citation = _DATASET_CITATION,
            *args, **kwargs
        )
    def _split_generators(self, *args, **kwargs):
        # Splits come from the Kaggle download handled by the helper.
        return image_mask._split_generators(self, kaggle = _DATASET_KAGGLE, *args, **kwargs)
    def _generate_examples(self, *args, **kwargs):
        # Yield (key, example) pairs via the shared image/mask walker.
        return image_mask._generate_examples(self, *args, **kwargs)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | src/deeply/datasets/colonoscopy/cvc_clinic_db.py | achillesrasquinha/deeply |
class Robaczek:
    """A little 'bug' walking on a grid: a position (x, y) plus a step length."""

    def __init__(self, x, y, krok):
        """Start at (x, y) with step size ``krok``."""
        self.x = x
        self.y = y
        self.krok = krok

    def zmien_robaczka(self, x1, y1, krok1):
        """Reset the position to (x1, y1) and the step size to ``krok1``."""
        self.x, self.y, self.krok = x1, y1, krok1

    def gora(self, ile):
        """Move ``ile`` steps up."""
        self.y = self.y + ile * self.krok

    def dol(self, ile):
        """Move ``ile`` steps down."""
        self.y = self.y - ile * self.krok

    def prawo(self, ile):
        """Move ``ile`` steps right."""
        self.x = self.x + ile * self.krok

    def lewo(self, ile):
        """Move ``ile`` steps left."""
        self.x = self.x - ile * self.krok

    def gdzie_jestes(self):
        """Return the current (x, y) position."""
        return (self.x, self.y)
print("podstawowy robaczek Rob o wsp: x=0, y=0 i kroku=1")
Rob=Robaczek(0, 0, 1)
print("Rob.gora(2)")
Rob.gora(2)
print(Rob.gdzie_jestes())
print("Rob.prawo(3)")
Rob.prawo(3)
print(Rob.gdzie_jestes())
print("Rob.dol(3)")
Rob.dol(3)
print(Rob.gdzie_jestes())
print("Rob.lewo(1)")
Rob.lewo(1)
print(Rob.gdzie_jestes())
print()
print("zmiana ustawień robaczka Roba na x=2, y=3 i krok=2")
Rob.zmien_robaczka(2,3,2)
print("Rob.gora(2)")
Rob.gora(2)
print(Rob.gdzie_jestes())
print("Rob.prawo(3)")
Rob.prawo(3)
print(Rob.gdzie_jestes())
print("Rob.dol(3)")
Rob.dol(3)
print(Rob.gdzie_jestes())
print("Rob.lewo(1)")
Rob.lewo(1)
print(Rob.gdzie_jestes()) | [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | cw4_wd_zadania/z7.py | KamilMarkusz96/wizualizacja-danych |
"""safe name
Revision ID: 9332f05cb7d6
Revises: 30228d27a270
Create Date: 2020-05-24 23:49:06.195432
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9332f05cb7d6'
down_revision = '30228d27a270'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable, uniquely-indexed ``nameSafe`` column to the ``base`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('base', sa.Column('nameSafe', sa.String(length=32), nullable=True))
    op.create_index(op.f('ix_base_nameSafe'), 'base', ['nameSafe'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the ``nameSafe`` index and column from ``base``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_base_nameSafe'), table_name='base')
    op.drop_column('base', 'nameSafe')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | migrations/versions/9332f05cb7d6_safe_name.py | maiorano84/ctaCompanion |
# *******************************************************************************
#
# Copyright (c) 2020-2021 David Briant. All rights reserved.
#
# *******************************************************************************
import sys
if hasattr(sys, '_TRACE_IMPORTS') and sys._TRACE_IMPORTS: print(__name__)
import os, os.path, json
from io import TextIOWrapper
from coppertop.pipe import *
from bones.core.types import pystr
from coppertop.std.text import strip
from coppertop.std.transforming import each
# Thin coppertop unary wrappers over the corresponding os / os.path helpers so
# they can be used in pipeline style (e.g. ``path >> isFile``).
getCwd = coppertop(style=unary1, name='getCwd')(os.getcwd)
isFile = coppertop(style=unary1, name='isFile')(os.path.isfile)
isDir = coppertop(style=unary1, name='isDir')(os.path.isdir)
dirEntries = coppertop(style=unary1, name='dirEntries')(os.listdir)
@coppertop(style=binary2)
def joinPath(a, b):
    """Join path ``a`` with ``b``, where ``b`` is one component or a list/tuple of components."""
    tail = b if isinstance(b, (list, tuple)) else [b]
    return os.path.join(a, *tail)
@coppertop
def readlines(f:TextIOWrapper) -> list:
    """Return all remaining lines of ``f`` as a list (line endings kept)."""
    return list(f)
@coppertop
def linesOf(pfn:pystr):
    """Read the file at path ``pfn`` and return its lines with line endings stripped.

    Implemented as a coppertop pipeline: read all lines, then strip each via the
    partially-applied ``strip`` (``_`` is the pipeline placeholder).
    """
    with open(pfn) as f:
        return f >> readlines >> each >> strip(_,'\\n')
@coppertop(style=binary)
def copyTo(src, dest):
    """Copy the file at ``src`` to ``dest``. Placeholder: not implemented yet."""
    raise NotImplementedError()
@coppertop
def readJson(pfn:pystr):
    """Parse the JSON file at path ``pfn``.

    NOTE: a second ``readJson`` accepting an open stream follows -- presumably
    coppertop multiple dispatch keeps both overloads live; confirm.
    """
    with open(pfn) as f:
        return json.load(f)
@coppertop
def readJson(f:TextIOWrapper):
    """Parse JSON from an already-open text stream.

    Overload of the path-based ``readJson`` above -- presumably selected via
    coppertop multiple dispatch on the argument type; confirm.
    """
    return json.load(f)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | src/std/coppertop/std/files.py | DangerMouseB/coppertop |
# coding: utf-8
from datetime import date, timedelta
import pandas as pd
from rescuetime.api.service import Service
from rescuetime.api.access import AnalyticApiKey
def get_apikey():
    """Return the RescueTime API key stored in the local ``apikey`` file."""
    with open("apikey") as key_file:
        return key_file.read()
# Module-level key, read once at import time from the local ``apikey`` file.
apikey = get_apikey()
def get_efficiency():
    """Fetch today's RescueTime efficiency percentage.

    Returns:
        (efficiency, date): ``efficiency`` as an int percent and ``date`` as the
        string form of the last returned interval row, or ``("F", "F")`` when
        the API call or parsing fails (best-effort sentinel kept for callers).
    """
    try:
        today_date = date.today().strftime("%Y-%m-%d")
        tomorrow_date = (date.today() + timedelta(1)).strftime("%Y-%m-%d")
        s = Service.Service()
        k = AnalyticApiKey.AnalyticApiKey(apikey, s)
        # Restrict to [today, tomorrow) so only today's intervals are returned.
        p = {'restrict_begin': today_date,
             'restrict_end': tomorrow_date,
             'restrict_kind': 'efficiency',
             'perspective': 'interval'}
        #YYYY-MM-DD
        d = s.fetch_data(k, p)
        df = pd.DataFrame(d['rows'], columns=d['row_headers'])
        efficiency = df["Efficiency (percent)"]
        dates = df["Date"]
        return int(efficiency.tail(1)), str(dates.tail(1))
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to real errors, keep the "F" sentinel.
        return "F", "F"
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | rescuetime_wrapper.py | psorianom/rescuescore |
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
import numpy as np
import torch
from .registry import TRANSFORMS
def to_tensor(data):
    """Convert ``data`` to a torch.Tensor.

    Tensors pass through unchanged; ndarrays share memory via ``from_numpy``;
    lists become tensors; a bare int/float becomes a 1-element Long/Float tensor.
    Raises TypeError for anything else.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, list):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f"Unsupported type {type(data)}")
@TRANSFORMS.register_class()
class ToTensor(object):
    """Transform that converts the entries named in ``keys`` to tensors in place."""

    def __init__(self, keys):
        # Names of the entries of the sample dict to convert.
        self.keys = keys

    def __call__(self, item):
        """Run ``to_tensor`` over every configured key of ``item`` and return it."""
        for name in self.keys:
            item[name] = to_tensor(item[name])
        return item
@TRANSFORMS.register_class()
class Select(object):
    """Transform that projects a sample dict down to ``keys`` (plus selected meta keys)."""

    def __init__(self, keys, meta_keys=()):
        self.keys = keys
        if not isinstance(meta_keys, (list, tuple)):
            raise TypeError(f"Expected meta_keys to be list or tuple, got {type(meta_keys)}")
        # Sub-keys of item['meta'] to carry over, if any.
        self.meta_keys = meta_keys

    def __call__(self, item):
        """Return a new dict containing only the configured keys of ``item``."""
        data = {name: item[name] for name in self.keys}
        if "meta" in item and len(self.meta_keys) > 0:
            data["meta"] = {name: item["meta"][name] for name in self.meta_keys}
        return data
@TRANSFORMS.register_class()
class TensorToGPU(object):
    """Transform that moves selected tensors to a CUDA device, copying everything else as-is."""

    def __init__(self, keys, device_id=None):
        # Entries to move; device_id of None means the current CUDA device.
        self.keys = keys
        self.device_id = device_id

    def __call__(self, item):
        """Return a new dict with each selected tensor on the GPU (no-op without CUDA)."""
        moved = {}
        for name, value in item.items():
            should_move = (
                name in self.keys
                and isinstance(value, torch.Tensor)
                and torch.cuda.is_available()
            )
            moved[name] = value.cuda(self.device_id, non_blocking=True) if should_move else value
        return moved
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | essmc2/transforms/tensor.py | huang-ziyuan/EssentialMC2 |
# -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_readConnectionLost -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_readConnectionLost}
to test that IHalfCloseableProtocol.readConnectionLost works for process
transports.
"""
__import__('_preamble')
import sys
from zope.interface import implements
from twisted.internet.interfaces import IHalfCloseableProtocol
from twisted.internet import stdio, protocol
from twisted.python import reflect, log
class HalfCloseProtocol(protocol.Protocol):
    """
    A protocol to hook up to stdio and observe its transport being
    half-closed. If all goes as expected, C{exitCode} will be set to C{0};
    otherwise it will be set to C{1} to indicate failure.

    NOTE: the C{reactor} name used below is resolved at call time from module
    globals; it is imported in the __main__ block after the chosen reactor is
    installed.
    """
    # Pre-@implementer zope.interface declaration (Python 2 era Twisted).
    implements(IHalfCloseableProtocol)
    # Set by the callbacks below; read by __main__ as the process exit status.
    exitCode = None
    def connectionMade(self):
        """
        Signal the parent process that we're ready.
        """
        self.transport.write("x")
    def readConnectionLost(self):
        """
        This is the desired event. Once it has happened, stop the reactor so
        the process will exit.
        """
        self.exitCode = 0
        reactor.stop()
    def connectionLost(self, reason):
        """
        This may only be invoked after C{readConnectionLost}. If it happens
        otherwise, mark it as an error and shut down.
        """
        # exitCode still None means readConnectionLost never fired -> failure.
        if self.exitCode is None:
            self.exitCode = 1
            log.err(reason, "Unexpected call to connectionLost")
        reactor.stop()
if __name__ == '__main__':
    # argv[1]: dotted name of the reactor to install; argv[2]: log file path.
    reflect.namedAny(sys.argv[1]).install()
    # Python 2 ``file`` builtin -- this fixture predates Python 3.
    log.startLogging(file(sys.argv[2], 'w'))
    # Imported only after the chosen reactor has been installed above.
    from twisted.internet import reactor
    protocol = HalfCloseProtocol()
    stdio.StandardIO(protocol)
    reactor.run()
    sys.exit(protocol.exitCode)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | twisted/test/stdio_test_halfclose.py | djmitche/Twisted |
import json
import discord.ext
# Mutable module-level state shared across the bot, all populated at runtime.
# Keys/values are presumably Discord IDs and game/lobby state objects -- confirm
# against the game code that writes these.
reactionMessageIDs = {
}
# channel id -> running game
openGames = {
}
# channel id -> open lobby
openLobbies = {
}
# user id -> game the user is in
playersInGame = {
}
# user id -> lobby the user is in
playersInLobby = {
}
def channelInGame(channelID):
    """Return True if ``channelID`` currently has a running game."""
    # Membership test directly on the dict; the `.keys()` round-trip and the
    # explicit True/False branches were redundant.
    return channelID in openGames
def channelHasLobby(channelID):
    """Return True if ``channelID`` currently has an open lobby."""
    # Membership test directly on the dict; the `.keys()` round-trip and the
    # explicit True/False branches were redundant.
    return channelID in openLobbies
def playerInGame(userID):
    """Return True if the user is currently in a game."""
    # Membership test directly on the dict; the `.keys()` round-trip and the
    # explicit True/False branches were redundant.
    return userID in playersInGame
def playerInLobby(userID):
    """Return True if the user is currently in a lobby."""
    # Membership test directly on the dict; the `.keys()` round-trip and the
    # explicit True/False branches were redundant.
    return userID in playersInLobby
def getRules(channelID):
    """Return the ruleset dict for a channel, creating and persisting defaults if absent.

    The rulesets live in ``storage/channelRulesets.json`` keyed by the channel id
    as a string; a channel seen for the first time gets the default ruleset
    written back to disk.
    """
    # ``with`` blocks close the files; the previous ``json.load(open(...))``
    # pattern leaked both handles.
    with open("storage/channelRulesets.json", "r") as rules_file:
        rules = json.load(rules_file)
    if str(channelID) in rules:
        return rules[str(channelID)]
    ruleset = {
        "startingCards" : 7,
        "jumpIns" : True,
        "stacking" : True,
        "forceplay" : False,
        "drawToMatch" : True
    }
    rules[str(channelID)] = ruleset
    with open("storage/channelRulesets.json", "w") as rules_file:
        json.dump(rules, rules_file)
    return rules[str(channelID)]
# Shared bot instance; commands elsewhere in the project attach to this.
client = discord.ext.commands.Bot(command_prefix = "u!")
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | storage/globalVariables.py | SuperSmay/Uno2 |
"""
Code illustration: 5.06
@Tkinter GUI Application Development Blueprints
"""
class Model:
    """Play-list model: holds the ordered list of audio file names."""

    def __init__(self):
        # Name-mangled internal list of play-list entries.
        self.__play_list = []

    @property
    def play_list(self):
        """The current play list."""
        return self.__play_list

    def get_file_to_play(self, file_index):
        """Return the file name stored at ``file_index``."""
        return self.__play_list[file_index]

    def clear_play_list(self):
        """Remove every entry from the play list in place."""
        del self.__play_list[:]

    def add_to_play_list(self, file_name):
        """Append ``file_name`` to the end of the play list."""
        self.__play_list.append(file_name)

    def remove_item_from_play_list_at_index(self, index):
        """Delete the entry at ``index`` from the play list."""
        self.__play_list.pop(index)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | Chapter 05/5.06/model.py | ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestBaseHandler(unittest.TestCase):
    """Unit tests for the abstract logging Transport base class."""

    PROJECT = 'PROJECT'

    @staticmethod
    def _get_target_class():
        # Imported lazily so merely importing this module needs no client libs.
        from google.cloud.logging.handlers.transports import Transport
        return Transport

    def _make_one(self, *args, **kw):
        """Instantiate the class under test with the given arguments."""
        klass = self._get_target_class()
        return klass(*args, **kw)

    def test_send_is_abstract(self):
        """send() must raise until a subclass implements it."""
        transport = self._make_one()
        with self.assertRaises(NotImplementedError):
            transport.send(None, None, None)

    def test_flush_is_abstract_and_optional(self):
        """flush() is an optional hook: the base no-op must not raise."""
        transport = self._make_one()
        transport.flush()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | logging/tests/unit/handlers/transports/test_base.py | rodrigodias27/google-cloud-python |
# -*- coding: utf-8 -*-
try:
import django
except ImportError as e:
django = None
django_import_error = e
def check_django_import():
    """Re-raise the ImportError captured at module load time if django is unavailable."""
    if django is not None:
        return
    raise django_import_error
class django_required(object):
    """Method decorator that fails fast (with the original ImportError) when
    django could not be imported at module load time."""

    def __call__(self, func):
        from functools import wraps

        # ``wraps`` preserves the wrapped method's name/docstring for
        # introspection and debugging; the plain wrapper hid them.
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            check_django_import()
            return func(self, *args, **kwargs)
        return wrapper
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | aserializer/django/utils.py | orderbird/aserializer |
# counter.py
def inc(x):
    """Return x increased by one.

    >>> inc(4)
    5
    """
    return x + 1
def dec(x):
    """Return x decreased by one.

    >>> dec(5)
    4
    """
    return x - 1
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | counter.py | lmerchant/ucsd-ext-put-final |
#!\usr\bin\python
from numpy import array
from scipy.special import erf
from scipy.optimize import minimize
from math import pi, sin, cos, exp, sqrt
#import dicom
line_array = []  ## global: [col0, col2] float pairs of the measured profile
def read_line(file_name):
    """Parse ``file_name`` line by line, appending ``[float(col0), float(col2)]``
    of each whitespace-separated row to the global ``line_array``."""
    with open(file_name) as f:
        for line in f:
            cols = line.split()
            line_array.append([float(cols[0]), float(cols[2])])
# Load the measured 4 MV beam profile once at import time.
read_line("4mv_line.csv")
# Index of the profile's middle sample; used to centre the fit window below.
line_len_2 = int(len(line_array)*0.5) ## global
def pi(x, b): # 0 1 2 3 4 5 6
    # b is np.array of these parameters: [sigma_1, sigma_2, w_1, x_sh, bkg, B, b]
    # NOTE(review): this function shadows ``math.pi`` imported above; renaming
    # would touch every call site, so it is only flagged here.
    # Model: weighted mix of two Gaussian-blurred edge pairs (erf terms at
    # +/-b[6], shifted by x_sh=b[3]) on a constant background bkg=b[4] --
    # presumably a two-Gaussian field-edge model; confirm against the source.
    # s_1 normalises by the weighted sigma mix so amplitude B is width-independent.
    s_1 = 0.5*b[5]/(abs(b[2])*b[0]+abs(1-abs(b[2]))*b[1])
    s_2 = abs(b[2])*b[0]*erf( (b[6]-x-b[3])/(sqrt(2)*b[0]) )
    s_3 = abs(b[2])*b[0]*erf( (-b[6]-x-b[3])/(sqrt(2)*b[0]) )
    s_4 = abs(1-abs(b[2]))*b[1]*erf( (b[6]-x-b[3])/(sqrt(2)*b[1]) )
    s_5 = abs(1-abs(b[2]))*b[1]*erf( (-b[6]-x-b[3])/(sqrt(2)*b[1]) )
    return s_1*(s_2 - s_3 + s_4 - s_5) + b[4] # x in mm
def s(b):
    """Sum of squared residuals between the model ``pi(x, b)`` and the measured
    profile over 190 samples centred on the profile middle."""
    n_points_checked = 190
    halv = int(n_points_checked * 0.5)
    total = 0.0
    for i in range(n_points_checked):
        # Sample position in mm (0.2481 mm pitch -- presumably detector spacing).
        x = (i - halv) * 0.2481
        residual = pi(x, b) - line_array[line_len_2 - halv + i][1]
        total += residual * residual
    return total
# Initial guess for [sigma_1, sigma_2, w_1, x_sh, bkg, B, b ]
x0 = array([1.58, 0.58, 0.08, -0.03, 1047.0, 15031.0, 1.40]) # initial values for minimize
print ( x0 )
# Nelder-Mead simplex fit of the model parameters to the measured profile.
res = minimize(s, x0, method='nelder-mead', options={'xtol': 1e-2, 'disp': True, 'maxfev':1e5, 'maxiter':1e5} )
print (res.x)
print (res.fun * 1e-6)
# print out the whole line: position, measured value, fitted model value
for i in range(190):
    x = (i-95)*0.2481 # x in millimeters
    print(x,", ", line_array[line_len_2 - 95 + i][1],", ",pi(x,res.x) )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | minimize/retic_xmm_2gauss/2minimize_4mv.py | oustling/dicom_profile_fitting |
from dp_excel.ExcelRowOptions import ExcelRowOptions
from dp_excel.ExcelCell import ExcelCell
class ExcelRowTemplate:
    """Builder for a block of spreadsheet rows.

    Usage: call ``add_row`` to start a row, then ``add_column`` to append cells
    to it; both return ``self`` for chaining. The ``get_*`` accessors iterate
    the accumulated structure for rendering.
    """
    def __init__(self):
        # Each entry: {'columns': [ExcelCell, ...], 'options': ExcelRowOptions}
        self.rows = []
    def __get_current_row(self):
        # The row most recently created by add_row().
        return self.rows[-1]
    def add_column(self, value, options=None, is_empty=False):
        """Append a cell to the current row; a row must have been added first."""
        cell = ExcelCell(value, options, is_empty)
        self.__get_current_row()['columns'].append(cell)
        return self
    def add_row(self, options=None):
        """Start a new empty row; ``options`` must be an ExcelRowOptions if given."""
        if options:
            if not isinstance(options, ExcelRowOptions):
                raise ValueError('options must be is instance of ExcelRowOptions')
        else:
            options = ExcelRowOptions()
        self.rows.append({'columns': [], 'options': options})
        return self
    def get_rows(self):
        """Yield each row dict in insertion order."""
        for row in self.rows:
            yield row
    def get_columns(self, row):
        """Yield each cell of ``row``."""
        for column in row['columns']:
            yield column
    def get_row_options(self, row):
        """Return the ExcelRowOptions attached to ``row``."""
        return row['options']
    def get_cell_options(self, column):
        # NOTE(review): columns are ExcelCell instances (see add_column), but
        # this treats ``column`` as a dict -- ``.get`` will fail unless
        # ExcelCell provides one. Confirm against ExcelCell and fix accordingly.
        return column.get('options', None)
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | dp_excel/ExcelRowTemplate.py | DmitryPaschenko/python_excel_writer |
# coding=utf-8
import unittest
from pyprobe.sensors.sensors.LinuxSensorsParser import LinuxSensorsParser
__author__ = 'Dirk Dittert'
# Captured ``sensors`` output used as the parsing fixture: two coretemp chips
# and one i5k_amb chip (the parser should split it into three chunks).
SAMPLE_OUTPUT = u"""\
coretemp-isa-0000
Adapter: ISA adapter
Core 0: +46.0°C (high = +82.0°C, crit = +100.0°C)
Core 1: +45.0°C (high = +82.0°C, crit = +100.0°C)
Core 2: +43.0°C (high = +82.0°C, crit = +100.0°C)
Core 3: +43.0°C (high = +82.0°C, crit = +100.0°C)
coretemp-isa-0001
Adapter: ISA adapter
Core 0: +43.0°C (high = +82.0°C, crit = +100.0°C)
Core 1: +41.0°C (high = +82.0°C, crit = +100.0°C)
Core 2: +43.0°C (high = +82.0°C, crit = +100.0°C)
Core 3: +44.0°C (high = +82.0°C, crit = +100.0°C)
i5k_amb-isa-0000
Adapter: ISA adapter
Ch. 0 DIMM 0: +63.5°C (low = +127.5°C, high = +127.5°C)
Ch. 0 DIMM 1: +52.0°C (low = +127.5°C, high = +127.5°C)
Ch. 1 DIMM 0: +62.0°C (low = +127.5°C, high = +127.5°C)
Ch. 1 DIMM 1: +51.5°C (low = +127.5°C, high = +127.5°C)
Ch. 2 DIMM 0: +55.0°C (low = +127.5°C, high = +127.5°C)
Ch. 2 DIMM 1: +51.5°C (low = +127.5°C, high = +127.5°C)
Ch. 3 DIMM 0: +55.5°C (low = +127.5°C, high = +127.5°C)
Ch. 3 DIMM 1: +52.0°C (low = +127.5°C, high = +127.5°C)
"""
class LinuxSensorsParserTest(unittest.TestCase):
    """Checks that LinuxSensorsParser splits ``sensors`` output into (type, name, chunk) triples."""

    def test_sensor_names_should_be_correct(self):
        """Chip names appear in order, one per chunk."""
        parser = LinuxSensorsParser(SAMPLE_OUTPUT)
        found = [name for (sensor_type, name, chunk) in parser.sensors]
        self.assertEqual([u'coretemp-isa-0000', u'coretemp-isa-0001', u'i5k_amb-isa-0000'], found)

    def test_sensor_types_should_be_correct(self):
        """Chip types are the names with the trailing instance number removed."""
        parser = LinuxSensorsParser(SAMPLE_OUTPUT)
        found = [sensor_type for (sensor_type, name, chunk) in parser.sensors]
        self.assertEqual([u'coretemp-isa', u'coretemp-isa', u'i5k_amb-isa'], found)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | tests/sensors/sensors/LinuxSensorsParserTest.py | dittert/pyprobe |
from urllib.parse import parse_qs
class Request:
    """WSGI request wrapper exposing parsed query-string (GET) and body (POST) parameters."""

    # Class-level defaults so the attributes always exist; __init__ replaces
    # them with instance-level parsed dicts.
    GET = {}
    POST = {}

    def __init__(self, environ: dict):
        # A missing QUERY_STRING must not crash parsing -> fall back to ''.
        self.build_get_params_from_dict(environ.get('QUERY_STRING') or '')
        # NOTE(review): the WSGI spec names the body stream 'wsgi.input'; this
        # app uses 'wsgi_input' -- confirm against the server glue code.
        body = environ.get('wsgi_input')
        self.build_post_params_dict(body.read() if body is not None else b'')

    def build_get_params_from_dict(self, raw_params: str):
        """Parse the raw query string into the GET dict (values are lists)."""
        self.GET = parse_qs(raw_params)

    def build_post_params_dict(self, raw_bytes: bytes):
        """Parse the URL-encoded UTF-8 request body into the POST dict."""
        self.POST = parse_qs(raw_bytes.decode('utf-8'))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | flack/request.py | LeikoDmitry/web |
from pip.locations import build_prefix, src_prefix
from pip.util import display_path, backup_dir
from pip.log import logger
from pip.exceptions import InstallationError
from pip.commands.install import InstallCommand
class BundleCommand(InstallCommand):
    """pip ``bundle`` command: build a .pybundle archive containing multiple packages."""
    name = 'bundle'
    usage = '%prog [OPTIONS] BUNDLE_NAME.pybundle PACKAGE_NAMES...'
    summary = 'Create pybundles (archives containing multiple packages)'
    bundle = True

    def __init__(self):
        super(BundleCommand, self).__init__()
        # Bundling must not clobber the normal build/src dirs, so point the
        # option defaults at '-bundle' suffixed variants.
        build_opt = self.parser.get_option("--build")
        src_opt = self.parser.get_option("--src")
        build_opt.default = backup_dir(build_prefix, '-bundle')
        src_opt.default = backup_dir(src_prefix, '-bundle')
        self.parser.set_defaults(**{
            build_opt.dest: build_opt.default,
            src_opt.dest: src_opt.default,
        })

    def run(self, options, args):
        """Resolve the given requirements and write them into the bundle file.

        The first positional argument is the bundle file name; the rest are
        package requirements handled by the inherited install logic.
        """
        if not args:
            raise InstallationError('You must give a bundle filename')
        # We have to get everything when creating a bundle:
        options.ignore_installed = True
        logger.notify('Putting temporary build files in %s and source/develop files in %s'
                      % (display_path(options.build_dir), display_path(options.src_dir)))
        self.bundle_filename = args.pop(0)
        return super(BundleCommand, self).run(options, args)
# Instantiation presumably registers the command with pip's command table via
# the base class -- confirm against pip.basecommand.
BundleCommand()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | vendor/pip-1.2.1/pip/commands/bundle.py | hmoody87/heroku-buildpack-python-ffmpeg-lame |
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from .fake_webapp import EXAMPLE_APP
class IsTextPresentTest(object):
    """Mixin exercising ``browser.is_text_present`` / ``is_text_not_present``.

    Host test cases supply ``self.browser`` and the unittest assertion methods.
    """

    def test_is_text_present(self):
        """Text that exists on the page is reported as present."""
        self.assertTrue(self.browser.is_text_present("Example Header"))

    def test_is_text_present_and_should_return_false(self):
        """Text that does not exist is reported as absent."""
        self.assertFalse(self.browser.is_text_present("Text that not exist"))

    def test_is_text_present_and_should_wait_time(self):
        """is_text_present keeps polling up to wait_time for late-arriving text."""
        self.browser.links.find_by_text("FOO").click()
        self.assertTrue(self.browser.is_text_present("BAR!", wait_time=5))

    def test_is_text_not_present(self):
        """is_text_not_present is True for text missing from the page."""
        self.assertTrue(self.browser.is_text_not_present("Text that not exist"))

    def test_is_text_not_present_and_should_return_false(self):
        """is_text_not_present is False for text that is on the page."""
        self.assertFalse(self.browser.is_text_not_present("Example Header"))

    def test_is_text_not_present_and_should_wait_time(self):
        """is_text_not_present also honours the wait_time polling window."""
        self.browser.links.find_by_text("FOO").click()
        self.assertTrue(self.browser.is_text_not_present("another text", wait_time=5))

    def test_is_text_present_no_body(self):
        """A page without a <body> reports any text as absent (no crash)."""
        self.browser.visit(EXAMPLE_APP + "no-body")
        self.assertFalse(self.browser.is_text_present("No such text"))

    def test_is_text_not_present_no_body(self):
        """A page without a <body> reports any text as not-present."""
        self.browser.visit(EXAMPLE_APP + "no-body")
        self.assertTrue(self.browser.is_text_not_present("No such text"))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | tests/is_text_present.py | jsfehler/splinter |
from currency_exchanger.currencies.models import Currency
from currency_exchanger.wallets.models import Wallet
from django.db import models
class Stock(models.Model):
    """A tradable stock symbol priced in a specific currency."""
    symbol = models.CharField(max_length=10)
    currency = models.ForeignKey(Currency, on_delete=models.CASCADE, related_name="stocks")
    price = models.DecimalField(decimal_places=2, max_digits=10)
    def __str__(self):
        """Display the stock by its ticker symbol."""
        return self.symbol
class WalletStock(models.Model):
    """How many units of a given stock a wallet holds (one row per pair)."""
    wallet = models.ForeignKey(Wallet, on_delete=models.CASCADE)
    stocks = models.ForeignKey(Stock, on_delete=models.CASCADE)
    count = models.PositiveIntegerField(default=0)
    class Meta:
        # A wallet may hold each stock through at most one row.
        constraints = [
            models.UniqueConstraint(fields=["wallet", "stocks"], name="unique_wallet_stock")
        ]
class StockTransfer(models.Model):
    """A single buy/sell movement of a stock for a wallet; ``amount`` is signed."""
    wallet = models.ForeignKey(Wallet, on_delete=models.CASCADE, related_name="stock_transfers")
    stock = models.ForeignKey(Stock, on_delete=models.CASCADE, related_name="+")
    amount = models.IntegerField()
class StockHistory(models.Model):
    """Timestamped price sample of a stock, newest first."""
    stocks = models.ForeignKey(Stock, on_delete=models.CASCADE, related_name="history")
    timestamp = models.DateTimeField(auto_now_add=True)
    price = models.DecimalField(decimal_places=2, max_digits=10)
    class Meta:
        ordering = ["-timestamp"]
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | backend/currency_exchanger/stocks/models.py | norbertcyran/currency-exchanger |
import numpy as np
from prob import VRPDGLDataset
from dgl.dataloading import GraphDataLoader
import torch
from attention_model.attention_utils.functions import load_routing_agent
from solver.absolver import ABSolver
class amVRP:
    """Wrapper around a pretrained attention-model routing agent for the VRP."""

    def __init__(self, size=20, method="greedy"):
        """
        args:
            size: the number of customers
            method: decoding strategy, "greedy" or "sampling"
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.solver = load_routing_agent(size=size, name="vrp")
        self.horizon = size * 2
        self._size = size
        assert method in ["greedy", "sampling"]
        self.method = method

    def solve(self, batch_data):
        """Decode routes for a batch; returns (routes per instance, costs list)."""
        # Greedy decodes once; sampling draws 1280 candidates per instance.
        batch_rep, iter_rep = (1, 1) if self.method == "greedy" else (1280, 1)
        routes, costs = self.solver.sample_many(batch_data, batch_rep=batch_rep, iter_rep=iter_rep)
        return self._covertRoutes(routes), costs.detach().cpu().tolist()

    def _covertRoutes(self, batch_routes):
        """Split each flat route tensor into per-vehicle tours of 0-based indices.

        After subtracting 1, entries equal to -1 (presumably depot visits)
        delimit the tours and are dropped from the output.
        """
        arr = batch_routes.cpu().detach().numpy() - 1
        batch_routes_list = []
        for routes in arr:
            routes_list = []
            tour = []
            for node in routes:
                if node == -1:
                    if len(tour) != 0:
                        routes_list.append(tour)
                        tour = []
                else:
                    tour.append(node)
            if len(tour) != 0:
                routes_list.append(tour)
            batch_routes_list.append(routes_list)
        return batch_routes_list
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | solver/am_vrp_solver.py | lin-bo/RL_back2depot_VRP |
from typing import Union
class StorageMap:
    """Prefixed key/value view over a contract storage context.

    NOTE(review): this is a neo3-boa interop stub -- the method bodies are
    placeholders, presumably replaced by native storage syscalls at compile
    time; ``__init__`` only declares attribute types and never assigns them.
    """
    def __init__(self, context, prefix: Union[bytes, str]):
        """Declare the storage context and key prefix this map operates under."""
        from boa3.builtin.interop.storage.storagecontext import StorageContext
        self._context: StorageContext
        self._prefix: Union[bytes, str]
    def get(self, key: Union[str, bytes]) -> bytes:
        """
        Gets a value from the map based on the given key.
        :param key: value identifier in the store
        :type key: str or bytes
        :return: the value corresponding to given key for current storage context
        :rtype: bytes
        """
        pass
    def put(self, key: Union[str, bytes], value: Union[int, str, bytes]):
        """
        Inserts a given value in the key-value format into the map.
        :param key: the identifier in the store for the new value
        :type key: str or bytes
        :param value: value to be stored
        :type value: int or str or bytes
        """
        pass
    def delete(self, key: Union[str, bytes]):
        """
        Removes a given key from the map if exists.
        :param key: the identifier in the store for the new value
        :type key: str or bytes
        """
        pass
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | boa3/builtin/interop/storage/storagemap.py | DanPopa46/neo3-boa |
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyAwesomeModel(nn.Module):
    """Small LeNet-style CNN over 1-channel images (28x28 inputs yield the
    120-dim flattened feature vector) producing log-probabilities."""

    def __init__(self, n_classes):
        super(MyAwesomeModel, self).__init__()
        # Three conv blocks with tanh activations; final conv emits 120 channels.
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=4, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=4, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=16, out_channels=120, kernel_size=4, stride=1),
            nn.Tanh(),
        )
        # Two-layer classification head over the flattened features.
        self.classifier = nn.Sequential(
            nn.Linear(in_features=120, out_features=84),
            nn.Tanh(),
            nn.Linear(in_features=84, out_features=n_classes),
        )

    def forward(self, x, return_features=False):
        """Return per-class log-probabilities, or the flattened features when
        ``return_features`` is True."""
        features = torch.flatten(self.feature_extractor(x), 1)
        if return_features:
            return features
        return F.log_softmax(self.classifier(features), dim=1)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | src/models/model.py | schibsen/MLops_exercises_organized |
import Algorithmia
# API calls will begin at the apply() method, with the request body passed as 'input'
# For more details, see algorithmia.com/developers/algorithm-development/languages
def apply(input):
    """Algorithm entry point: greet whatever the API request supplied."""
    return f"hello {input}"
# Here is an example of an advanced form of an algorithm function,
# which also uses a load function (see the example below)
# -- ADVANCED ALGORITHM USAGE --
# def apply(input, keras_model):
# prediction = keras_model.predict(input)
# result = {"class": prediction[0], "confidence": prediction[1]}
# return result
def load():
    """Optional startup hook.

    If defined, this runs once when the algorithm is loaded; its return value
    is passed as the second argument to ``apply`` (e.g. a preloaded model).
    This hello-world template has nothing to preload, so it returns None.
    """
    return None
# Turn the functions above into a servable algorithm; a non-None value from
# ``load`` would be forwarded to ``apply`` as its second argument.
algo = Algorithmia.handler(apply, load)
# serve() blocks and starts handling requests from the platform.
algo.serve()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | languages/anaconda3/template/src/Algorithm.py | algorithmiaio/langpacks |
import warnings
from typing import List, Callable, Union
from sharpy.plans.require.methods import merge_to_require
from sharpy.plans.require import RequireBase
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from sharpy.knowledges import Knowledge
class Any(RequireBase):
    """Check passes if any of the conditions are true."""

    def __init__(
        self,
        conditions: Union[RequireBase, Callable[["Knowledge"], bool], List[RequireBase]],
        *args: Union[RequireBase, Callable[["Knowledge"], bool]]
    ):
        """Accept a single requirement/callable, a list of them, or several varargs."""
        # Fixed: super().__init__() was previously called twice (once before and
        # once after the validation), running base initialisation redundantly.
        super().__init__()
        is_act = isinstance(conditions, RequireBase) or isinstance(conditions, Callable)
        assert conditions is not None and (isinstance(conditions, list) or is_act)
        self.conditions: List[RequireBase] = []
        if is_act:
            self.conditions.append(merge_to_require(conditions))
        else:
            for order in conditions:
                assert order is not None
                self.conditions.append(merge_to_require(order))
        for order in args:
            assert order is not None
            self.conditions.append(merge_to_require(order))

    async def start(self, knowledge: "Knowledge"):
        """Start every wrapped condition as a component of this requirement."""
        await super().start(knowledge)
        for condition in self.conditions:
            await self.start_component(condition, knowledge)

    def check(self) -> bool:
        """True as soon as one condition passes."""
        return any(condition.check() for condition in self.conditions)
class RequiredAny(Any):
    """Deprecated alias of ``Any``; kept for backwards compatibility."""
    def __init__(
        self,
        conditions: Union[RequireBase, Callable[["Knowledge"], bool], List[RequireBase]],
        *args: Union[RequireBase, Callable[["Knowledge"], bool]]
    ):
        # stacklevel 2 points the warning at the caller, then defer to Any.
        warnings.warn("'RequiredAny' is deprecated, use 'Any' instead", DeprecationWarning, 2)
        super().__init__(conditions, *args)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding s... | 3 | sharpy-sc2/sharpy/plans/require/any.py | etzhang416/sharpy-bot-eco |
import sys
sys.path.append('..')
from dread.base import BaseResource
from dread.json import JSONDispatcher
from dread.auth import BasicAuth
from werkzeug.exceptions import NotFound
class User(BaseResource):
    """In-memory ``users`` resource; list indices serve as user ids."""

    # Mutating actions require authentication (see the Auth policy below).
    PROTECTED_ACTIONS = [
        'create', 'update', 'delete'
    ]

    def __init__(self):
        self.users = []
        super(User, self).__init__(name="users", param_name="user_id")

    def on_list(self, params):
        """Return every stored user."""
        return {'data': self.users}

    def on_detail(self, params):
        """Return one user by index; 404 when the index is out of range."""
        try:
            return self.users[params['user_id']]
        except IndexError:
            raise NotFound

    def on_create(self, params):
        """Append the posted payload as a new user and echo it back."""
        created = params['data']
        self.users.append(created)
        return {'data': created}

    def on_update(self, params):
        """Replace the user at the given index; 404 when out of range."""
        user_id = params['user_id']
        try:
            self.users[user_id] = params['data']
            return {'data': self.users[user_id]}
        except IndexError:
            raise NotFound

    def on_delete(self, params):
        """Remove the user at the given index; 404 when out of range."""
        try:
            del self.users[params['user_id']]
            return {}
        except IndexError:
            raise NotFound
class Auth(BasicAuth):
    """Demo policy: accepts only the hard-coded admin/admin credential pair."""
    def check_credientials(self, username, password):
        # (Method name spelling matches the BasicAuth hook it overrides.)
        return (username, password) == ('admin', 'admin')
class Dispatcher(JSONDispatcher):
    """JSON dispatcher protected by the basic-auth policy defined above."""
    auth_class = Auth
# Build the WSGI app, expose the users resource, and run it when executed directly.
app = Dispatcher()
app.add_resource(User())
if __name__ == '__main__':
    app.run()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | examples/basicauth.py | rugginoso/dread |
from graphql.language.location import SourceLocation as L
from graphql.validation.rules import UniqueInputFieldNames
from .utils import expect_fails_rule, expect_passes_rule
def duplicate_field(name, l1, l2):
    """Build the expected validation error dict for a duplicated input field."""
    message = UniqueInputFieldNames.duplicate_input_field_message(name)
    return {'message': message, 'locations': [l1, l2]}
def test_input_object_with_fields():
    # A single input-object field is trivially unique.
    expect_passes_rule(UniqueInputFieldNames, '''
      {
        field(arg: { f: true })
      }
    ''')


def test_same_input_object_within_two_args():
    # The same field name in *different* input objects is allowed.
    expect_passes_rule(UniqueInputFieldNames, '''
      {
        field(arg1: { f: true }, arg2: { f: true })
      }
    ''')


def test_multiple_input_object_fields():
    expect_passes_rule(UniqueInputFieldNames, '''
      {
        field(arg: { f1: "value", f2: "value", f3: "value" })
      }
    ''')


def test_it_allows_for_nested_input_objects_with_similar_fields():
    # "id" repeats at each nesting level, but never twice in the same object.
    expect_passes_rule(UniqueInputFieldNames, '''
      {
        field(arg: {
          deep: {
            deep: {
              id: 1
            }
            id: 1
          }
          id: 1
        })
      }
    ''')


def test_duplicate_input_object_fields():
    # Expected columns (22/35) depend on the 8-space indent before
    # "field(...)" inside the query string above.
    expect_fails_rule(UniqueInputFieldNames, '''
      {
        field(arg: { f1: "value", f1: "value" })
      }
    ''', [
        duplicate_field("f1", L(3, 22), L(3, 35))
    ])


def test_many_duplicate_input_object_fields():
    # Each duplicate is reported against the first occurrence of f1.
    expect_fails_rule(UniqueInputFieldNames, '''
      {
        field(arg: { f1: "value", f1: "value", f1: "value" })
      }
    ''', [
        duplicate_field('f1', L(3, 22), L(3, 35)),
        duplicate_field('f1', L(3, 22), L(3, 48))
    ])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | graphql/validation/tests/test_unique_input_field_names.py | phil303/graphql-core |
#Import sessions for session handling
import webapp2
from webapp2_extras import sessions
#This is needed to configure the session secret key
#Runs first in the whole application
# webapp2_extras session configuration, passed to the WSGI application.
# NOTE(review): the secret key is hard-coded in source — it should come
# from deployment configuration, not version control.
session_config = {}
session_config['webapp2_extras.sessions'] = {
    'secret_key': 'my-super-secret-key-somemorearbitarythingstosay',
}
#Session Handling class, gets the store, dispatches the request
class BaseSessionHandler(webapp2.RequestHandler):
    """Request handler base class that adds webapp2 session support."""

    def dispatch(self):
        """Load the session store before dispatch and persist it afterwards."""
        # Get a session store for this request.
        self.session_store = sessions.get_store(request=self.request)
        try:
            # Dispatch the request.
            webapp2.RequestHandler.dispatch(self)
        finally:
            # Save all sessions — runs even if the handler raised.
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        # Returns a session using the default cookie key (computed once per request).
        return self.session_store.get_session()
#End of BaseSessionHandler Class | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | session_module.py | zapstar/gae-facebook |
#!/usr/bin/env python
import os
import glob
import subprocess
passed = []
failed = []
def execTest(testfile):
    """Compile one shader test with ./glslc; return True on exit status 0.

    Uses an argument list (shell=False) instead of interpolating the
    filename into a shell string, so paths containing spaces or shell
    metacharacters cannot break or inject into the command line.
    """
    ret = subprocess.call(["./glslc", testfile])
    return ret == 0
def stat():
    """Print pass/fail counts and list every failing test file."""
    # `passed`/`failed` are module-level lists; reading them needs no
    # `global` declaration.
    total = len(passed) + len(failed)
    print("# of total tests : %d" % total)
    print("# of tests passed: %d" % len(passed))
    print("# of tests failed: %d" % len(failed))
    for name in failed:
        print(name)
def main():
    """Run every test/*.frag file through execTest and report totals."""
    test_dir = "test"
    pattern = os.path.join(test_dir, "*.frag")
    for testfile in glob.glob(pattern):
        ok = execTest(testfile)
        print("%s %s" % (testfile, ok))
        (passed if ok else failed).append(testfile)
    stat()


main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | glsl/test_runner.py | avr-aics-riken/SURFACE |
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Spirent Landslide traffic profile definitions """
from yardstick.network_services.traffic_profile import base
class LandslideProfile(base.TrafficProfile):
    """
    This traffic profile handles attributes of Landslide data stream
    """

    def __init__(self, tp_config):
        super(LandslideProfile, self).__init__(tp_config)
        dmf = tp_config["dmf_config"]
        # Backward compatibility: a single dict becomes a one-element list.
        self.dmf_config = [dmf] if isinstance(dmf, dict) else dmf

    def execute(self, traffic_generator):
        # Traffic is driven by Landslide itself; nothing to run here.
        pass

    def update_dmf(self, options):
        """Merge per-DMF overrides from options into the stored configs by position."""
        if 'dmf' not in options:
            return
        overrides = options['dmf']
        if isinstance(overrides, dict):
            overrides = [overrides]
        for index, override in enumerate(overrides):
            try:
                self.dmf_config[index].update(override)
            except IndexError:
                # More overrides than stored configs: ignore the extras.
                pass
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | yardstick/network_services/traffic_profile/landslide_profile.py | upfront710/yardstick |
from __future__ import unicode_literals
import json as jsonencode
from datetime import datetime
import pytz
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
def absolute_url(context, obj):
    """Template tag: absolute URL of obj, built from the request in context (if any)."""
    return obj.get_absolute_url(context.get('request', None))


@register.filter
@stringfilter
def underslug(string):
    """Slug-ish filter: spaces -> underscores, apostrophes dropped, lower-cased."""
    return string.replace(" ", "_").replace("'", "").lower()


@register.filter
def json(obj):
    """Serialize obj to JSON and mark it safe for template output."""
    # NOTE(review): mark_safe on arbitrary data can enable XSS if obj ever
    # contains user-controlled strings — confirm callers only pass trusted data.
    return mark_safe(jsonencode.dumps(obj))


@register.filter(name='timestamptodate')
def timestamptodate(value):
    """Millisecond epoch timestamp -> timezone-aware UTC datetime."""
    return datetime.fromtimestamp(float(value)/1000, tz=pytz.UTC)


@register.filter(name='reversed')
def reversed_filter(value):
    """Template filter exposing the builtin reversed()."""
    return reversed(value)
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | utils/templatetags/common.py | ntucker/django-common-utils |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.cluster_firmware_status_node import ClusterFirmwareStatusNode # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestClusterFirmwareStatusNode(unittest.TestCase):
    """ClusterFirmwareStatusNode unit test stubs"""
    # Generated by swagger-codegen; method names keep the generated camelCase
    # so regeneration produces no spurious diffs.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testClusterFirmwareStatusNode(self):
        """Test ClusterFirmwareStatusNode"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_2.models.cluster_firmware_status_node.ClusterFirmwareStatusNode()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer":... | 3 | isi_sdk_8_2_2/test/test_cluster_firmware_status_node.py | mohitjain97/isilon_sdk_python |
import psycopg2, psycopg2.extras
import time
import numpy as np
import pandas as pd
from datetime import timedelta, date
def date_range(start_date, end_date):
    """Yield each date from start_date up to, but not including, end_date."""
    total_days = int((end_date - start_date).days)
    current = start_date
    for _ in range(total_days):
        yield current
        current += timedelta(days=1)
def generate_by_month():
    """Dump daily close-price rows for 2010-01..2010-06 to price_201001~06.npy.

    Also writes the dates that had data to dt_index.txt, one per line.
    """
    # NOTE(review): eval() on auth.txt executes arbitrary code; this assumes
    # the file holds a trusted dict literal — consider ast.literal_eval.
    conn = psycopg2.connect(**eval(open('auth.txt').read()))
    cmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    start_date = date(2010, 1, 1)
    end_date = date(2010, 7, 1)
    prices = []
    with open('dt_index.txt', 'w') as f:
        # run from start_date to end_date-1 day
        for single_date in date_range(start_date, end_date):
            cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',
                        dict(dt=single_date.strftime("%Y-%m-%d")))
            recs = cmd.fetchall()
            if recs == []:
                # No trading data for this calendar day (weekend/holiday).
                continue
            df = pd.DataFrame(recs, columns=recs[0].keys())
            prices = prices + [df['close'].tolist()]
            f.write(single_date.strftime("%Y-%m-%d") + '\n')
    # print(prices)
    # print(np.shape(prices))
    np.save(('price_201001~06.npy'), prices)
def generate_by_txt():
    """Dump close-price rows for the dates listed in dt_threshold=10.txt.

    Fixes over the previous version:
    - lines returned by readlines() keep their trailing newline; strip
      each one before using it as the SQL date parameter.
    - the loop variable no longer shadows the imported `date` class.
    """
    # NOTE(review): eval() on auth.txt executes arbitrary code; assumes the
    # file holds a trusted dict literal.
    conn = psycopg2.connect(**eval(open('auth.txt').read()))
    cmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    prices = []
    with open('dt_threshold=10.txt', 'r') as f:
        dates = f.readlines()
    for dt_str in dates:
        dt_str = dt_str.strip()
        if not dt_str:
            continue
        cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',
                    dict(dt=dt_str))
        recs = cmd.fetchall()
        if recs == []:
            continue
        df = pd.DataFrame(recs, columns=recs[0].keys())
        prices = prices + [df['close'].tolist()]
    np.save(('price_threshold=10.npy'), prices)


if __name__ == '__main__':
    generate_by_month()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | midprice_profit_label/profit_evaluate/data_generate.py | ianpan870102/neural-network-on-finance-data |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
"""
Built-in value transformers.
"""
import datetime as dt
from typing import Any, Sequence
from datadog_checks.base import AgentCheck
from datadog_checks.base.types import ServiceCheckStatus
from datadog_checks.base.utils.db.utils import normalize_datetime
def length(value):
    # type: (Sequence) -> int
    """Transformer: number of elements in *value*."""
    return len(value)
def to_time_elapsed(datetime):
    # type: (dt.datetime) -> float
    """Seconds elapsed between *datetime* (normalized to aware) and now."""
    moment = normalize_datetime(datetime)
    delta = dt.datetime.now(moment.tzinfo) - moment
    return delta.total_seconds()
def ok_warning(value):
    # type: (Any) -> ServiceCheckStatus
    """Map a truthy value to OK and a falsy one to WARNING."""
    if value:
        return AgentCheck.OK
    return AgentCheck.WARNING
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | rethinkdb/datadog_checks/rethinkdb/document_db/transformers.py | remicalixte/integrations-core |
#import ibm_db
#import ibm_db_dbi as db
from typing import Any, Dict, Optional
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.models.connection import Connection
class DB2Hook(DbApiHook):
    """
    General hook for DB2 access.

    NOTE(review): the actual ibm_db connection code is commented out below;
    as written, get_conn() returns the Airflow Connection object itself,
    not a DB-API connection — confirm whether this is intentional WIP.
    """

    # Airflow hook wiring: which connection id attribute/type this hook uses.
    conn_name_attr = 'db2_conn_id'
    default_conn_name = 'db2_default'
    conn_type = 'DB2'
    hook_name = 'DB2 Connection'

    @staticmethod
    def get_connection_form_widgets() -> Dict[str, Any]:
        """Returns connection widgets to add to connection form"""
        # Imported lazily so the webserver-only deps aren't needed elsewhere.
        from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
        from flask_babel import lazy_gettext
        from wtforms import StringField
        return {
            "security_mechanism": StringField(lazy_gettext('Security Mechanism'), widget=BS3TextFieldWidget())
        }

    @staticmethod
    def get_ui_field_behaviour() -> Dict:
        """Returns custom field behaviour"""
        return {
            "hidden_fields": ['schema', 'extra'],
            "relabeling": {'schema': 'database'}
        }

    def get_conn(self) -> Connection:
        """Fetch the configured Airflow Connection for this hook."""
        conn: Connection = self.get_connection(getattr(self, self.conn_name_attr))
        # Fields extracted for the (currently disabled) ibm_db DSN below.
        host: str = conn.host
        login: str = conn.login
        psw: str = conn.password
        port: str = conn.port
        database: str = conn.schema
        security_mechanism: Optional[str] = conn.extra_dejson.get('security_mechanism')
        # conn = conn = db.Connection(
        #     ibm_db.connect(
        #         f'''
        #         DATABASE={database};\
        #         HOSTNAME={host};\
        #         PORT={port};\
        #         PROTOCOL=TCPIP;\
        #         SECURITYMECHANISM={security_mechanism};\
        #         UID={login};\
        #         PWD={psw};\
        #         ''', '', ''))
        return conn
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | airflow_provider_db2/hooks/db2_hook.py | fmilagres/airflow-provider-db2 |
import Structures.Polynomial
import Structures.Integers
from Algorithms.HenselLifting import hensel_full_lifting, squarefree_charzero
from Algorithms.Kronecker import full_kronecker
from Algorithms.SortPolynomials import lexicografic_mon
class IntegerPolynomial(Structures.Polynomial.Polynomial):
    """Polynomial structure whose coefficients come from Z or a nested
    integer polynomial structure."""

    def __init__(self, F, var, order=lexicografic_mon):
        # Coefficient structure must be Integers or another IntegerPolynomial.
        assert issubclass(type(F), Structures.Integers.Integers) \
            or issubclass(type(F), Structures.IntegerPolynomial.IntegerPolynomial)
        super(IntegerPolynomial, self).__init__(F, var, order)

    def kronecker(self, f):
        """Run the full Kronecker algorithm on f over this structure."""
        return full_kronecker(f, self)

    def hensel_lifting(self, f):
        """Run full Hensel lifting on f over this structure."""
        return hensel_full_lifting(f, self)

    def square_free_part(self, f):
        """Square-free computation (characteristic-zero algorithm); only
        valid at order 0."""
        assert self.get_order() == 0
        return squarefree_charzero(f, self)

    def symmetric_module(self, a, pl):
        """Remap the coefficients of a into the symmetric residue system
        modulo pl, i.e. into (-pl//2, pl//2]."""
        import IntegersModuleP
        ring = IntegersModuleP.IntegersModuleP(pl)
        terms = self.generate_tuple_representation(a)
        half = pl // 2
        remapped = []
        for coeff, cols in terms:
            canonical = ring.canonical(coeff)
            if canonical > half:
                canonical = canonical - pl
            remapped.append((canonical, cols))
        return self.generate_polynomial(remapped)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | Structures/IntegerPolynomial.py | Galieve/algebra_computacional |
from typing import Dict, Optional
from marshmallow import fields, validate
from tortuga.node.state import ALLOWED_NODE_STATES
from tortuga.types.base import BaseType, BaseTypeSchema
# Shared validator: restricts `state` fields to the allowed node states.
NodeStateValidator = validate.OneOf(
    choices=ALLOWED_NODE_STATES,
    error="Invalid node state '{input}'; must be one of {choices}"
)


class NodeSchema(BaseTypeSchema):
    """Marshmallow schema for full Node objects."""
    name = fields.String()
    public_hostname = fields.String()
    state = fields.String(validate=NodeStateValidator)
    hardwareprofile_id = fields.String()
    softwareprofile_id = fields.String()
    locked = fields.String()
    tags = fields.Dict()
    # Server-controlled: serialized on dump, never loaded from input.
    last_update = fields.String(dump_only=True)
class Node(BaseType):
    """API type for a node, serialized via NodeSchema."""
    schema_class = NodeSchema
    type = 'node'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every attribute is optional; missing keys default to None
        # (tags defaults to an empty dict).
        get = kwargs.get
        self.name: Optional[str] = get('name', None)
        self.public_hostname: Optional[str] = get('public_hostname', None)
        self.state: Optional[str] = get('state', None)
        self.hardwareprofile_id: Optional[str] = get('hardwareprofile_id', None)
        self.softwareprofile_id: Optional[str] = get('softwareprofile_id', None)
        self.locked: Optional[str] = get('locked', None)
        self.tags: Dict[str, str] = get('tags', {})
        self.last_update: Optional[str] = get('last_update', None)
class NodeStatusSchema(BaseTypeSchema):
    """Reduced schema exposing only a node's state and last update time."""
    state = fields.String(validate=NodeStateValidator)
    # Server-controlled: serialized on dump, never loaded from input.
    last_update = fields.String(dump_only=True)
class NodeStatus(BaseType):
    """Lightweight node view carrying only status fields."""
    schema_class = NodeStatusSchema
    type = 'node'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are optional and default to None.
        self.state: Optional[str] = kwargs.get('state', None)
        self.last_update: Optional[str] = kwargs.get('last_update', None)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | src/installer/src/tortuga/node/types.py | sutasu/tortuga |
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from playwright.sync_api import Page, Route
from tests.server import Server
def test_should_fetch_original_request_and_fulfill(page: Page, server: Server) -> None:
    """Intercept every request, re-fetch it via the API, and fulfill with the result."""

    def proxy_route(route: Route) -> None:
        # Re-issue the intercepted request and answer with the live response.
        live = page.request.fetch(route.request)
        route.fulfill(response=live)

    page.route("**/*", proxy_route)
    response = page.goto(server.PREFIX + "/title.html")
    assert response
    assert response.status == 200
    assert page.title() == "Woof-Woof"
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?"... | 3 | tests/sync/test_request_fulfill.py | rayshifu/playwright-python |
from modules.lib.reporter import Reporter
from modules.lib.report import Report
from modules.lib.alarm_machine import AlarmMachine
class TemperatureReporter(Reporter):
    """Reporter for the SoC temperature exposed by thermal_zone0."""

    def data_type(self):
        return 'temperature'

    def report(self):
        """Read zone-0 temperature and build a (report, alarm) pair.

        The sysfs value is in millidegrees Celsius; alarm is None when no
        alarm machine is configured.
        """
        with open('/sys/class/thermal/thermal_zone0/temp') as sensor:
            temp = int(sensor.read()) / float(1000)
        report = Report.report_now(
            'measurement',
            type='temperature',
            key='zone_0',
            value=temp,
            unit='c'
        )
        alarm = None
        if self.alarm_machine() is not None:
            alarm = self.alarm_machine().judge('temperature', temp,
                                               report.reported_at)
        return report, alarm
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | Armadillo-IoT_GW/modules/reporters/temperature_reporter.py | naomitodori/Azure-IoT-samples |
"""
custom_components.light.test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mock switch platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.const import STATE_ON, STATE_OFF
from tests.helpers import MockToggleDevice
DEVICES = []
def init(empty=False):
    """(Re-)initialize the mock platform's device list.

    With empty=True the platform starts with no devices; otherwise it gets
    two devices sharing a name plus one unnamed device.
    """
    global DEVICES
    if empty:
        DEVICES = []
    else:
        DEVICES = [
            MockToggleDevice('Ceiling', STATE_ON),
            MockToggleDevice('Ceiling', STATE_OFF),
            MockToggleDevice(None, STATE_OFF),
        ]
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """ Returns mock devices. """
    # Hands Home Assistant whatever init() last placed in DEVICES.
    add_devices_callback(DEVICES)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/config/custom_components/light/test.py | hemantsangwan/home-assistant |
"""
The MIT License (MIT)
Copyright (c) 2015 <Satyajit Sarangi>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
def foo(a, b):
    """Return the sum (or concatenation) of a and b."""
    total = a + b
    return total
def main():
    """Exercise foo with sample arguments (result is discarded)."""
    foo(4, 5)


main()
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | tests/first.py | ssarangi/python_type_inference |
import numpy as np
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
### --- Gather Dataset --- ###
n = 100
X, y = make_blobs(n_samples = n, centers = 2)
# Reshape labels into a column vector (n, 1) for the matrix math below.
y = y[:, np.newaxis]
### --- Build Model --- ###
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^-z)."""
    denom = 1.0 + np.exp(-z)
    return 1.0 / denom
class LogisticRegression:
    """Binary logistic regression trained by full-batch gradient descent."""

    def predict(self, X, w, b):
        """Predicted probabilities for inputs X with weights w and bias b."""
        return sigmoid(X.dot(w) + b)

    def loss(self, pred, y):
        """Mean binary cross-entropy (1e-6 added inside log for stability)."""
        BCE = (y * np.log(pred + 1e-6) + (1 - y) * np.log(1 - pred + 1e-6))
        return -np.sum(BCE) * (1.0 / self.m)

    def fit(self, X, y):
        """Fit weights/bias on (X, y); logs the loss every 100 steps."""
        eta = 0.01
        epochs = 5000
        self.m, self.n = X.shape
        # Random init in [-1, 1); shapes: weights (n, 1), bias (1, 1).
        self.weights = np.random.uniform(-1, 1, (self.n, 1))
        self.bias = np.random.uniform(-1, 1, (1, 1))
        for step in range(epochs):
            probs = self.predict(X, self.weights, self.bias)
            if step % 100 == 0:
                print("Loss on step {} is: {}".format(step, self.loss(probs, y)))
            self.weights = self.weights - eta * X.T.dot(probs - y)
            self.bias = self.bias - eta * np.sum(probs - y)
### --- Instantiate Model --- ###
model = LogisticRegression()
model.fit(X, y)
# The learned parameters define the boundary w0*x0 + w1*x1 + b = 0.
weight0, weight1 = model.weights
bias = model.bias[0][0]
### --- Plot --- ###
# Scatter the two classes in different colors.
for i in range(n):
    if (y[i] == 1):
        plt.scatter(X[:, 0][i], X[:, 1][i], color="green")
    else:
        plt.scatter(X[:, 0][i], X[:, 1][i], color="blue")
# Rearranged boundary: x1 = -(w0/w1)*x0 - b/w1, drawn over x0 in [-5, 5].
x = np.linspace(-5, 5, 5)
hyperplane = (-(weight0 / weight1) * x) - (bias / weight1)
plt.suptitle("Logistic Regression")
plt.plot(x, hyperplane, '-', color = "red")
plt.show()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | logistic_regression/logistic_regression.py | ryanirl/ml-basics |
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
"""
from tensorflow.python.keras.layers import Layer, Concatenate
class NoMask(Layer):
    """Keras layer that passes its input through unchanged and drops any mask."""

    def __init__(self, **kwargs):
        super(NoMask, self).__init__(**kwargs)

    def build(self, input_shape):
        # Be sure to call this somewhere!
        super(NoMask, self).build(input_shape)

    def call(self, x, mask=None, **kwargs):
        # Identity: the incoming mask argument is intentionally ignored.
        return x

    def compute_mask(self, inputs, mask):
        # Returning None stops mask propagation to downstream layers.
        return None
def concat_fun(inputs, axis=-1):
    """Concatenate a list of tensors along axis; a single tensor passes through."""
    single = len(inputs) == 1
    return inputs[0] if single else Concatenate(axis=axis)(inputs)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | deepctr/layers/utils.py | osljw/keras_tf |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cascade User deletion to GPG keys
Revision ID: 8fd3400c760f
Revises: c0302a8a0878
Create Date: 2018-03-09 23:27:06.222073
"""
from alembic import op
revision = '8fd3400c760f'
down_revision = 'c0302a8a0878'
def upgrade():
    """Recreate the GPG-key FK so deleting a user cascades to their keys."""
    op.drop_constraint(
        'accounts_gpgkey_user_id_fkey', 'accounts_gpgkey', type_='foreignkey'
    )
    # Same FK, now with ON DELETE CASCADE; deferred so deletes within a
    # transaction resolve at commit time.
    op.create_foreign_key(
        'accounts_gpgkey_user_id_fkey',
        'accounts_gpgkey',
        'accounts_user', ['user_id'], ['id'],
        ondelete='CASCADE',
        initially='DEFERRED',
        deferrable=True
    )
def downgrade():
    """Restore the GPG-key FK without ON DELETE CASCADE."""
    op.drop_constraint(
        'accounts_gpgkey_user_id_fkey', 'accounts_gpgkey', type_='foreignkey'
    )
    op.create_foreign_key(
        'accounts_gpgkey_user_id_fkey',
        'accounts_gpgkey',
        'accounts_user', ['user_id'], ['id'],
        initially='DEFERRED',
        deferrable=True
    )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | warehouse/migrations/versions/8fd3400c760f_cascade_user_deletion_to_gpg_keys.py | matt-land/warehouse |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from oslo_config import cfg
from st2common import config as common_config
from st2common.constants.system import VERSION_STRING
from st2common.constants.system import DEFAULT_CONFIG_FILE_PATH
common_config.register_opts()
CONF = cfg.CONF
def parse_args(args=None):
    """Parse CLI/config args via oslo.config, loading the default st2 config file."""
    cfg.CONF(args=args, version=VERSION_STRING,
             default_config_files=[DEFAULT_CONFIG_FILE_PATH])


def register_opts():
    """Register every config option group this service needs."""
    _register_common_opts()
    _register_results_tracker_opts()


def get_logging_config_path():
    # Path comes from the [resultstracker] option group registered below.
    return cfg.CONF.resultstracker.logging
def _register_common_opts():
    """Register options shared by all st2 services."""
    common_config.register_opts()


def _register_results_tracker_opts():
    """Register the [resultstracker] option group (logging config location)."""
    resultstracker_opts = [
        cfg.StrOpt(
            'logging', default='/etc/st2/logging.resultstracker.conf',
            help='Location of the logging configuration file.')
    ]
    CONF.register_opts(resultstracker_opts, group='resultstracker')


# Register options at import time so CONF is usable immediately.
register_opts()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | st2actions/st2actions/resultstracker/config.py | saucetray/st2 |
import unittest
from unittest.mock import MagicMock
from scraper.scraper import get_table_with_data, row_not_loaded, \
reload_table_rows, get_coin_name, get_coin_symbol, \
get_coin_price, get_coin_change24h, get_coin_change7d, \
get_coin_market_cap, get_coin_volume24h, get_coin_circulating_supply
class TestStringMethods(unittest.TestCase):
    """Tests for scraper.scraper parsing helpers.

    NOTE(review): the class name says "StringMethods" but these cover the
    scraper helpers — a rename would be clearer; kept for discovery stability.
    """

    def test_get_table_with_data_raises_error(self):
        # Empty page source: there is no table to locate.
        self.assertRaises(AttributeError, get_table_with_data, "")

    def test_row_not_loaded_true(self):
        def has_attr(arg):
            return True
        row_mock = MagicMock()
        row_mock.has_attr = has_attr
        self.assertTrue(row_not_loaded(row_mock))

    def test_row_not_loaded_false(self):
        def has_attr(arg):
            return False
        row_mock = MagicMock()
        row_mock.has_attr = has_attr
        self.assertFalse(row_not_loaded(row_mock))

    def test_reload_table_rows_raises_error(self):
        # A driver whose page_source is empty cannot yield table rows.
        driver_mock = MagicMock(page_source="")
        self.assertRaises(AttributeError, reload_table_rows, driver_mock)

    # Each get_coin_* helper returns None when given an empty cell list.
    def test_get_coin_name_result_none(self):
        self.assertIsNone(get_coin_name([]))

    def test_get_coin_symbol_result_none(self):
        self.assertIsNone(get_coin_symbol([]))

    def test_get_coin_price_result_none(self):
        self.assertIsNone(get_coin_price([]))

    def test_get_coin_change24h_result_none(self):
        self.assertIsNone(get_coin_change24h([]))

    def test_get_coin_change7d_result_none(self):
        self.assertIsNone(get_coin_change7d([]))

    def test_get_coin_market_cap_result_none(self):
        self.assertIsNone(get_coin_market_cap([]))

    def test_get_coin_volume24h_result_none(self):
        self.assertIsNone(get_coin_volume24h([]))

    def test_get_coin_circulating_supply_result_none(self):
        self.assertIsNone(get_coin_circulating_supply([]))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | tests/test.py | alexd-conf/coinmarketcap-scraper |
from sympy import Symbol, I
from qnet.algebra.core.operator_algebra import (
LocalSigma, rewrite_with_operator_pm_cc, OperatorPlusMinusCC)
from qnet.algebra.library.fock_operators import Destroy, Create
from qnet.algebra.core.hilbert_space_algebra import LocalSpace
from qnet.printing import srepr
def test_simple_cc():
    """Test that we can find complex conjugates in a sum directly"""
    hs_c = LocalSpace('c', dimension=3)
    hs_q = LocalSpace('q1', basis=('g', 'e'))
    Delta_1 = Symbol('Delta_1')
    Omega_1 = Symbol('Omega_1')
    g_1 = Symbol('g_1')
    a = Destroy(hs=hs_c)
    a_dag = Create(hs=hs_c)
    sig_p = LocalSigma('e', 'g', hs=hs_q)
    sig_m = LocalSigma('g', 'e', hs=hs_q)
    coeff = (-I / 2) * (Omega_1 * g_1 / Delta_1)
    # a*sig_p - a_dag*sig_m should collapse to "term minus its conjugate".
    jc_expr = coeff * (a * sig_p - a_dag * sig_m)
    simplified = rewrite_with_operator_pm_cc(jc_expr)
    assert simplified == coeff * OperatorPlusMinusCC(a * sig_p, sign=-1)
    assert (srepr(simplified.term) ==
            "OperatorPlusMinusCC(OperatorTimes(Destroy(hs=LocalSpace('c', "
            "dimension=3)), LocalSigma('e', 'g', hs=LocalSpace('q1', "
            "basis=('g', 'e')))), sign=-1)")
    # Round trip: doit() must expand back to the original expression.
    expanded = simplified.doit()
    assert expanded == jc_expr


def test_scalar_coeff_cc():
    """Test that we can find complex conjugates in a sum of
    ScalarTimesOperator"""
    hs_1 = LocalSpace('q1', basis=('g', 'e'))
    hs_2 = LocalSpace('q2', basis=('g', 'e'))
    kappa = Symbol('kappa', real=True)
    a1 = Destroy(hs=hs_1)
    a2 = Destroy(hs=hs_2)
    # Scalar prefactors (I/2 * 2*kappa) must be pulled out before matching.
    jc_expr = I/2 * (2*kappa * (a1.dag() * a2) - 2*kappa * (a1 * a2.dag()))
    simplified = rewrite_with_operator_pm_cc(jc_expr)
    assert (
        simplified == I * kappa * OperatorPlusMinusCC(a1.dag() * a2, sign=-1))
    expanded = simplified.doit()
    assert expanded == I * kappa * (a1.dag() * a2 - a1 * a2.dag())
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/algebra/test_operator_plus_minus_cc.py | amitkumarj441/QNET |
import numpy as np
from .SudokuSquare import SudokuSquare
from .SudokuLine import SudokuLine
def build_squares(matrix, symbols, dimension=3):
    """Split the matrix into a (dim x dim) list of SudokuSquare"""
    squares = []
    for r in range(dimension):
        row_start = r * dimension
        row_cells = []
        for c in range(dimension):
            col_start = c * dimension
            block = matrix[row_start:row_start + dimension,
                           col_start:col_start + dimension]
            row_cells.append(SudokuSquare(block, symbols))
        squares.append(row_cells)
    return squares
def build_rows(matrix, symbols, dimension=3):
    """Wrap each row of the matrix in a SudokuLine."""
    return [SudokuLine(matrix[r], symbols) for r in range(matrix.shape[0])]
def build_columns(matrix, symbols, dimension=3):
    """Wrap each column of the matrix in a SudokuLine."""
    return [SudokuLine(matrix[:, c], symbols) for c in range(matrix.shape[1])]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | src/sudoku/builders.py | ognibit/sudoku-solver |
from datetime import datetime, timedelta
import jwt
key = "#KEY_TO_BE_REPLACED#"
def handler(event, context):
    """Lambda entry point: CloudFront events (with 'Records') are validated,
    everything else is treated as a token-generation request."""
    if event.get('Records') is None:
        return generate_token(event)
    return process_cf_request(event)
def generate_token(event):
    """Create a signed JWT granting access to ``event['uri']``.

    The token carries the (slash-prefixed) uri and expires after
    ``event['minutes']`` minutes (default 30).
    """
    uri = event['uri']
    # BUG FIX: the original used `uri[0] is not '/'`, comparing string
    # *identity* (implementation-defined, SyntaxWarning on 3.8+) and
    # crashing on an empty uri; startswith handles both correctly.
    if not uri.startswith('/'):
        uri = '/' + uri
    payload = {
        'uri': uri,
        'iat': datetime.now(),
        'exp': datetime.now() + timedelta(minutes=event.get("minutes", 30))
    }
    return jwt.encode(payload, key, algorithm='HS256')
def process_cf_request(event):
    """Validate the JWT embedded in a CloudFront request URI.

    On success the request's uri is rewritten to the uri stored in the
    token and the request is forwarded; on any JWT error a 401 response
    object is returned instead.
    """
    request = event["Records"][0]["cf"]["request"]
    # The uri starts with a "/" that is not part of the token
    token = request["uri"][1:]
    try:
        claims = jwt.decode(token, key, algorithms=["HS256"])
    except jwt.PyJWTError:
        return {
            'status': "401",
            'statusDescription': "Unauthorized",
            'body': "Unauthorized",
            'bodyEncoding': "text"
        }
    request["uri"] = claims["uri"]
    return request
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | lambda/main.py | helionagamachi/S3Share |
import time
from PyQt5 import QtGui, QtCore
from ui.room_item import Ui_Form
from PyQt5.QtWidgets import QWidget
class Room_Item(QWidget,Ui_Form):
    """List-item widget presenting one chat room's metadata."""

    def __init__(self, parent=None, room_data=None):
        super(Room_Item, self).__init__(parent)
        self.setupUi(self)
        # room_data: dict describing the room (naturalName, roomName,
        # description, creationDate in ms, owners/admins/members, maxUsers)
        self.data = room_data
        self.setRoomInfo()

    def setRoomInfo(self):
        """Populate the widget's labels from the room-data dict."""
        data = self.data
        self.room_name.setText('{}({})'.format(data['naturalName'], data['roomName']))
        self.description.setText("<a style='color:#BCBCBC'>{}</a>".format(data['description']))
        # creationDate is in milliseconds since the epoch
        created = time.strftime("%Y-%m-%d", time.localtime(int(data['creationDate']) / 1000))
        self.create_time.setText("<a style='color:#BCBCBC'>{}</a>".format(created))
        occupants = len(data['owners']) + len(data['admins']) + len(data['members'])
        capacity = '∞' if data['maxUsers'] == 0 else data['maxUsers']
        self.member.setText("<a style='color:#BCBCBC'>{}/{}</a>".format(occupants, capacity))
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
... | 3 | TestProject/app/view/RoomItem.py | ChinSing00/ChatChat |
# Copyright 2015 Ciara Kamahele-Sanfratello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Planner is a generic interface used by Simulators to choose the next action to take
class Planner:
    """Generic interface used by Simulators to choose the next action to take."""
    def __init__(self):
        pass
    def next_action(self, initial_state, goal_state, prev_obs):
        """Return the next action to take; concrete planners override this.

        :param initial_state: state to plan from
        :param goal_state: state to reach
        :param prev_obs: previous observation(s) -- exact semantics are
            defined by concrete planners (TODO confirm per subclass)
        """
        pass
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | simulator/Planners/Planner.py | ciarakamahele/sasy |
import insightconnect_plugin_runtime
from .schema import GetDeviceSoftwareInput, GetDeviceSoftwareOutput, Input, Output, Component
# Custom imports below
class GetDeviceSoftware(insightconnect_plugin_runtime.Action):
    """Action returning the software inventory of a single Automox device."""

    def __init__(self):
        # BUG FIX: `super(self.__class__, self)` recurses infinitely if this
        # class is ever subclassed; zero-argument super() is the safe py3 form.
        super().__init__(
            name="get_device_software",
            description=Component.DESCRIPTION,
            input=GetDeviceSoftwareInput(),
            output=GetDeviceSoftwareOutput(),
        )

    def run(self, params={}):
        """Fetch software for the given org/device via the connection's API.

        :param params: plugin input dict with Input.ORG_ID and Input.DEVICE_ID
        :return: dict mapping Output.SOFTWARE to the API response
        """
        device_software = self.connection.automox_api.get_device_software(
            params.get(Input.ORG_ID), params.get(Input.DEVICE_ID)
        )
        return {Output.SOFTWARE: device_software}
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | plugins/automox/icon_automox/actions/get_device_software/action.py | lukaszlaszuk/insightconnect-plugins |
import os
import tensorflow as tf
from app.storage_service import weights_filepath, dictionaries_dirpath
def test_local_storage():
    """Every locally stored artifact (weights + dictionaries) exists on disk."""
    local_dicts = dictionaries_dirpath("local")
    expected_files = [
        weights_filepath("local"),
        os.path.join(local_dicts, "dic.txt"),
        os.path.join(local_dicts, "dic_s.txt"),
    ]
    for path in expected_files:
        assert os.path.isfile(path)
def test_remote_storage():
    """Every remotely stored artifact is reachable through tf.io.gfile."""
    remote_dicts = dictionaries_dirpath("remote")
    expected_files = [
        weights_filepath("remote"),
        os.path.join(remote_dicts, "dic.txt"),
        os.path.join(remote_dicts, "dic_s.txt"),
    ]
    for path in expected_files:
        assert tf.io.gfile.exists(path)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | test/storage_service_test.py | zaman-lab/brexitmeter-py |
from neomodel import config, StructuredNode, StringProperty, install_all_labels, install_labels
from neomodel.core import get_database_from_cls
db = get_database_from_cls(None)
config.AUTO_INSTALL_LABELS = False
class NoConstraintsSetup(StructuredNode):
    # AUTO_INSTALL_LABELS is False while this class is created, so the
    # unique_index does NOT result in an actual database constraint.
    name = StringProperty(unique_index=True)
class TestAbstractNode(StructuredNode):
    # abstract node class; its labels are installed explicitly in test_install_all
    __abstract_node__ = True
    name = StringProperty(unique_index=True)
config.AUTO_INSTALL_LABELS = True
def test_labels_were_not_installed():
    """Two nodes with the same 'unique' name can be saved because no
    constraint was installed for NoConstraintsSetup."""
    first = NoConstraintsSetup(name='bob').save()
    second = NoConstraintsSetup(name='bob').save()
    assert first.id != second.id
    # clean up the nodes created above
    for node in NoConstraintsSetup.nodes.all():
        node.delete()
def test_install_all():
    """install_labels / install_all_labels complete without raising."""
    install_labels(TestAbstractNode)
    install_all_labels()
    assert True
    # clean up: drop the constraint created above so later tests start clean
    db.cypher_query("DROP CONSTRAINT on (n:NoConstraintsSetup) ASSERT n.name IS UNIQUE")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false... | 3 | test/test_label_install.py | moengage/neomodel |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.document import Document
from bokeh.io.state import curstate
# Module under test
import bokeh.io.doc as bid # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_curdoc_from_curstate() -> None:
    """curdoc() must return the document held by the current State."""
    assert bid.curdoc() is curstate().document
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_set_curdoc_sets_curstate() -> None:
    """set_curdoc() must install the document on the current State."""
    doc = Document()
    bid.set_curdoc(doc)
    assert curstate().document is doc
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?"... | 3 | tests/unit/bokeh/io/test_doc.py | brendancol/bokeh |
from flask import request, jsonify
from jdxapi.utils.logger_resource import LoggerResource
from jdxapi.app import api, DB
from jdxapi.models import Pipeline
from jdxapi.utils.functions import RequestHandler, ResponseHandler
from jdxapi.utils.error import ApiError
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
import datetime
import jdxapi.utils.constants as c
from jdxapi.services.competensor import get_preview
@api.resource("/preview")
class Preview(LoggerResource):
def post(self):
req = request.get_json()
pipeline_id = RequestHandler.get_pipeline_id(req, True)
_ = Pipeline.get_pipeline_from_id(pipeline_id)
preview_data = get_preview(pipeline_id)
resp_data = self.create_response_data(pipeline_id, preview_data, req)
response = ResponseHandler.create_response(resp_data, 200)
return response
def create_response_data(self, pipeline_id, preview_data, req):
resp_data = {
c.PIPELINE_ID: str(pipeline_id),
c.TIMESTAMP: str(datetime.datetime.now()),
"preview": preview_data
}
return resp_data
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | jdxapi/routes/preview.py | jobdataexchange/jdx-api |
"""Rename action._type to action.type_
Revision ID: 45024170cf6
Revises: 337978f8c75
Create Date: 2014-06-18 14:21:37.202030
"""
# revision identifiers, used by Alembic.
revision = '45024170cf6'
down_revision = '337978f8c75'
from alembic import op
import sqlalchemy as sa
from evesrp.models import ActionType
def upgrade():
    """Rename column ``_type`` to ``type_`` on the ``action`` table."""
    op.alter_column(
        'action',
        column_name='_type',
        new_column_name='type_',
        existing_type=ActionType.db_type,
        existing_nullable=False,
    )
def downgrade():
    """Revert: rename column ``type_`` back to ``_type`` on ``action``."""
    op.alter_column(
        'action',
        column_name='type_',
        new_column_name='_type',
        existing_type=ActionType.db_type,
        existing_nullable=False,
    )
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | src/evesrp/migrate/versions/45024170cf6_rename_action__type_to_action_type_.py | paxswill/evesrp |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from packaging.version import InvalidVersion, Version
from internal_backend.utilities.register import PantsReleases
def _branch_name(revision_str: str) -> str:
    """Shorthand: parse *revision_str* and return its release branch name."""
    return PantsReleases._branch_name(Version(revision_str))
def test_branch_name_master() -> None:
    """All dev-release spellings of 1.1.0 map to branch 1.1.x."""
    assert "1.1.x" == _branch_name("1.1.0-dev1")
    assert "1.1.x" == _branch_name("1.1.0dev1")
    assert "1.1.x" == _branch_name("1.1.0.dev1")
def test_branch_name_stable() -> None:
    """rc and final releases map to <major>.<minor>.x branches."""
    assert "1.1.x" == _branch_name("1.1.0-rc1")
    assert "1.1.x" == _branch_name("1.1.0rc1")
    assert "2.1.x" == _branch_name("2.1.0")
    assert "1.2.x" == _branch_name("1.2.0rc0-12345")
    # A negative example: do not prepend `<number>.`, because # the first two numbers will be taken
    # as branch name.
    assert "12345.1.x" == _branch_name("12345.1.2.0rc0")
def test_invalid_test_branch_name_stable_append_alphabet():
    """A non-numeric local suffix is rejected as an invalid version."""
    with pytest.raises(InvalidVersion):
        _branch_name("1.2.0rc0-abcd")
def test_invalid_test_branch_name_stable_prepend_numbers():
    """A dash-joined numeric prefix is rejected as an invalid version."""
    with pytest.raises(InvalidVersion):
        _branch_name("12345-1.2.0rc0")
def test_branch_name_unknown_suffix():
    """An unrecognized release suffix raises ValueError."""
    with pytest.raises(ValueError):
        _branch_name("1.1.0-anything1")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | pants-plugins/src/python/internal_backend/utilities/releases_test.py | viktortnk/pants |
'''
AM2315 Temp/Humidity Sensor Driver (using Silta bridge)
Based on the Adafruite Arduino driver
https://github.com/adafruit/Adafruit_AM2315
'''
import time
import tca9548a
AM2315_ADDR = 0xB8
class AM2315:
    """Driver for the AM2315 temperature/humidity sensor reached through a
    TCA9548A I2C multiplexer on a Silta bridge."""
    def __init__(self, bridge, mux_channel=3):
        # bridge: Silta I2C bridge used for raw transactions
        self.bridge = bridge
        self.mux = tca9548a.TCA9548A(bridge)
        # mux channel the sensor is wired to
        self.mux_channel = mux_channel
    def read(self):
        """Perform one measurement; return (humidity, temperature).

        Humidity is %RH, temperature is degrees C (sign bit handled).
        Raises IOError if the sensor reply is malformed.
        The transaction order below matters -- do not reorder.
        """
        self.mux.set_channel(self.mux_channel)
        # Make sure we're running at 100kHz
        self.bridge.i2c_speed(100000)
        # 'wake up' the sensor (it clock stretches so transaction fails)
        self.bridge.i2c(AM2315_ADDR, 0, [0])
        # Read(cmd 0x03) 4 bytes from address 0x00
        self.bridge.i2c(AM2315_ADDR, 0, [0x03, 0x00, 4])
        # Wait ~10ms for measurement
        time.sleep(0.01)
        # Get measurement data; reply[1] is the data byte count (expected 4)
        reply = self.bridge.i2c(AM2315_ADDR, 8, [])
        if not isinstance(reply, list) or (reply[1] != 4):
            raise IOError('Invalid AM2315 response')
        # Convert bytes to humidity (value is in tenths of %RH)
        humidity = ((reply[2] << 8) + reply[3])/10.0
        # Convert bytes to temperature; top bit of the high byte is the sign
        temperature = (((reply[4] & 0x7F) << 8) + reply[5])/10.0
        if (reply[4] >> 7) == 1:
            temperature = -temperature
        return humidity, temperature
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | sw/examples/drivers/am2315.py | nilkemorya/silta |
def find_common_left(strings):
    """
    :param list[str] strings: list of strings we want to find a common left part in
    :rtype: str
    """
    shortest = min(len(s) for s in strings)
    common = []
    for pos in range(shortest):
        char = strings[0][pos]
        # stop at the first position where any string disagrees
        if any(s[pos] != char for s in strings[1:]):
            break
        common.append(char)
    return ''.join(common)
def find_common_right(strings):
    """Return the longest common *suffix* (mirror of find_common_left).

    :param list[str] strings: list of strings to find a common right part in
    :rtype: str
    """
    reversed_string = [s[::-1] for s in strings]
    return find_common_left(reversed_string)[::-1]
def find_common(strings, side='left'):
    """
    :param list[str] strings: list of strings we want to find a common left part in
    :type side: str ('left'/'l' for prefix, anything else for suffix)
    :rtype: str
    """
    wants_left = side[0].lower() == 'l'
    if wants_left:
        return find_common_left(strings=strings)
    return find_common_right(strings=strings)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | cyberspace/find_common.py | idin/cyberspace |
import io
import jax
import requests
import PIL
from PIL import ImageOps
import numpy as np
import jax.numpy as jnp
from dall_e_jax import get_encoder, get_decoder, map_pixels, unmap_pixels
target_image_size = 256
def download_image(url):
    """Fetch *url* and return it as a PIL Image; raises on HTTP errors."""
    resp = requests.get(url)
    resp.raise_for_status()
    return PIL.Image.open(io.BytesIO(resp.content))
def preprocess(img):
    """Center-crop/resize *img* to target_image_size and convert it to a
    1xCxHxW float32 array in [0, 1], then run map_pixels (presumably
    rescales to the model's expected pixel range -- see dall_e_jax)."""
    img = ImageOps.fit(img, [target_image_size,] * 2, method=0, bleed=0.0, centering=(0.5, 0.5))
    img = np.expand_dims(np.transpose(np.array(img).astype(np.float32)/255, (2, 0, 1)), 0)
    return map_pixels(img)
jax_enc_fn, jax_enc_params = get_encoder("encoder.pkl")
jax_dec_fn, jax_dec_params = get_decoder("decoder.pkl")
x = preprocess(download_image('https://assets.bwbx.io/images/users/iqjWHBFdfxIU/iKIWgaiJUtss/v2/1000x-1.jpg'))
z_logits = jax_enc_fn(jax_enc_params, x)
z = jnp.argmax(z_logits, axis=1)
z = jnp.transpose(jax.nn.one_hot(z, num_classes=8192), (0, 3, 1, 2))
x_stats = jax_dec_fn(jax_dec_params, z)
x_rec = unmap_pixels(jax.nn.sigmoid(x_stats[:, :3]))
x_rec = np.transpose((np.array(x_rec[0]) * 255).astype(np.uint8), (1, 2, 0))
PIL.Image.fromarray(x_rec).save('reconstructed.png')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | examples/pure_jax.py | kingoflolz/DALL-E |
from os import getenv
import tenacity
from flask import Flask
from py_eureka_client import eureka_client
def get_db_path():
    """Build the SQLAlchemy database URL from the DB_* environment variables."""
    parts = [getenv('DB_' + name) for name in
             ('DIALECT', 'DRIVER', 'USERNAME', 'PASSWORD', 'HOST', 'PORT', 'NAME')]
    return '{}+{}://{}:{}@{}:{}/{}'.format(*parts)
def get_eureka_path():
    """Build the Eureka server URL from EUREKA_HOST / EUREKA_PORT env vars."""
    host = getenv('EUREKA_HOST')
    port = getenv('EUREKA_PORT')
    return 'http://{}:{}/eureka/'.format(host, port)
class BaseConfig(object):
    # Values are resolved from the environment once, at import time.
    DATABASE_SERVER = get_db_path()
    EUREKA_SERVER = get_eureka_path()
    FLASK_RUN_PORT = int(getenv('FLASK_RUN_PORT', '5000'))
def create_app(bp, app_name, init_schema_func, config=BaseConfig, tenacity_wait=30):
    """Create and configure a Flask app for a microservice.

    :param bp: blueprint carrying the service's routes
    :param app_name: name to register with the Eureka service registry
    :param init_schema_func: callable that initializes the DB schema
    :param config: configuration object (default BaseConfig)
    :param tenacity_wait: seconds between Eureka registration retries
    """
    app = Flask(__name__)
    app.register_blueprint(bp)
    app.config.from_object(config)
    app.url_map.strict_slashes = True
    # retried with a fixed wait until the Eureka server is reachable
    @tenacity.retry(wait=tenacity.wait_fixed(tenacity_wait))
    def _init_service_discovery():
        eureka_client.init(eureka_server=config.EUREKA_SERVER,
                           app_name=app_name,
                           instance_port=config.FLASK_RUN_PORT)
    init_schema_func()
    _init_service_discovery()
    return app
def create_test_app(bp):
    """Return a minimal Flask app in TESTING mode exposing *bp*'s routes."""
    test_app = Flask(__name__)
    test_app.register_blueprint(bp)
    test_app.config['TESTING'] = True
    return test_app
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | timesheet_utils/base.py | MR6996/timesheet-utils |
import sys
import subprocess
from unittest import TestCase
from unittest.mock import patch
diffview = sys.modules["DiffView"]
BzrHelper = diffview.util.vcs.BzrHelper
class test_BzrHelper(TestCase):
    """Tests for BzrHelper's revision-spec parsing."""

    def setUp(self):
        self.dummy_process = DummyProcess()

    def test_init(self):
        """A fresh helper has not collected changed files yet."""
        bzr_helper = BzrHelper('/repo/base')
        self.assertFalse(bzr_helper.got_changed_files)

    @patch('subprocess.Popen')
    def test_file_versions(self, mocked_Popen):
        """get_file_versions splits an 'old..new' spec into its two halves.

        NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        assertEqual is the supported spelling.
        """
        bzr_helper = BzrHelper('/repo/base')
        self.assertEqual(
            bzr_helper.get_file_versions(''),
            ('last:1', ''))
        self.assertEqual(
            bzr_helper.get_file_versions('branch_name'),
            ('branch_name', ''))
        self.assertEqual(
            bzr_helper.get_file_versions('branch_name..'),
            ('branch_name', ''))
        self.assertEqual(
            bzr_helper.get_file_versions('branch_name..other_branch_name'),
            ('branch_name', 'other_branch_name'))
        self.assertEqual(
            bzr_helper.get_file_versions('..other_branch_name'),
            ('', 'other_branch_name'))
class DummyProcess(object):
    """Dummy process to return values from `communicate()`.
    Set `ret_vals` to use.
    """
    def communicate(self, *args, **kwargs):
        # pop from the front so successive calls replay ret_vals in order
        return self.ret_vals.pop(0)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer":... | 3 | tests/test_bzr_helper.py | rkoval/SublimeDiffView |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def cylinder():
    """Interactively compute cylinder surface areas.

    Prompts (in Russian) for integer radius and height, then asks which
    area to print: lateral surface (option '1') or total surface ('2').
    """
    radius = int(input('Введите радиус цилиндра: '))
    height = int(input('Введите высоту цилиндра: '))
    def circle():
        # total surface area: 2*pi*r*h + 2*pi*r^2 (pi approximated as 3.14)
        print('Площадь полной поверхности цилиндра: ',
              2 * 3.14 * radius * height + 2 * 3.14 * radius ** 2)
    print('Какую площадь нужно получить?')
    print('Площадь боковой поверхности? - 1')
    print('Площадь полной поверхности цилиндра? - 2')
    message = input('>>> ')
    if message.lower() == '1':
        # lateral surface area: 2*pi*r*h
        print('Площадь боковой повехности: ', 2 * 3.14 * radius * height)
    elif message.lower() == '2':
        circle()
    else:
        print('Неизвестная команда')
if __name__ == '__main__':
cylinder()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"... | 3 | tasks/z2.py | nbobrov8/laba10 |
# -*- coding: utf-8 -*-
## @package pycv_tutorial.color_space
#
# 画像処理: 色空間の変換
# @author tody
# @date 2016/06/27
import cv2
import matplotlib.pyplot as plt
# Display an RGB image
def showImageRGB(image_file):
    """Read *image_file* with OpenCV (BGR order), convert to RGB, display it."""
    image_bgr = cv2.imread(image_file)
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    plt.title('RGB')
    plt.imshow(image_rgb)
    plt.axis('off')
    plt.show()
# Display a grayscale image
def showImageGray(image_file):
    """Read *image_file* as grayscale and display it."""
    image_gray = cv2.imread(image_file, 0)
    plt.title('Gray')
    plt.gray()
    plt.imshow(image_gray)
    plt.axis('off')
    plt.show()
# Display the HSV channels
def showImageHSV(image_file):
    """Show the Hue / Saturation / Value channels of the image side by side."""
    image_bgr = cv2.imread(image_file)
    image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    channels = [('Hue', image_hsv[:, :, 0]),
                ('Saturation', image_hsv[:, :, 1]),
                ('Value', image_hsv[:, :, 2])]
    for position, (label, channel) in enumerate(channels, start=1):
        plt.subplot(1, 3, position)
        plt.title(label)
        plt.gray()
        plt.imshow(channel)
        plt.axis('off')
    plt.show()
# Display the Lab channels
def showImageLab(image_file):
    """Show the L / a / b channels of the image side by side."""
    image_bgr = cv2.imread(image_file)
    image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)
    channels = [('L', image_Lab[:, :, 0]),
                ('a', image_Lab[:, :, 1]),
                ('b', image_Lab[:, :, 2])]
    for position, (label, channel) in enumerate(channels, start=1):
        plt.subplot(1, 3, position)
        plt.title(label)
        plt.gray()
        plt.imshow(channel)
        plt.axis('off')
    plt.show()
if __name__ == '__main__':
image_file = "images/peppers.png"
showImageRGB(image_file)
showImageGray(image_file)
showImageHSV(image_file)
showImageLab(image_file) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding s... | 3 | opencv/pycv_tutorial/color_space.py | OYukiya/PyIntroduction |
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404
class StaffRequiredMixin(object):
    """View mixin requiring an authenticated staff user (404 otherwise)."""

    @classmethod
    def as_view(cls, *args, **kwargs):
        # first parameter of a classmethod is the class; the original named
        # it 'self', which was misleading (behavior unchanged)
        view = super(StaffRequiredMixin, cls).as_view(*args, **kwargs)
        return login_required(view)

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        if request.user.is_staff:
            return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
        else:
            # 404 (not 403) hides the page's existence from non-staff users
            raise Http404
class LoginRequiredMixin(object):
    """View mixin requiring an authenticated user for all dispatch paths."""

    @classmethod
    def as_view(cls, *args, **kwargs):
        # classmethod's first parameter is the class; renamed from the
        # misleading 'self' (behavior unchanged)
        view = super(LoginRequiredMixin, cls).as_view(*args, **kwargs)
        return login_required(view)

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/products/mixins.py | bopopescu/django-estore |
"""
github3.gists.comment
---------------------
Module containing the logic for a GistComment
"""
from github3.models import BaseComment
from github3.users import User
class GistComment(BaseComment):
    """This object represents a comment on a gist.

    Two comment instances can be checked like so::

        c1 == c2
        c1 != c2

    And is equivalent to::

        c1.id == c2.id
        c1.id != c2.id

    See also: http://developer.github.com/v3/gists/comments/
    """
    def __init__(self, comment, session=None):
        super(GistComment, self).__init__(comment, session)
        #: :class:`User <github3.users.User>` who made the comment
        #: Unless it is not associated with an account
        user_data = comment.get('user')
        self.user = User(user_data, self) if user_data else None  # (No coverage)

    def __repr__(self):
        return '<Gist Comment [{0}]>'.format(self.user.login)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"a... | 3 | github3/gists/comment.py | kbakba/github3.py |
import numpy as np
import pytest
import pyswallow as ps
import pyswallow.handlers.boundary_handler as psbh
class TestMOSwallow:
    """Unit tests for the multi-objective swallow (particle)."""

    @staticmethod
    def _make_swallow():
        # shared construction, previously duplicated in both fixtures
        bounds = {
            'x0': [-50.0, 50.0],
            'x1': [-50.0, 50.0]
        }
        return ps.MOSwallow(bounds, n_obj=2)

    @pytest.fixture
    def swallow(self):
        return self._make_swallow()

    @pytest.fixture
    def opp_swallow(self):
        return self._make_swallow()

    def test_move(self, swallow):
        """Starting at the origin, position after move() equals the velocity."""
        swallow.position = np.array([0.0, 0.0])
        swallow.velocity = np.array([10.0, 10.0])
        bh = psbh.StandardBH()
        swallow.move(bh)
        assert np.array_equal(swallow.position, swallow.velocity)

    def test_dominate(self, swallow, opp_swallow):
        """A swallow with strictly better fitness dominates the other."""
        opp_swallow.fitness = [50.0, 50.0]
        swallow.fitness = [5.0, 5.0]
        ret_bool = swallow.dominate(opp_swallow)
        assert ret_bool

    def test_self_dominate(self, swallow):
        """Current fitness beating pbest_fitness counts as self-domination."""
        swallow.fitness = [5.0, 5.0]
        swallow.pbest_fitness = [50.0, 50.0]
        ret_bool = swallow.self_dominate()
        assert ret_bool
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | tests/swallows/test_mo_swallow.py | danielkelshaw/PySwallow |
import sentencepiece as spm
s = spm.SentencePieceProcessor('data/jpa_wiki_100000.model')
#file1 = open('jyp_train.txt', 'r')
#Lines = file1.readlines()
# for i in Lines:
# print(i)
path_input_eng = 'data/eng.txt'
path_output_eng = 'data/eng_train_1000.txt'
path_input_jyp = 'data/jyp.txt'
path_output_jyp = 'data/jyp_train_1000.txt'
def tokenize(sequence):
    """Tokenize *sequence* with the sentencepiece model `s` and return the
    pieces joined by single spaces, with one leading space.

    Pieces that are empty after stripping the '▁' word-boundary marker are
    dropped. (Original version used quadratic string concatenation, an
    index-based loop and a no-op ``else: continue``.)
    """
    pieces = s.Encode(sequence, out_type=str, enable_sampling=True,
                      alpha=0.01, nbest_size=2)
    cleaned = [piece.replace('▁', '') for piece in pieces]
    return ''.join(' ' + piece for piece in cleaned if piece)
def save_file(path_input, path_output, check_token=False, number_line=1000):
    """Copy the first *number_line* lines of *path_input* to *path_output*,
    optionally running each line through tokenize() first.

    Uses ``with`` so both file handles are closed deterministically (the
    original leaked them).
    """
    with open(path_input, 'r') as src, open(path_output, 'w') as dst:
        for _ in range(number_line):
            line = src.readline()
            if check_token:
                line = tokenize(line)
            dst.write(line)
#print(tokenize('慈悲あまねく慈愛深きアッラーの御名において。'))
save_file(path_input_eng, path_output_eng, number_line= 100)
save_file(path_input_jyp, path_output_jyp,check_token = True, number_line= 100)
#def | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | tokenize/tokenize.py | khoauit99/open-NMT-test |
"""
Swaprs token1, for token2 in the sentence
token2 should belong in sentence for this to work
"""
def swap(token1, token2, sentence):
    """Return *sentence* with *token2*'s text replaced by *token1*'s text.

    *token2* must be a token of *sentence*: its .idx/.text locate the span
    to replace.
    """
    start = token2.idx
    end = start + len(token2.text)
    # sentence[:0] == '' so the original's special case for index 0 was redundant
    return sentence[:start] + token1.text + sentence[end:]
def remove_token(token, sentence):
    """Remove *token*'s text span from *sentence*.

    NOTE: this function was originally also named ``remove``; Python keeps
    only the last binding, so it was silently shadowed by the three-argument
    ``remove`` below and could never be called. Renaming it restores the
    intended functionality without breaking any (previously impossible)
    callers.
    """
    return remove(token.idx, token.idx + len(token.text), sentence)


def remove(start, end, sentence):
    """Remove all characters from *start* to *end* (exclusive) in *sentence*."""
    append = sentence[end:]
    # preserve the original's behavior of ignoring a non-positive start
    prepend = sentence[:start] if start > 0 else ''
    return prepend + append
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | src/utilities/string_functions.py | seadavis/StoryNode |
from __future__ import print_function
import subprocess
from distutils.command.build import build as distutils_build #pylint: disable=no-name-in-module
from setuptools import setup, find_packages, Command as SetupToolsCommand
VERSION = '0.1.dev0'
with open('requirements.txt', 'r') as f:
install_requires = f.readlines()
CUSTOM_COMMANDS = [
# TODO(jlewi): python -m is complaining that module spacy not found even
# though it should be installed due to requirements. Reinstalling
# it using a custom command appears to fix the problem.
['pip', 'install', 'spacy'],
['python', '-m', 'spacy', 'download', 'en'],
# TODO(sanyamkapoor): This isn't ideal but no other way for a seamless install right now.
['pip', 'install', 'https://github.com/kubeflow/batch-predict/tarball/master']
]
class Build(distutils_build):
    """Standard build command, extended to also run CustomCommands."""
    sub_commands = distutils_build.sub_commands + [('CustomCommands', None)]
class CustomCommands(SetupToolsCommand):
    """setup.py command that runs each shell command in CUSTOM_COMMANDS."""
    def initialize_options(self):
        # required by the Command interface; nothing to configure
        pass
    def finalize_options(self):
        # required by the Command interface; nothing to configure
        pass
    @staticmethod
    def run_custom_command(command_list):
        """Run one command, echoing its combined stdout/stderr; raise on failure."""
        print('Running command: %s' % command_list)
        p = subprocess.Popen(command_list, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout_data, _ = p.communicate()
        print('Command output: %s' % stdout_data)
        if p.returncode != 0:
            raise RuntimeError('Command %s failed: exit code: %s' % (command_list, p.returncode))
    def run(self):
        for command in CUSTOM_COMMANDS:
            self.run_custom_command(command)
setup(name='code-search',
description='Kubeflow Code Search Demo',
url='https://www.github.com/kubeflow/examples',
author='Google',
author_email='sanyamkapoor@google.com',
version=VERSION,
license='MIT',
packages=find_packages(),
install_requires=install_requires,
extras_require={},
cmdclass={
'build': Build,
'CustomCommands': CustomCommands,
})
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | code_search/src/setup.py | dimara/kubeflow-examples |
from numericalmethods.ode import euler_explicit, euler_implicit, rk4
def test_euler_explicit():
    """Explicit Euler on y' = x + y with y(0)=1 over [0, 1], step h=0.1."""
    expected = [1.0, 1.1, 1.22, 1.3620, 1.5282, 1.7210, 1.9431, 2.1974, 2.4872, 2.8159]
    solution = euler_explicit(f=lambda x, y: x+y, y0=1, t0=0, t=1, h=0.1)
    rounded = [round(value, 4) for value in solution]
    # The final point is dropped before comparing against the reference table.
    assert rounded[:-1] == expected
# TODO: check why this fails (wrong numerical derivative)
# def test_euler_implicit_without_dependence_on_y():
#     ans = [0, 1, 4, 9, 17, 28]
#     assert list(euler_implicit(f=lambda y, t: (1 + t**3)**(1/2), y0=0, t0=0, t=5, h=1)) == ans
def test_runge_kutta4():
    """Classic RK4 on y' = y + t with y(0)=1 over [0, 1], step h=0.1."""
    expected = [1.0, 1.11034167, 1.24280514, 1.39971699, 1.58364848, 1.79744128,
                2.04423592, 2.32750325, 2.65107913, 3.01920283, 3.43655949]
    computed = rk4(lambda y, t : y + t, y0=1, t0=0, t=1, h=0.1)
    # Compare both sides at 4 decimal places to tolerate float noise.
    assert [round(i, 4) for i in computed] == [round(i, 4) for i in expected]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/test_ode.py | LuisGMM/CharliePY |
from devices import network_devices
from napalm import get_network_driver
from pprint import pprint
def open_napalm_connection(device):
    """Open a NAPALM connection for *device* and return the live connection."""
    # Work on a copy so the caller's dictionary is left untouched.
    params = device.copy()
    # 'platform' selects the driver but is not a valid driver kwarg.
    driver = get_network_driver(params.pop('platform'))
    conn = driver(**params)
    conn.open()
    return conn
def main():
    """Connect to every inventory device, print its facts, then disconnect."""
    # Open all connections up front so facts printing runs back to back.
    connections = [open_napalm_connection(device) for device in network_devices]
    print("\n\n")
    print("Print facts for all devices in connections list")
    print("-" * 20)
    for conn in connections:
        print()
        print("-" * 6)
        print(conn)
        pprint("{} facts:".format(conn.platform))
        pprint(conn.get_facts())
        print("-" * 6)
        # Close the NAPALM connection
        conn.close()
    print("\n\n")
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | day3/linting/exercise1.py | austind/pyplus-ons |
import os
from wpkit.basic import PowerDirPath
# Absolute path of this package's directory, wrapped for path arithmetic.
pkg_dir=PowerDirPath(os.path.dirname(__file__))
# Data directories bundled inside the package.
pkg_data_dir=pkg_dir+'/data'
pkg_scripts_dir=pkg_data_dir+'/shell_scripts'
pkg_documents_dir=pkg_data_dir+'/documents'
def is_linux():
    """Return True when the interpreter runs on Linux, else False."""
    import sys
    # On Python 3, sys.platform is exactly 'linux' for all Linux systems.
    return sys.platform == 'linux'
def is_windows():
    """Return True when the interpreter runs on Windows, else False."""
    import sys
    # sys.platform is 'win32' on Windows, for both 32- and 64-bit builds.
    return sys.platform == 'win32'
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | wpkit/pkg_info.py | Peiiii/wpkit |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import unittest
from assemble_workflow.bundle_url_location import BundleUrlLocation
class TestBundleUrlLocation(unittest.TestCase):
    """URL construction for bundle (dist) and build artifact locations."""

    def test_opensearch(self) -> None:
        base = "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64"
        loc = BundleUrlLocation(base, "opensearch", "tar")
        self.assertEqual(
            loc.get_bundle_location("sql"),
            "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/tar/dist/opensearch/sql"
        )
        self.assertEqual(
            loc.get_build_location("sql"),
            "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/tar/builds/opensearch/sql"
        )

    def test_opensearch_tailing_slash(self) -> None:
        # A trailing slash on the base URL must not change the result.
        loc = BundleUrlLocation("https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/", "opensearch", "tar")
        self.assertEqual(
            loc.get_bundle_location("sql"),
            "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/tar/dist/opensearch/sql"
        )
        self.assertEqual(
            loc.get_build_location("sql"),
            "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/tar/builds/opensearch/sql"
        )

    def test_opensearch_dashboards(self) -> None:
        loc = BundleUrlLocation("https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64", "opensearch-dashboards", "tar")
        self.assertEqual(
            loc.get_bundle_location("sql"),
            "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/tar/dist/opensearch-dashboards/sql"
        )
        self.assertEqual(
            loc.get_build_location("sql"),
            "https://ci.opensearch.org/ci/dbc/bundle-build/1.3.0/1318/linux/x64/tar/builds/opensearch-dashboards/sql"
        )
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | tests/tests_assemble_workflow/test_bundle_url_location.py | rishabh6788/opensearch-build |
import unittest
from masonite.helpers import Dot, config
from config import database
class TestConfig(unittest.TestCase):
    # Exercises the `config` dot-notation helper and the underlying
    # `Dot.dict_dot` resolver against the sample application's config modules.
    def setUp(self):
        # Store the helper so tests read as self.config('file.KEY').
        self.config = config
    def test_config_can_get_value_from_file(self):
        self.assertEqual(self.config('application.DEBUG'), True)
    def test_config_can_get_dict_value_lowercase(self):
        # Key segments resolve case-insensitively ('debug' finds 'DEBUG').
        self.assertEqual(self.config('application.debug'), True)
    def test_config_can_get_dict_default(self):
        # Unknown config file falls back to the supplied default.
        self.assertEqual(self.config('sdff.na', 'default'), 'default')
    def test_config_not_found_returns_default(self):
        # Known file but unknown key also falls back to the default.
        self.assertEqual(self.config('application.nothere', 'default'), 'default')
    def test_dict_dot_returns_value(self):
        self.assertEqual(Dot().dict_dot('s3.test', {'s3': {'test': 'value'}}, ''), 'value')
    def test_config_can_get_dict_value_inside_dict(self):
        self.assertEqual(self.config('database.DATABASES.default'), database.DATABASES['default'])
    def test_config_can_get_dict_value_inside_dict_with_lowercase(self):
        self.assertEqual(self.config('database.databases.default'), database.DATABASES['default'])
    def test_config_can_get_dict_inside_dict_inside_dict(self):
        self.assertIsInstance(self.config('database.databases.sqlite'), dict)
    def test_config_can_get_dict_inside_dict_inside_another_dict(self):
        self.assertEqual(self.config('storage.DRIVERS.s3.test_locations.test'), 'value')
    def test_dot_dict(self):
        # 'async' is a keyword in modern Python, but works fine as a dict key.
        self.assertEqual(Dot().dict_dot('async.driver', {'async': {'driver': 'me'}}, 'you'), 'me')
    def test_dict_dot_works_for_deep_dictionaries(self):
        # Four levels deep; the path resolves to the innermost dict.
        dictionary = {
            'storage': {
                'drivers': {
                    'disk': {
                        'location': {
                            'uploading': 'uploads/'
                        }
                    }
                }
            }
        }
        self.assertEqual(Dot().dict_dot('storage.drivers.disk.location', dictionary)['uploading'], 'uploads/')
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | tests/helpers/test_config.py | STejas6/core |
import cv2
from pupil_labs.realtime_api.simple import discover_one_device
def main():
    """Stream the scene camera with a timestamp overlay until ESC is pressed."""
    # discover_one_device returns as soon as any device answers, or None on timeout.
    print("Looking for the next best device...")
    device = discover_one_device(max_search_duration_seconds=10)
    if device is None:
        print("No device found.")
        raise SystemExit(-1)
    print(f"Connecting to {device}...")
    try:
        while True:
            frame, frame_time = device.receive_scene_video_frame()
            draw_time(frame, frame_time)
            cv2.imshow("Scene Camera - Press ESC to quit", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == 27:  # ESC
                break
    except KeyboardInterrupt:
        pass
    finally:
        print("Stopping...")
        device.close()  # explicitly stop auto-update
def draw_time(frame, time):
    """Draw *time* as white text near the top-left of *frame*, in place."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = 1.0
    thickness = 1
    # Render the timestamp's string form at a fixed position.
    cv2.putText(
        frame,
        str(time),
        (20, 50),
        font,
        scale,
        (255, 255, 255),
        thickness=thickness,
        lineType=cv2.LINE_8,
    )
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | examples/simple/stream_scene_camera_video.py | pupil-labs/realtime-python-api |
from __future__ import absolute_import, unicode_literals
import datetime, json
from sqlalchemy import (
Column,
Integer,
Text,
DateTime,
ForeignKey,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
relationship,
backref,
)
from zope.sqlalchemy import ZopeTransactionExtension
# Thread-local session factory; ZopeTransactionExtension ties the session's
# lifecycle to the surrounding transaction manager.
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
class User(Base):
    """ORM model for a user; one-to-one with Address via the backref."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    # Unique display name.
    name = Column(Text, unique=True)
    # Populated automatically at insert time (UTC).
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    # uselist=False makes this a scalar one-to-one relationship.
    address = relationship("Address", uselist=False, backref="user")
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return "%s" % self.name
    def __repr__(self):
        return '<%s#%s>' % (self.__class__.__name__, self.id)
class Address(Base):
    """ORM model for an address; owned by a User via user_id / the backref."""
    __tablename__ = 'addresses'
    id = Column(Integer, primary_key=True)
    # Unique free-form address text.
    description = Column(Text, unique=True)
    # Foreign key to the owning user.
    user_id = Column(Integer, ForeignKey('users.id'))
    def __init__(self, description):
        self.description = description
    def __str__(self):
        # NOTE(review): prints the id, not the description — confirm intentional.
        return "%s" % (self.id)
    def __repr__(self):
        return '<%s#%s>' % (self.__class__.__name__, self.id)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | test-project/testproject/models.py | RedTurtle/sqlalchemy-datatables |
import pandas as pd
import numpy as np
import os
def target(df_exp_train, path=""):
    """Build a binary `target` column marking correct predictions.

    Joins *df_exp_train* with the ground truth in `<path>/test.csv` (matching
    `queried_record_id` to `record_id`) and returns a Series that is 1 where
    `predicted_record_id` equals the true `linked_id`, else 0.

    Args:
        df_exp_train: DataFrame with `queried_record_id` and
            `predicted_record_id` columns.
        path: directory containing `test.csv` (defaults to the CWD).

    Returns:
        pandas Series of 0/1 ints aligned with *df_exp_train*'s rows.
    """
    path_validation = os.path.join(path, "test.csv")
    df_val = pd.read_csv(path_validation, escapechar="\\")
    # Attach the ground-truth linked_id for every queried record.
    df_exp_train = df_exp_train.merge(
        df_val[['record_id', 'linked_id']], how='left',
        left_on='queried_record_id', right_on='record_id').drop('record_id', axis=1)
    # 1 where the prediction matches the ground truth, 0 otherwise.
    df_exp_train['target'] = np.where(
        df_exp_train.predicted_record_id.values == df_exp_train.linked_id.values, 1, 0)
    return df_exp_train['target']
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | features/target.py | teomores/Oracle_HPC_contest |
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import (
IsAuthenticated,
)
from .serializers import (
CreateRoomSerializer,
RoomSerializer,
)
from .models import RoomMember, Room
from rest_framework.reverse import reverse
from rest_framework.status import (
HTTP_201_CREATED,
HTTP_400_BAD_REQUEST
)
class CreateRoomView(APIView):
    """Create a room and enroll its creator as the first member."""
    permission_classes = [IsAuthenticated]
    throttle_classes = []

    def post(self, request):
        serializer = CreateRoomSerializer(data=request.data)
        # Guard clause: reject invalid payloads immediately.
        if not serializer.is_valid():
            return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
        room = serializer.save(creator=request.user)
        RoomMember.objects.create(room=room)
        room.roommember.members.add(request.user.id)
        return Response(data={"redirect": room.room_code}, status=HTTP_201_CREATED)
class RoomView(APIView):
    """Fetch a room (GET) or join it (POST) by its room code."""
    permission_classes = [IsAuthenticated]
    throttle_classes = []

    @staticmethod
    def _find_room(room_code):
        # Return the Room, or None when the code is unknown.
        try:
            return Room.objects.get(room_code=room_code)
        except Room.DoesNotExist:
            return None

    def get(self, request, room_code):
        room = self._find_room(room_code)
        if room is None:
            return Response({"not found": "noe"})
        # Non-members are told to join via POST instead.
        if not RoomMember.objects.filter(room_id=room.id, members=request.user.id).exists():
            return Response({"using post to join": "..."})
        return Response(RoomSerializer(room).data)

    def post(self, request, room_code):
        room = self._find_room(room_code)
        if room is None:
            return Response({"not found": "noe"})
        room.roommember.members.add(request.user.id)
        return Response(RoomSerializer(room).data)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | main/online/views.py | MahanBi/Back-End |
'''
Make sure orbit plotting can still occur after chopping chains.
'''
import orbitize
from orbitize import driver, DATADIR
import multiprocessing as mp
def verify_results_data(res, sys):
    """Check a Results object is consistent with its System and can be plotted.

    Verifies the data table carried from System to Results matches, then draws
    a small orbit plot to confirm plotting still works after chain chopping.

    Args:
        res: orbitize results object (exposes `.data` and `.plot_orbits`).
        sys: orbitize system object (exposes `.data_table`).

    Raises:
        AssertionError: if the data tables are missing or differ.
        Exception: if plotting fails (original error chained via __cause__).
    """
    # Make data attribute from System is carried forward to Result class
    assert res.data is not None
    # Make sure the data tables are equivalent between Result and System class
    res_data = res.data.to_pandas()
    sys_data = sys.data_table.to_pandas()
    assert res_data.equals(sys_data)
    # Make sure no error results when making the final orbit plot
    try:
        epochs = sys.data_table['epoch']
        res.plot_orbits(
            object_to_plot = 1,
            num_orbits_to_plot = 10,
            start_mjd = epochs[0]
        )
    except Exception as err:
        # Catch Exception (not bare except) and chain the cause so the real
        # plotting failure is not hidden from the traceback.
        raise Exception("Plotting orbits failed.") from err
def test_chop_chains():
    '''
    First run MCMC sampler to generate results object and make a call to 'chop_chains'
    function afterwards.
    '''
    # Input data and known system parameters for HD 4747.
    filename = "{}/HD4747.csv".format(DATADIR)
    num_secondary_bodies = 1
    system_mass = 0.84
    plx = 53.18
    mass_err = 0.04
    plx_err = 0.12
    # Deliberately small MCMC settings: this test checks plumbing, not convergence.
    num_temps = 5
    num_walkers = 40
    num_threads = mp.cpu_count()
    total_orbits = 5000
    burn_steps = 10
    thin = 2
    my_driver = driver.Driver(
        filename, 'MCMC', num_secondary_bodies, system_mass, plx, mass_err=mass_err, plx_err=plx_err,
        system_kwargs={'fit_secondary_mass':True, 'tau_ref_epoch':0},
        mcmc_kwargs={'num_temps':num_temps, 'num_walkers':num_walkers, 'num_threads':num_threads})
    my_driver.sampler.run_sampler(total_orbits, burn_steps=burn_steps, thin=thin)
    # Discard burn-in and trim the chains, then confirm plotting still works.
    my_driver.sampler.chop_chains(burn=25, trim=25)
    mcmc_sys = my_driver.system
    mcmc_result = my_driver.sampler.results
    verify_results_data(mcmc_result, mcmc_sys)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test_chop_chains()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/test_chop_chains_with_plotting.py | jorgellop/orbitize |
import requests
from pyquery import PyQuery as pq
import json
def get_one_page(url):
    """Fetch a Zhihu explore page and scrape title/answer/author per entry."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    doc = pq(response.text)
    # One dict per feed entry on the page.
    return [
        {
            'title': item.find('h2').text(),
            'anwser': item.find('.zh-summary.summary').text(),
            'author': item.find('.author-link-line').text()
        }
        for item in doc.find('.explore-feed.feed-item').items()
    ]
def write_to_txt(results):
    """Append each scraped entry to ./output/zhihu_data.txt as a text block.

    Each entry is written as title, answer, author on separate lines followed
    by a '========' separator. The item is also echoed to stdout.
    """
    # Open the file once instead of reopening it for every item.
    with open('./output/zhihu_data.txt', 'a', encoding='utf-8') as f:
        for item in results:
            print(item)
            question = item['title'] + '\n'
            question = question + item['anwser'] + '\n'
            question = question + item['author'] + '\n'
            question = question + '========\n'
            f.write(question)
def write_to_json(results):
    """Dump the scraped entries to ./output/zhihu_data.json, pretty-printed."""
    with open('./output/zhihu_data.json', 'w', encoding='utf-8') as f:
        # json.dump writes the same characters as f.write(json.dumps(...)).
        json.dump(results, f, indent=4, ensure_ascii=False)
def main():
    # Scrape the explore page and persist results as JSON (txt writer disabled).
    url = 'https://www.zhihu.com/explore'
    results = get_one_page(url)
    # write_to_txt(results)
    write_to_json(results)
    print(results)
# Runs at import time: this module is a script, not a library.
main()
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | python_demo_v2/zhihu_data.py | renhongl/python_demo |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.