| id (string, 1–8 chars) | text (string, 6–1.05M chars) | dataset_id (1 class) |
|---|---|---|
1678560 | # coding: utf-8
import io
import sys
# Force GB18030 output so Chinese text renders correctly when the script is
# run from an IDE or invoked indirectly (comment translated from Chinese).
sys.stdout.reconfigure(encoding='gb18030')  # IDE debugging / indirect invocation
# sys.stdout.reconfigure(encoding='utf-8')  # when running from a terminal
import my_server
if __name__ == '__main__':
    # print(sys.getdefaultencoding())
    # sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
    my_server.Run()
| StarcoderdataPython |
6562306 | <reponame>withshubh/dagster
# pylint: disable=unused-import
import os
import sys
import uuid
import pytest
from airflow.exceptions import AirflowException
from airflow.utils import timezone
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.utils import make_new_run_id
from dagster.utils import git_repository_root, load_yaml_from_glob_list
from dagster_airflow.factory import make_airflow_dag_containerized_for_recon_repo
from dagster_airflow_tests.conftest import dagster_docker_image
from dagster_airflow_tests.marks import nettest, requires_airflow_db
from dagster_airflow_tests.test_fixtures import (
dagster_airflow_docker_operator_pipeline,
execute_tasks_in_dag,
postgres_instance,
)
from dagster_test.test_project import get_test_project_environments_path
from .utils import validate_pipeline_execution, validate_skip_pipeline_execution
@requires_airflow_db
def test_fs_storage_no_explicit_base_dir(
    dagster_airflow_docker_operator_pipeline, dagster_docker_image
):  # pylint: disable=redefined-outer-name
    """Run demo_pipeline via the Docker-operator fixture with filesystem storage
    that has no explicit base dir, and validate every step executed."""
    pipeline_name = "demo_pipeline"
    environments_path = get_test_project_environments_path()
    # The fixture builds an Airflow DAG for the pipeline and executes each task,
    # returning per-step event results.
    results = dagster_airflow_docker_operator_pipeline(
        pipeline_name=pipeline_name,
        recon_repo=ReconstructableRepository.for_module(
            "dagster_test.test_project.test_pipelines.repo",
            "define_demo_execution_repo",
        ),
        environment_yaml=[
            os.path.join(environments_path, "env.yaml"),
            os.path.join(environments_path, "env_filesystem_no_explicit_base_dir.yaml"),
        ],
        image=dagster_docker_image,
    )
    validate_pipeline_execution(results)
@requires_airflow_db
def test_fs_storage(
    dagster_airflow_docker_operator_pipeline, dagster_docker_image
):  # pylint: disable=redefined-outer-name
    """Run demo_pipeline via the Docker-operator fixture with default
    filesystem storage and validate every step executed."""
    pipeline_name = "demo_pipeline"
    environments_path = get_test_project_environments_path()
    results = dagster_airflow_docker_operator_pipeline(
        pipeline_name=pipeline_name,
        recon_repo=ReconstructableRepository.for_module(
            "dagster_test.test_project.test_pipelines.repo",
            "define_demo_execution_repo",
        ),
        environment_yaml=[
            os.path.join(environments_path, "env.yaml"),
            os.path.join(environments_path, "env_filesystem.yaml"),
        ],
        image=dagster_docker_image,
    )
    validate_pipeline_execution(results)
@nettest
@requires_airflow_db
def test_s3_storage(
    dagster_airflow_docker_operator_pipeline, dagster_docker_image
):  # pylint: disable=redefined-outer-name
    """Run demo_pipeline via the Docker-operator fixture with S3 storage
    (network-marked) and validate every step executed."""
    pipeline_name = "demo_pipeline"
    environments_path = get_test_project_environments_path()
    results = dagster_airflow_docker_operator_pipeline(
        pipeline_name=pipeline_name,
        recon_repo=ReconstructableRepository.for_module(
            "dagster_test.test_project.test_pipelines.repo",
            "define_demo_execution_repo",
        ),
        environment_yaml=[
            os.path.join(environments_path, "env.yaml"),
            os.path.join(environments_path, "env_s3.yaml"),
        ],
        image=dagster_docker_image,
    )
    validate_pipeline_execution(results)
@nettest
@requires_airflow_db
def test_gcs_storage(
    dagster_airflow_docker_operator_pipeline,
    dagster_docker_image,
):  # pylint: disable=redefined-outer-name
    """Run demo_pipeline_gcs via the Docker-operator fixture with GCS storage
    (network-marked) and validate every step executed."""
    pipeline_name = "demo_pipeline_gcs"
    environments_path = get_test_project_environments_path()
    results = dagster_airflow_docker_operator_pipeline(
        pipeline_name=pipeline_name,
        recon_repo=ReconstructableRepository.for_module(
            "dagster_test.test_project.test_pipelines.repo",
            "define_demo_execution_repo",
        ),
        environment_yaml=[
            os.path.join(environments_path, "env.yaml"),
            os.path.join(environments_path, "env_gcs.yaml"),
        ],
        image=dagster_docker_image,
    )
    validate_pipeline_execution(results)
@requires_airflow_db
def test_skip_operator(
    dagster_airflow_docker_operator_pipeline, dagster_docker_image
):  # pylint: disable=redefined-outer-name
    """Run the optional_outputs pipeline and validate that steps downstream of
    an unyielded optional output are skipped rather than executed."""
    pipeline_name = "optional_outputs"
    environments_path = get_test_project_environments_path()
    results = dagster_airflow_docker_operator_pipeline(
        pipeline_name=pipeline_name,
        recon_repo=ReconstructableRepository.for_module(
            "dagster_test.test_project.test_pipelines.repo",
            "define_demo_execution_repo",
        ),
        environment_yaml=[os.path.join(environments_path, "env_filesystem.yaml")],
        # host_tmp_dir is forwarded to the Docker operator so the container can
        # share the host's /tmp.
        op_kwargs={"host_tmp_dir": "/tmp"},
        image=dagster_docker_image,
    )
    validate_skip_pipeline_execution(results)
@requires_airflow_db
def test_error_dag_containerized(dagster_docker_image):  # pylint: disable=redefined-outer-name
    """A failing pipeline step must surface as an AirflowException whose
    message contains the original 'Unusual error' exception text."""
    pipeline_name = "demo_error_pipeline"
    recon_repo = ReconstructableRepository.for_module(
        "dagster_test.test_project.test_pipelines.repo", "define_demo_execution_repo"
    )
    environments_path = get_test_project_environments_path()
    environment_yaml = [
        os.path.join(environments_path, "env_s3.yaml"),
    ]
    run_config = load_yaml_from_glob_list(environment_yaml)
    run_id = make_new_run_id()
    execution_date = timezone.utcnow()
    with postgres_instance() as instance:
        dag, tasks = make_airflow_dag_containerized_for_recon_repo(
            recon_repo,
            pipeline_name,
            dagster_docker_image,
            run_config,
            instance=instance,
            # Join the test Postgres container's network so the containerized
            # steps can reach the instance DB.
            op_kwargs={"network_mode": "container:test-postgres-db-airflow"},
        )
        with pytest.raises(AirflowException) as exc_info:
            execute_tasks_in_dag(dag, tasks, run_id, execution_date)
        assert "Exception: Unusual error" in str(exc_info.value)
@requires_airflow_db
def test_airflow_execution_date_tags_containerized(
    dagster_docker_image,
):  # pylint: disable=redefined-outer-name, unused-argument
    """The Airflow execution_date must be forwarded into the pipeline: the
    pipeline materializes it, and it must equal the date we launched with."""
    pipeline_name = "demo_airflow_execution_date_pipeline"
    recon_repo = ReconstructableRepository.for_module(
        "dagster_test.test_project.test_pipelines.repo", "define_demo_execution_repo"
    )
    environments_path = get_test_project_environments_path()
    environment_yaml = [
        os.path.join(environments_path, "env_s3.yaml"),
    ]
    run_config = load_yaml_from_glob_list(environment_yaml)
    execution_date = timezone.utcnow()
    with postgres_instance() as instance:
        dag, tasks = make_airflow_dag_containerized_for_recon_repo(
            recon_repo,
            pipeline_name,
            dagster_docker_image,
            run_config,
            instance=instance,
            op_kwargs={"network_mode": "container:test-postgres-db-airflow"},
        )
        results = execute_tasks_in_dag(
            dag, tasks, run_id=make_new_run_id(), execution_date=execution_date
        )
        # Scan all step events for the materialization that carries the
        # execution date; the last one seen wins.
        materialized_airflow_execution_date = None
        for result in results.values():
            for event in result:
                if event.event_type_value == "STEP_MATERIALIZATION":
                    materialization = event.event_specific_data.materialization
                    materialization_entry = materialization.metadata_entries[0]
                    materialized_airflow_execution_date = materialization_entry.entry_data.text
        assert execution_date.isoformat() == materialized_airflow_execution_date
| StarcoderdataPython |
3396594 | #!/usr/bin/env python
"""
Split a given file into the specified number of files.
Order is preserved.
Author: <NAME>
Contact: <EMAIL>
Date: 2014
"""
from os import path
import argparse
import math
import os
import sys
#-----------------------------------------------------------------------------#
# UTILITY FUNCTIONS #
#-----------------------------------------------------------------------------#
def check_argv():
    """Check the command line arguments."""
    # add_help=False: the script relies on the explicit no-args help below.
    parser = argparse.ArgumentParser(description=__doc__.strip().split("\n")[0], add_help=False)
    parser.add_argument("input_fn", help="the input file")
    parser.add_argument("n_files", type=int, help="the number of files to split `input_fn` into")
    # NOTE(review): this help text says the index starts from 0, but main()
    # starts numbering split files at 1 — confirm which is intended.
    parser.add_argument(
        "output_dir", help="the split files are written here; "
        "a index (starting from 0) is added before the extension of the file, e.g. "
        "if `input_fn` is 'apples.list' then the first split file will be 'apples.0.list'"
    )
    # With no arguments at all, print usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
#-----------------------------------------------------------------------------#
# MAIN FUNCTION #
#-----------------------------------------------------------------------------#
def main():
    """Split the input file into `n_files` pieces, preserving line order.

    Fixes over the original: Python 2 `print` statements (syntax errors under
    Python 3) replaced with the print function; the input file handle is now
    closed via a context manager; the final split file is no longer reported
    twice when the line count divides evenly.
    """
    args = check_argv()
    input_fn = args.input_fn
    n_files = args.n_files
    output_dir = args.output_dir
    if not path.isdir(output_dir):
        os.mkdir(output_dir)

    # Read the input file.
    with open(input_fn) as f:
        lines = f.readlines()
    print("Number of lines in input:", len(lines))
    n_lines_per_split = int(math.ceil(float(len(lines)) / n_files))
    print("Max number of lines per split:", n_lines_per_split)
    basename, extension = path.splitext(path.split(input_fn)[-1])

    # Write lines to split files; split indices start at 1.
    i_split = 1
    n_cur_lines = 0
    fn_cur = path.join(output_dir, basename + "." + str(i_split) + extension)
    f_cur = open(fn_cur, "w")
    for line in lines:
        f_cur.write(line)
        n_cur_lines += 1
        if n_cur_lines == n_lines_per_split:
            f_cur.close()
            print("Wrote " + str(n_cur_lines) + " lines to: " + fn_cur)
            if i_split == n_files:
                break
            i_split += 1
            fn_cur = path.join(output_dir, basename + "." + str(i_split) + extension)
            f_cur = open(fn_cur, "w")
            n_cur_lines = 0
    # Flush and report the trailing (partially filled) split, if any.
    if not f_cur.closed:
        f_cur.close()
        print("Wrote " + str(n_cur_lines) + " lines to: " + fn_cur)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3552214 | """ Unit test file. """
import pytest
import numpy as np
from ..BaumWelch import do_E_step, calculate_log_likelihood
from ..LineageTree import LineageTree
from ..tHMM import tHMM
from ..figures.figureCommon import pi, T, E
@pytest.mark.parametrize("cens", [0, 2])
@pytest.mark.parametrize("nStates", [1, 2, 3])
def test_BW(cens, nStates):
    """ This tests that one step of Baum-Welch increases the likelihood of the fit. """
    # Synthetic lineage with 2**7 - 1 cells under the given censoring condition.
    X = LineageTree.init_from_parameters(pi, T, E, desired_num_cells=(2 ** 7) - 1, desired_experimental_time=500, censor_condition=cens)
    tHMMobj = tHMM([X], num_states=nStates)  # build the tHMM class with X
    # Test cases below
    # Get the likelihoods before fitting
    _, NF, _, _ = do_E_step(tHMMobj)
    LL_before = calculate_log_likelihood(NF)
    assert np.isfinite(LL_before)
    # Get the likelihoods after fitting
    _, _, NF_after, _, _, new_LL_list_after = tHMMobj.fit(max_iter=3)
    LL_after = calculate_log_likelihood(NF_after)
    assert np.isfinite(LL_after)
    # NOTE(review): if new_LL_list_after is a multi-element sequence,
    # `assert np.isfinite(...)` on it is ambiguous (array truthiness) —
    # confirm it is scalar or wrap in np.all().
    assert np.isfinite(new_LL_list_after)
    assert LL_after > LL_before
| StarcoderdataPython |
5151078 | import json
import numpy as np
import re
# defining regex pattern
# pattern = re.compile(r"[^\u0E00-\u0E7F0-9]|^'|'$|''")
path = 'D:/Users/Patdanai/th-qasys-db/tokenized_wiki_corpus/' # default development path
def load_article(dir_path, art_id):
    """Load the tokenized article stored as `<art_id>.json` under `dir_path`.

    The file is read as UTF-8; undecodable bytes are silently ignored.
    """
    article_path = dir_path + str(art_id) + '.json'
    with open(article_path, 'r', encoding='utf-8', errors='ignore') as fin:
        return json.load(fin)
def m_words_separate(m, arrays_of_tokens, overlapping_words=0, question_number=0):
    """Slice each token array into fixed-size windows of `m` tokens.

    Consecutive windows overlap by `overlapping_words` tokens. An array shorter
    than `m` is right-padded to length `m` (with '<PAD>' strings, or zero
    vectors when the tokens are numpy arrays). A ragged tail is covered by one
    extra window ending exactly at the last token, unless that window would
    duplicate the previous one.

    NOTE(review): the original body's indentation was corrupted during
    extraction and could not be reconstructed faithfully (every literal
    re-indentation crashes); this is a re-implementation of its documented
    intent, matching its own __main__ example.

    Args:
        m: window size in tokens.
        arrays_of_tokens: list of token sequences (strings or numpy vectors).
        overlapping_words: tokens shared by consecutive windows; must be < m.
        question_number: unused here; kept for interface compatibility with
            callers that pass it for progress reporting.

    Returns:
        [windows_per_article, ranges_per_article] where each range is a
        (start, end) tuple of token indexes into the (padded) article.
    """
    step = m - overlapping_words
    if step <= 0:
        raise ValueError("overlapping_words must be smaller than m")
    sentences_in_articles = []
    sentences_ranges_in_articles = []
    for raw_tokens in arrays_of_tokens:
        tokens = list(raw_tokens)
        sentences = []
        sentences_ranges = []
        if len(tokens) < m:
            # Whole article fits in one padded window.
            pad_with_vectors = bool(tokens) and isinstance(tokens[-1], np.ndarray)
            while len(tokens) < m:
                if pad_with_vectors:
                    tokens.append(np.zeros(tokens[-1].shape))
                else:
                    tokens.append('<PAD>')
            sentences.append(tokens[0:m])
            sentences_ranges.append((0, m))
        else:
            j = 0
            while j + m <= len(tokens):
                sentences.append(tokens[j:j + m])
                sentences_ranges.append((j, j + m))
                j += step
            if sentences_ranges[-1][1] < len(tokens):
                # Ragged tail: one last window ending at the final token.
                tail = (len(tokens) - m, len(tokens))
                if tail != sentences_ranges[-1]:
                    sentences.append(tokens[tail[0]:tail[1]])
                    sentences_ranges.append(tail)
        sentences_in_articles.append(sentences)
        sentences_ranges_in_articles.append(sentences_ranges)
    return [sentences_in_articles, sentences_ranges_in_articles]
# remove xml tag at the beginning
def remove_xml(article):
    """Strip a leading XML/SGML tag (e.g. '<doc id="1">') from `article`.

    Bug fix: the original scanned the whole string and cut at the *last* '>',
    which also discarded article text that happened to contain '>'. Per its
    own comment, only the opening tag at the beginning should be removed, so
    we cut at the first '>'. Strings without '>' are returned unchanged
    (str.find returns -1, so the slice starts at 0, as before).
    """
    tag_end = article.find('>')
    return article[tag_end + 1:]
"""
output: preprocessed (remove noises from each token) arrays of tokens in each article
<arrays of tokens in each article: array like>
input: arrays of tokens <arrays of tokens: array like>
"""
# r"[^\u0E00-\u0E7F^0-9^ \t^.]" this pattern removes '.', ',', spaces, tabs and english characters
def remove_noise(array_of_tokens, preprocessing_pattern=re.compile(r"[^\u0E00-\u0E7F^0-9^ ^\t]|^[\u0E00-\u0E7F].[\u0E00-\u0E7F].")):
thai_numbers = ['\u0E50', '\u0E51', '\u0E52', '\u0E53', '\u0E54', '\u0E55', '\u0E56', '\u0E57', '\u0E58', '\u0E59']
original_token_lengths = []
for word in array_of_tokens:
original_token_lengths.append(word.__len__()) # for each j => temp contains each article's word lengths
# this below block removes any charater in regex_pattern
original_token_indexes = []
tokens_with_chars_removed = []
for i in range(array_of_tokens.__len__()):
chars_to_remove = re.findall(preprocessing_pattern, array_of_tokens[i])
temp = '' # declare for characters that pass condition
for j in range(array_of_tokens[i].__len__()):
if(not(array_of_tokens[i][j] in chars_to_remove)):
if(array_of_tokens[i][j] in thai_numbers):
if(array_of_tokens[i][j] == thai_numbers[0]):
temp += '0'
if(array_of_tokens[i][j] == thai_numbers[1]):
temp += '1'
if(array_of_tokens[i][j] == thai_numbers[2]):
temp += '2'
if(array_of_tokens[i][j] == thai_numbers[3]):
temp += '3'
if(array_of_tokens[i][j] == thai_numbers[4]):
temp += '4'
if(array_of_tokens[i][j] == thai_numbers[5]):
temp += '5'
if(array_of_tokens[i][j] == thai_numbers[6]):
temp += '6'
if(array_of_tokens[i][j] == thai_numbers[7]):
temp += '7'
if(array_of_tokens[i][j] == thai_numbers[8]):
temp += '8'
if(array_of_tokens[i][j] == thai_numbers[9]):
temp += '9'
else:
temp += array_of_tokens[i][j] # concatenate charaters those are not in chars_to_remove
# this below condition filters remaining single \n, \t, spaces commas and dots tokens
if(temp.__len__() and not(temp in re.findall(r"^\s|^,|^.", temp))):
original_token_indexes.append(i)
tokens_with_chars_removed.append(temp) # append temp(word <string>) to tokens_with_chars_remove
if(str(temp).isdigit()):
temp = 'NUM'
# print(tokens_with_chars_removed) # TESTING FUNCTION
summation = 0
for i in range(original_token_lengths.__len__()):
summation += original_token_lengths[i]
original_token_lengths[i] = summation
# print(original_token_lengths)
# print(original_token_indexes)
# this below block gives the remaining word's original position ranges
# in format range(0, 10) => 0..9
token_ranges = []
for i in range(1, original_token_indexes.__len__() - 1):
start = original_token_indexes[i-1]
end = start + 1
# token_ranges.append((original_token_lengths[start-1], original_token_lengths[end]))
if(start):
# append range of each token
token_ranges.append((original_token_lengths[start - 1], original_token_lengths[end]))
else:
token_ranges.append((original_token_lengths[start], original_token_lengths[end]))
# print(token_ranges)
# print(selected_plain_text_article[0][528:537]) # answer position form => (start+1, end)
"""
for i in range(token_ranges.__len__()): # TESTING FUNCTION: returns tokens string in 1 line
temp += selected_plain_text_article[0][token_ranges[i][0]:token_ranges[i][1] - 1] + ' '
print(temp)
"""
# preprocessing gives remaining tokens, their old indexes after tokenized
# and their position ranges(range canbe converted to length)
return [tokens_with_chars_removed, original_token_indexes, original_token_lengths]
# main function for script testing
if(__name__ == '__main__'):
    test_sample = load_article(path, 1)
    # NOTE(review): the loaded article is immediately overwritten by this
    # fixed list, so the line above only smoke-tests load_article.
    test_sample = [1,2,3,4,5,6,7,8,9,10]
    print(m_words_separate(20, [test_sample], overlapping_words=10))
| StarcoderdataPython |
1613592 | """ Testing unittest_assertions/base.py """
from typing import (
Callable,
Iterable,
Mapping,
)
import pytest
from unittest_assertions.base import Assertion
from unittest_assertions.equality import AssertEqual
class TestBuiltinAssertion:
    """Testing builtin assertions"""

    @pytest.mark.parametrize("testing_data", ((AssertEqual, "Message"),))
    def test_init(self, testing_data: Callable) -> None:
        """Test builtin assertion __init__

        Args:
            testing_data: pair of (assertion function, message) used to
                construct the Assertion under test

        Returns:
            None
        """
        function, message = testing_data
        builtin_assertion = Assertion(_assertion_function=function, msg=message)
        assert builtin_assertion._assertion_function == function
        assert builtin_assertion.msg == message

    @pytest.mark.parametrize("arguments", (("hello", None, 2),))
    @pytest.mark.parametrize(
        "keyword_args",
        ({"testing": "hello there"}, {"a": 1, "b": 2}),
    )
    def test_call(self, arguments: Iterable, keyword_args: Mapping) -> None:
        """Test `Assertion` __call__ function

        Args:
            arguments: arguments passed to __call__
            keyword_args: keyword arguments passed to __call__

        Returns:
            None
        """
        def _mock_function(*_args, **_kwargs) -> None:
            """mock function

            Verifies that Assertion.__call__ forwards its positional and
            keyword arguments plus its `msg` to the assertion function.

            Args:
                *_args: arguments for the mocked function
                **_kwargs: keyword arguments for the mocked function

            Returns:
                None
            """
            keyword_args["msg"] = builtin_assertion.msg
            assert arguments == _args
            assert keyword_args == _kwargs

        builtin_assertion = Assertion(_assertion_function=_mock_function)
        builtin_assertion.__call__(*arguments, **keyword_args)
6520878 | '''
Created on 2014-11-1
@author: eluoyng
'''
import wsgi
class ControllerTest(object):
    """Demo controller exposing a single `test` action for the WSGI router."""

    def __init__(self):
        # Bug fix: the original used Python 2 print statements, which are
        # syntax errors under Python 3.
        print("ControllerTest!!!!")

    def test(self, req):
        """Log the incoming request and return a fixed payload."""
        print("req", req)
        return {
            'name': "test",
            'properties': "test"
        }
class MyControllerTest(object):
    """Second demo controller exposing a `my_test` action for the WSGI router."""

    def __init__(self):
        # Bug fix: Python 2 print statement converted to the print function.
        print("MyControllerTest!!!!")

    def my_test(self, req):
        """Log the incoming request and return a fixed payload."""
        print("req", req)
        return {
            'name': "my_test",
            'properties': "my_test"
        }
class MyRouterApp(wsgi.Router):
    '''
    Demo WSGI application: routes GET /test and GET /mytest to the two demo
    controllers defined above.
    '''
    def __init__(self, mapper):
        controller = ControllerTest()
        mapper.connect('/test',
                       controller=wsgi.Resource(controller),
                       action='test',
                       conditions={'method': ['GET']})
        mapper.connect('/mytest',
                       controller=wsgi.Resource(MyControllerTest()),
                       action='my_test',
                       conditions={'method': ['GET']})
        super(MyRouterApp, self).__init__(mapper)
| StarcoderdataPython |
5131907 | <gh_stars>10-100
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import fnmatch
import json
import logging
import math
import os
import pathlib
import typing
from zipfile import ZipFile
import pandas as pd
import requests
from google.cloud import storage
def main(
    source_url: str,
    source_file: pathlib.Path,
    source_csv_name: str,
    target_file: pathlib.Path,
    target_gcs_bucket: str,
    target_gcs_path: str,
    headers: typing.List[str],
    rename_mappings: dict,
    pipeline_name: str,
) -> None:
    """Download, transform and publish one Google Political Ads table.

    Steps: download the source archive, extract the matching CSVs, rename and
    reorder columns, convert spend-range currency columns to integer strings
    for the creative_stats pipeline, write the result and upload it to GCS.

    Improvements over the original: the thirteen copy-pasted
    `spend_range_max_*` conversions are collapsed into one loop over the
    currency codes, and the no-op `else: df = df` branch is removed.
    """
    logging.info(
        f"google political ads {pipeline_name} process started at "
        + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    )
    logging.info("creating 'files' folder")
    pathlib.Path("./files").mkdir(parents=True, exist_ok=True)
    logging.info(f"Downloading file {source_url}")
    download_file(source_url, source_file)
    logging.info(f"Opening file {source_file}")
    df = read_csv_file(source_file, source_csv_name)
    logging.info(f"Transforming.. {source_file}")
    logging.info(f"Transform: Rename columns for {pipeline_name}..")
    rename_headers(df, rename_mappings)
    if pipeline_name == "creative_stats":
        logging.info(f"Transform: converting to integer for {pipeline_name}..")
        # Every spend_range_max_<currency> column gets the same conversion;
        # the explicit currency list preserves the original's exact scope.
        for currency in ("usd", "eur", "inr", "bgn", "hrk", "czk", "dkk",
                         "huf", "pln", "ron", "gbp", "sek", "nzd"):
            column = f"spend_range_max_{currency}"
            df[column] = df[column].apply(convert_to_int)
    logging.info(f"Transform: Reordering headers for {pipeline_name}.. ")
    df = df[headers]
    logging.info(f"Saving to output file.. {target_file}")
    try:
        save_to_new_file(df, file_path=str(target_file))
    except Exception as e:
        logging.error(f"Error saving output file: {e}.")
    logging.info(
        f"Uploading output file to.. gs://{target_gcs_bucket}/{target_gcs_path}"
    )
    upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path)
    logging.info(
        f"Google Political Ads {pipeline_name} process completed at "
        + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    )
def save_to_new_file(df: pd.DataFrame, file_path: str) -> None:
    """Persist `df` as CSV at `file_path`, omitting the numeric index."""
    df.to_csv(path_or_buf=file_path, index=False)
def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str) -> None:
    """Upload the local file at `file_path` to gs://`gcs_bucket`/`gcs_path`."""
    client = storage.Client()
    target_blob = client.bucket(gcs_bucket).blob(gcs_path)
    target_blob.upload_from_filename(file_path)
def download_file(source_url: str, source_file: pathlib.Path) -> None:
    """Stream `source_url` to `source_file`; log (but do not raise) on failure."""
    logging.info(f"Downloading {source_url} into {source_file}")
    response = requests.get(source_url, stream=True)
    # Guard clause instead of the original if/else nesting.
    if response.status_code != 200:
        logging.error(f"Couldn't download {source_url}: {response.text}")
        return
    with open(source_file, "wb") as out:
        for chunk in response:
            out.write(chunk)
def read_csv_file(source_file: pathlib.Path, source_csv_name: str) -> pd.DataFrame:
    """Read every CSV inside the zip `source_file` whose name matches the
    glob `source_csv_name`, concatenated into a single DataFrame."""
    frames = []
    with ZipFile(source_file) as archive:
        for member in fnmatch.filter(archive.namelist(), source_csv_name):
            with archive.open(member) as handle:
                frames.append(pd.read_csv(handle))
    return pd.concat(frames)
def rename_headers(df: pd.DataFrame, rename_mappings: dict) -> None:
    """Rename `df`'s columns in place; columns absent from the mapping keep
    their original names."""
    df.columns = [rename_mappings.get(column, column) for column in df.columns]
def convert_to_int(input: str) -> str:
    """Return `input` rounded to the nearest whole number, as a string.

    Blank strings and NaN map to the empty string (BigQuery-friendly null).
    """
    # Early returns instead of the original's accumulator variable.
    if input == "" or math.isnan(input):
        return ""
    return str(int(round(input, 0)))
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_url=os.environ["SOURCE_URL"],
source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(),
source_csv_name=os.environ["FILE_NAME"],
target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(),
target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"],
target_gcs_path=os.environ["TARGET_GCS_PATH"],
headers=json.loads(os.environ["CSV_HEADERS"]),
rename_mappings=json.loads(os.environ["RENAME_MAPPINGS"]),
pipeline_name=os.environ["PIPELINE_NAME"],
)
| StarcoderdataPython |
9721067 | import os
from setuptools import find_packages, setup
# Runtime dependencies for the workspacesio package.
deps = [
    "aiofiles",
    "authlib",
    "boto3",
    "click",
    "click-aliases",
    "colorama",
    "cryptography",
    "databases[postgresql]",
    "elasticsearch>=7.0.0,<8.0.0",
    "fastapi",
    "fastapi-users",
    "fastapi-contrib",
    "ffmpeg-python",
    "gunicorn",
    "jinja2",
    "minio",
    "psycopg2-binary",
    "pydantic",
    "pyjwt",
    "requests",
    "requests-toolbelt",
    "sqlalchemy",
    "starlette",
    "tqdm",
    "uvicorn",
]

setup(
    name="workspacesio",
    version="0.1.0",
    script_name="setup.py",
    python_requires=">3.7",
    zip_safe=False,
    install_requires=deps,
    include_package_data=True,
    # Ship every package except the test suite.
    packages=find_packages(exclude=["test"]),
    entry_points={
        # Console entry points: the `wio` CLI and a table-bootstrap helper.
        "console_scripts": [
            "wio=workspacesio.cli:cli",
            "workspaces-create-tables=workspacesio.dev_cli:main",
        ],
    },
)
| StarcoderdataPython |
4814032 | <gh_stars>0
import logging
import sys
from logging.handlers import RotatingFileHandler
from pathlib import Path
from naucse.freezer import NaucseFreezer
if sys.version_info[0] <3 :
raise RuntimeError('We love Python 3.')
from naucse.cli import cli
from naucse.views import app, lesson_static_generator
def main():
    """Configure Arca and naucse file logging, then freeze the site via the CLI."""
    # Log files live under .arca/; create the directory and files up front.
    arca_log_path = Path(".arca/arca.log")
    arca_log_path.parent.mkdir(exist_ok=True)
    arca_log_path.touch()
    naucse_log_path = Path(".arca/naucse.log")
    naucse_log_path.touch()

    def get_handler(path, **kwargs):
        # Shared handler factory: INFO level, timestamp + source-location format.
        handler = RotatingFileHandler(path, **kwargs)
        formatter = logging.Formatter("[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        return handler

    logger = logging.getLogger("arca")
    logger.addHandler(get_handler(arca_log_path, maxBytes=10000, backupCount=0))
    logger = logging.getLogger("naucse")
    logger.addHandler(get_handler(naucse_log_path))
    freezer = NaucseFreezer(app)
    # see the generator for details
    freezer.register_generator(lesson_static_generator)
    cli(app, base_url='https://naucse.python.cz', freezer=freezer)
| StarcoderdataPython |
1630406 | # -*- coding: utf-8 -*-
'''
Filter out those sequences where at least 6 saccades are not valid.
'''
import gazelib
def run(input_files, output_files):
    """Keep only sequences with more than 6 trials marked saccade-valid.

    Reads a JSON list of sequences from input_files[0] and writes the
    surviving sequences to output_files[0].

    NOTE(review): the module docstring says "at least 6 ... not valid" while
    the code keeps sequences with num_valid > 6 (i.e. at least 7 valid) —
    confirm the intended threshold.
    """
    # List of sequence lists
    sequences = gazelib.io.load_json(input_files[0])
    complete_sequences = []
    validity = 'heuristic_saccade_validity'
    for seq in sequences:
        num_valid = len([1 for trial in seq if trial[validity]])
        if num_valid > 6:
            complete_sequences.append(seq)
    # For reference, print number of complete sequences
    print('# of sequences: ' + str(len(sequences)))
    print('# of good (compl > 6) sequences: ' + str(len(complete_sequences)))
    gazelib.io.write_json(output_files[0], complete_sequences,
                          human_readable=True)
| StarcoderdataPython |
3327879 | <gh_stars>0
# For each of the n input words, print its i18n-style abbreviation
# (first letter + interior length + last letter) when longer than 10 chars,
# otherwise print the word unchanged.
word_count = int(input())
for _ in range(word_count):
    word = input()
    print(f"{word[0]}{len(word) - 2}{word[-1]}" if len(word) > 10 else word)
| StarcoderdataPython |
4872062 | import os
import sys
sys.path.append(os.getcwd() + "/facerecognition/PyFaceRecClient/simple-faster-rcnn-pytorch/")
import torchvision
import torch
import numpy as np
from models.utils.bbox_tools import bbox2loc, bbox_iou, loc2bbox
from models.utils.nms import non_maximum_suppression
'''
class ProposalTargetCreator(object):
"""Assign ground truth bounding boxes to given RoIs.
The :meth:`__call__` of this class generates training targets
for each object proposal.
This is used to train Faster RCNN [#]_.
.. [#] <NAME>, <NAME>, <NAME>, <NAME>. \
Faster R-CNN: Towards Real-Time Object Detection with \
Region Proposal Networks. NIPS 2015.
Args: #
n_sample (int): The number of sampled regions.
pos_ratio (float): Fraction of regions that is labeled as a
foreground.
pos_iou_thresh (float): IoU threshold for a RoI to be considered as a
foreground.
neg_iou_thresh_hi (float): RoI is considered to be the background
if IoU is in
[:obj:`neg_iou_thresh_hi`, :obj:`neg_iou_thresh_hi`).
neg_iou_thresh_lo (float): See above.
"""
# [:obj:`neg_iou_thresh_hi`, :obj:`neg_iou_thresh_hi`) = [0.0, 0.5)
# so regions are classified into two groups:
# label with 1 = foreground(pos)
# label with -1 = background(neg)
def __init__(self,
n_sample=128,
pos_ratio=0.25, pos_iou_thresh=0.5,
neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.0
):
self.n_sample = n_sample
# I don't see a reason why pos_ratio is needed.
# -> yes, we are using only (default)25% of the sampled foreground.
self.pos_ratio = pos_ratio
self.pos_iou_thresh = pos_iou_thresh
self.neg_iou_thresh_hi = neg_iou_thresh_hi
self.neg_iou_thresh_lo = neg_iou_thresh_lo # NOTE:default 0.1 in py-faster-rcnn
def __call__(self, roi, bbox, label,
loc_normalize_mean=(0., 0., 0., 0.),
loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
"""Assigns ground truth to sampled proposals.
This function samples total of :obj:`self.n_sample` RoIs
from the combination of :obj:`roi` and :obj:`bbox`.
The RoIs are assigned with the ground truth class labels as well as
bounding box offsets and scales to match the ground truth bounding
boxes. As many as :obj:`pos_ratio * self.n_sample` RoIs are
sampled as foregrounds.
Offsets and scales of bounding boxes are calculated using
:func:`model.utils.bbox_tools.bbox2loc`.
Also, types of input arrays and output arrays are same.
Here are notations.
* :math:`S` is the total number of sampled RoIs, which equals \
:obj:`self.n_sample`.
* :math:`L` is number of object classes possibly including the \
background.
Args:
roi (array): Region of Interests (RoIs) from which we sample.
Its shape is :math:`(R, 4)`
bbox (array): The coordinates of ground truth bounding boxes.
Its shape is :math:`(R', 4)`.
label (array): Ground truth bounding box labels. Its shape
is :math:`(R',)`. Its range is :math:`[0, L - 1]`, where
:math:`L` is the number of foreground classes.
loc_normalize_mean (tuple of four floats): Mean values to normalize
coordinates of bouding boxes.
loc_normalize_std (tupler of four floats): Standard deviation of
the coordinates of bounding boxes.
Returns:
(array, array, array):
* **sample_roi**: Regions of interests that are sampled. \
Its shape is :math:`(S, 4)`.
* **gt_roi_loc**: Offsets and scales to match \
the sampled RoIs to the ground truth bounding boxes. \
Its shape is :math:`(S, 4)`.
* **gt_roi_label**: Labels assigned to sampled RoIs. Its shape is \
:math:`(S,)`. Its range is :math:`[0, L]`. The label with \
value 0 is the background.
"""
# the number of ground truth bounding boxes
n_bbox, _ = bbox.shape
# put everything together(axis = 0). roi and (GT) bounding boxes
roi = np.concatenate((roi, bbox), axis=0)
pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)
iou = bbox_iou(roi, bbox)
gt_assignment = iou.argmax(axis=1)
max_iou = iou.max(axis=1)
# Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].
# The label with value 0 is the background.
gt_roi_label = label[gt_assignment] + 1
# Select foreground RoIs as those with >= pos_iou_thresh IoU.
pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
if pos_index.size > 0:
pos_index = np.random.choice(
pos_index, size=pos_roi_per_this_image, replace=False)
# Select background RoIs as those within
# [neg_iou_thresh_lo, neg_iou_thresh_hi).
neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &
(max_iou >= self.neg_iou_thresh_lo))[0]
neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image
neg_roi_per_this_image = int(min(neg_roi_per_this_image,
neg_index.size))
if neg_index.size > 0:
neg_index = np.random.choice(
neg_index, size=neg_roi_per_this_image, replace=False)
# The indices that we're selecting (both positive and negative).
keep_index = np.append(pos_index, neg_index)
gt_roi_label = gt_roi_label[keep_index]
gt_roi_label[pos_roi_per_this_image:] = 0 # negative labels --> 0
sample_roi = roi[keep_index]
# Compute offsets and scales to match sampled RoIs to the GTs.
gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])
gt_roi_loc = ((gt_roi_loc - np.array(loc_normalize_mean, np.float32)
) / np.array(loc_normalize_std, np.float32))
return sample_roi, gt_roi_loc, gt_roi_label
'''
class ProposalCreator:
    """Generate object-detection proposal regions.

    The :meth:`__call__` of this object outputs object detection proposals
    by applying estimated bounding box offsets to a set of anchors.

    This class takes parameters to control the number of bounding boxes to
    pass to NMS and to keep after NMS. If the parameters are negative, it
    uses all the bounding boxes supplied or keeps all the bounding boxes
    returned by NMS.

    This class is used for Region Proposal Networks introduced in
    Faster R-CNN [#]_.

    .. [#] Ren et al. \
        Faster R-CNN: Towards Real-Time Object Detection with \
        Region Proposal Networks. NIPS 2015.

    Args:
        parent_model: Owning network; its ``training`` flag selects the
            train/test pre- and post-NMS budgets.
        nms_thresh (float): IoU threshold value used when calling NMS.
        n_train_pre_nms (int): Number of top scored bounding boxes
            to keep before passing to NMS in train mode.
        n_train_post_nms (int): Number of top scored bounding boxes
            to keep after passing to NMS in train mode.
        n_test_pre_nms (int): Number of top scored bounding boxes
            to keep before passing to NMS in test mode.
        n_test_post_nms (int): Number of top scored bounding boxes
            to keep after passing to NMS in test mode.
        min_size (int): A parameter to determine the threshold for
            discarding bounding boxes based on their sizes.
    """
    def __init__(self,
                 parent_model,
                 nms_thresh=0.5,
                 n_train_pre_nms=12000,
                 n_train_post_nms=300,
                 n_test_pre_nms=6000,
                 n_test_post_nms=300,
                 min_size=16
                 ):
        self.parent_model = parent_model
        self.nms_thresh = nms_thresh
        self.n_train_pre_nms = n_train_pre_nms
        self.n_train_post_nms = n_train_post_nms
        self.n_test_pre_nms = n_test_pre_nms
        self.n_test_post_nms = n_test_post_nms
        self.min_size = min_size

    def __call__(self, loc, score,
                 anchor, img_size, scale=1.):
        """Propose RoIs; inputs should be ndarrays.

        Inputs :obj:`loc, score, anchor` refer to the same anchor when
        indexed by the same index.

        On notations, :math:`R` is the total number of anchors. This is
        equal to the product of the height and the width of an image and
        the number of anchor bases per pixel.

        Args:
            loc (array): Predicted offsets and scaling to anchors.
                Its shape is :math:`(R, 4)`.
            score (array): Predicted foreground probability for anchors.
                Its shape is :math:`(R,)`.
            anchor (array): Coordinates of anchors. Its shape is
                :math:`(R, 4)`.
            img_size (tuple of ints): A tuple :obj:`height, width`,
                which contains image size after scaling.
            scale (float): The scaling factor used to scale an image after
                reading it from a file.

        Returns:
            array:
            An array of coordinates of proposal boxes.
            Its shape is :math:`(S, 4)`. :math:`S` is less than
            :obj:`self.n_test_post_nms` in test time and less than
            :obj:`self.n_train_post_nms` in train time. :math:`S` depends
            on the size of the predicted bounding boxes and the number of
            bounding boxes discarded by NMS.
        """
        # NOTE: at test time call faster_rcnn.eval() so that
        # self.parent_model.training is False and the test budgets apply.
        if self.parent_model.training:
            n_pre_nms = self.n_train_pre_nms
            n_post_nms = self.n_train_post_nms
        else:
            n_pre_nms = self.n_test_pre_nms
            n_post_nms = self.n_test_post_nms
        # Convert anchors into proposal candidates via bbox transformations.
        # anchor: all the anchor bases at all pixels;
        # loc: all the predicted offsets at all pixels.
        roi = loc2bbox(anchor, loc)
        # Clip predicted boxes to the image so no box exceeds the boundary.
        # slice(0, 4, 2) selects the y coordinates (y_min, y_max) ...
        roi[:, slice(0, 4, 2)] = np.clip(
            roi[:, slice(0, 4, 2)], 0, img_size[0])
        # ... and slice(1, 4, 2) the x coordinates (x_min, x_max).
        roi[:, slice(1, 4, 2)] = np.clip(
            roi[:, slice(1, 4, 2)], 0, img_size[1])
        # Remove predicted boxes with either height or width < threshold.
        min_size = self.min_size * scale
        hs = roi[:, 2] - roi[:, 0]
        ws = roi[:, 3] - roi[:, 1]
        keep = np.where((hs >= min_size) & (ws >= min_size))[0]
        roi = roi[keep, :]
        score = score[keep]
        # Sort all (proposal, score) pairs by score from highest to lowest
        # and take the top pre_nms boxes (e.g. 6000).
        order = score.ravel().argsort()[::-1]
        if n_pre_nms > 0:
            order = order[:n_pre_nms]
        roi = roi[order, :]
        score = score[order]
        # Apply NMS and keep the top n_post_nms proposals (e.g. 300).
        # BUG FIX: torchvision.ops.nms returns a 1-D tensor of kept indices
        # sorted by decreasing score -- NOT an (indices, count) tuple -- so
        # the post-NMS cap is a plain slice of that tensor. Also honour the
        # configured self.nms_thresh instead of a hard-coded 0.6.
        keep = torchvision.ops.nms(
            torch.from_numpy(roi),
            torch.from_numpy(score),
            self.nms_thresh)
        if n_post_nms > 0:
            keep = keep[:n_post_nms]
        roi = roi[keep.numpy(), :]
        return roi
11251590 | <filename>test/imputation/cs/test_fast_knn.py
"""test_fast_knn.py"""
import unittest
import numpy as np
import impyute as impy
# pylint:disable=invalid-name
class TestFastKNN(unittest.TestCase):
    """Unit tests for the fast KNN imputation routine."""

    def setUp(self):
        """Build one complete matrix and a copy with ~30% missing cells.

        self.data_c: complete dataset (no missing values)
        self.data_m: incomplete dataset (contains NaN entries)
        """
        n = 100
        self.data_c = np.random.normal(size=n * n).reshape((n, n))
        self.data_m = self.data_c.copy()
        n_missing = int(n * 0.3 * n)
        for _ in range(n_missing):
            row = np.random.randint(n)
            col = np.random.randint(n)
            self.data_m[row][col] = np.nan

    def test_return_type(self):
        """fast_knn should hand back a numpy ndarray."""
        result = impy.fast_knn(self.data_m)
        self.assertIsInstance(result, np.ndarray)

    def test_impute_missing_values(self):
        """Imputation should leave no NaN values behind."""
        result = impy.fast_knn(self.data_m)
        self.assertFalse(np.isnan(result).any())
# Allow running this test module directly: `python test_fast_knn.py`.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
4954178 | <reponame>lhuett/insights-core
from ...parsers import saphostctrl, ParseException, SkipException
from ...parsers.saphostctrl import SAPHostCtrlInstances
from ...tests import context_wrap
import doctest
import pytest
# Sample `saphostctrl` GetCIMObject/EnumInstances output used by the module
# doctest: two HANA (HDB) instances on separate hosts.
SAPHOSTCTRL_HOSTINSTANCES_DOCS = '''
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D89
SystemNumber , String , 88
InstanceName , String , HDB88
Hostname , String , hdb88
FullQualifiedHostname , String , hdb88.example.com
IPAddress , String , 10.0.0.88
SapVersionInfo , String , 749, patch 211, changelist 1754007
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D90
SystemNumber , String , 90
InstanceName , String , HDB90
Hostname , String , hdb90
FullQualifiedHostname , String , hdb90.example.com
IPAddress , String , 10.0.0.90
SapVersionInfo , String , 749, patch 211, changelist 1754007
*********************************************************
'''
# A representative well-formed listing: ten instances across seven SIDs and
# seven instance types (HDB/ERS/ASCS/DVEBMGS/SCS/D/SMDA), exercising the
# aggregate properties checked in test_saphostctrl below.
SAPHOSTCTRL_HOSTINSTANCES_GOOD = '''
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D89
SystemNumber , String , 88
InstanceName , String , HDB88
Hostname , String , li-ld-1810
FullQualifiedHostname , String , li-ld-1810.example.com
IPAddress , String , 10.0.0.1
SapVersionInfo , String , 749, patch 211, changelist 1754007
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D90
SystemNumber , String , 90
InstanceName , String , HDB90
Hostname , String , li-ld-1810
FullQualifiedHostname , String , li-ld-1810.example.com
IPAddress , String , 10.0.0.1
SapVersionInfo , String , 749, patch 211, changelist 1754007
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D79
SystemNumber , String , 08
InstanceName , String , ERS08
Hostname , String , d79ers
FullQualifiedHostname , String , d79ers.example.com
IPAddress , String , 10.0.0.15
SapVersionInfo , String , 749, patch 301, changelist 1779613
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D79
SystemNumber , String , 07
InstanceName , String , ASCS07
Hostname , String , d79ascs
FullQualifiedHostname , String , d79ascs.example.com
IPAddress , String , 10.0.0.14
SapVersionInfo , String , 749, patch 301, changelist 1779613
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D79
SystemNumber , String , 09
InstanceName , String , DVEBMGS09
Hostname , String , d79pas
FullQualifiedHostname , String , d79pas.example.com
IPAddress , String , 10.0.0.16
SapVersionInfo , String , 749, patch 301, changelist 1779613
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D80
SystemNumber , String , 10
InstanceName , String , SCS10
Hostname , String , d80scs
FullQualifiedHostname , String , d80scs.example.com
IPAddress , String , 10.0.0.17
SapVersionInfo , String , 749, patch 301, changelist 1779613
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D62
SystemNumber , String , 62
InstanceName , String , HDB62
Hostname , String , d62dbsrv
FullQualifiedHostname , String , li-ld-1810.example.com
IPAddress , String , 10.0.1.12
SapVersionInfo , String , 749, patch 211, changelist 1754007
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D52
SystemNumber , String , 52
InstanceName , String , ASCS52
Hostname , String , d52ascs
FullQualifiedHostname , String , d52ascs.example.com
IPAddress , String , 10.0.0.20
SapVersionInfo , String , 749, patch 401, changelist 1806777
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D52
SystemNumber , String , 54
InstanceName , String , D54
Hostname , String , d52pas
FullQualifiedHostname , String , d52pas.example.com
IPAddress , String , 10.0.0.22
SapVersionInfo , String , 749, patch 401, changelist 1806777
*********************************************************
CreationClassName , String , SAPInstance
SID , String , SMA
SystemNumber , String , 91
InstanceName , String , SMDA91
Hostname , String , li-ld-1810
FullQualifiedHostname , String , li-ld-1810.example.com
IPAddress , String , 10.0.0.1
SapVersionInfo , String , 749, patch 200, changelist 1746260
'''
# Malformed input: first attribute line is truncated (only two fields, no
# value) -- the parser is expected to raise ParseException on it.
SAPHOSTCTRL_HOSTINSTANCES_BAD = '''
CreationClassName , String
SID , String , D89
SystemNumber , String , 88
'''
# Well-formed lines but the instance record is incomplete (required fields
# such as InstanceName are absent) -- expected to raise a "Missing:" error.
SAPHOSTCTRL_HOSTINSTANCES_BAD1 = '''
CreationClassName , String , SAPInstance
SID , String , D89
SystemNumber , String , 88
'''
def test_saphostctrl_docs():
    """Run the doctest examples embedded in the saphostctrl module."""
    env = {
        'sap_inst': SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_DOCS))
    }
    failure_count, _ = doctest.testmod(saphostctrl, globs=env)
    assert failure_count == 0
def test_saphostctrl():
    """Parse a representative multi-instance listing and spot-check fields."""
    parsed = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_GOOD))
    assert len(parsed) == 10
    assert parsed.data[-2]['SapVersionInfo'] == '749, patch 401, changelist 1806777'
    expected_instances = [
        'HDB88', 'HDB90', 'ERS08', 'ASCS07', 'DVEBMGS09', 'SCS10', 'HDB62',
        'ASCS52', 'D54', 'SMDA91',
    ]
    assert sorted(parsed.instances) == sorted(expected_instances)
    # Every SID present in the fixture must be reported.
    assert all(
        sid in parsed.sids
        for sid in ['D89', 'D90', 'D79', 'D80', 'D62', 'D52', 'SMA']
    )
    expected_types = ['HDB', 'ERS', 'ASCS', 'DVEBMGS', 'SCS', 'D', 'SMDA']
    assert sorted(parsed.types) == sorted(expected_types)
def test_saphostctrl_bad():
    """Malformed or empty input must raise the appropriate exception."""
    # A truncated attribute line is a hard parse error.
    with pytest.raises(ParseException) as excinfo:
        SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_BAD))
    assert "Incorrect line: 'CreationClassName , String'" in str(excinfo)
    # No content at all should be skipped rather than parsed.
    with pytest.raises(SkipException) as excinfo:
        SAPHostCtrlInstances(context_wrap(''))
    assert "Empty content" in str(excinfo)
    # A record lacking required fields reports what is missing.
    with pytest.raises(ParseException) as excinfo:
        SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_BAD1))
    assert "Missing:" in str(excinfo)
| StarcoderdataPython |
# This module collects string-related algorithms.
def is_permutation(string1, string2):
    """Return True if string1 is a permutation (anagram) of string2.

    Uses character counting, which is O(n) versus O(n log n) for the
    sort-and-compare approach. The length check is kept as a cheap
    fast-path that avoids counting when the answer is obviously False.
    """
    from collections import Counter
    return len(string1) == len(string2) and Counter(string1) == Counter(string2)
| StarcoderdataPython |
3266773 | <gh_stars>1-10
import json
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from blitz_api.factories import UserFactory, AdminFactory
from blitz_api.services import remove_translation_fields
from ..models import Workplace
# Resolve the configured user model once so the tests also work with
# projects that swap in a custom user model.
User = get_user_model()
class WorkplaceTests(APITestCase):
    """API tests for the Workplace resource.

    Covers create (permissions, duplicates, validation), update, delete,
    list and detail endpoints. Admin users may write; regular and
    anonymous users may only read.
    """
    @classmethod
    def setUpClass(cls):
        # Shared, class-level fixtures: one API client, one regular user
        # and one admin user reused by every test method.
        super(WorkplaceTests, cls).setUpClass()
        cls.client = APIClient()
        cls.user = UserFactory()
        cls.admin = AdminFactory()
    def setUp(self):
        # Fresh workplace row for every test; name "Blitz" is relied on by
        # the duplicate-name test below.
        self.workplace = Workplace.objects.create(
            name="Blitz",
            seats=40,
            details="short_description",
            address_line1="random_address_1",
            postal_code="RAN_DOM",
            city='random_city',
            state_province="Random_State",
            country="Random_Country",
            timezone="America/Montreal",
        )
    def test_create(self):
        """
        Ensure we can create a workplace if user has permission.
        """
        self.client.force_authenticate(user=self.admin)
        data = {
            'name': "random_workplace",
            'seats': 40,
            'details': "short_description",
            'address_line1': 'random_address_1',
            'city': 'random_city',
            'country': 'Random_Country',
            'postal_code': 'RAN_DOM',
            'state_province': 'Random_State',
            'timezone': "America/Montreal",
            'volunteers': [f"http://testserver/users/{self.user.id}"],
        }
        response = self.client.post(
            reverse('workplace-list'),
            data,
            format='json',
        )
        response_content = json.loads(response.content)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED,
                         response.content)
        content = {
            'details': 'short_description',
            'address_line1': 'random_address_1',
            'address_line2': None,
            'city': 'random_city',
            'country': 'Random_Country',
            'postal_code': 'RAN_DOM',
            'state_province': 'Random_State',
            'latitude': None,
            'longitude': None,
            'name': 'random_workplace',
            'pictures': [],
            'seats': 40,
            'timezone': "America/Montreal",
            'place_name': '',
            'volunteers': [
                f'http://testserver/users/{self.user.id}'
            ],
        }
        # id/url are unpredictable, translated fields are locale-dependent;
        # strip both before comparing against the expected payload.
        del response_content['id']
        del response_content['url']
        self.assertEqual(
            remove_translation_fields(response_content),
            content
        )
    def test_create_without_permission(self):
        """
        Ensure we can't create a workplace if user has no permission.
        """
        self.client.force_authenticate(user=self.user)
        data = {
            'name': "random_workplace",
            'seats': 40,
            'details': "short_description",
            'address_line1': 'random_address_1',
            'city': 'random_city',
            'country': 'Random_Country',
            'postal_code': 'RAN_DOM',
            'state_province': 'Random_State',
            'timezone': "America/Montreal"
        }
        response = self.client.post(
            reverse('workplace-list'),
            data,
            format='json',
        )
        content = {
            'detail': 'You do not have permission to perform this action.'
        }
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_duplicate_name(self):
        """
        Ensure we can't create a workplace with same name.
        """
        self.client.force_authenticate(user=self.admin)
        # "Blitz" already exists via setUp, so this must be rejected.
        data = {
            'name': "Blitz",
            'seats': 40,
            'details': "short_description",
            'address_line1': 'random_address_1',
            'city': 'random_city',
            'country': 'Random_Country',
            'postal_code': 'RAN_DOM',
            'state_province': 'Random_State',
            'timezone': "America/Montreal"
        }
        response = self.client.post(
            reverse('workplace-list'),
            data,
            format='json',
        )
        content = {'name': ['This field must be unique.']}
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_missing_field(self):
        """
        Ensure we can't create a workplace when required field are missing.
        """
        self.client.force_authenticate(user=self.admin)
        data = {}
        response = self.client.post(
            reverse('workplace-list'),
            data,
            format='json',
        )
        content = {
            'details': ['This field is required.'],
            'address_line1': ['This field is required.'],
            'city': ['This field is required.'],
            'country': ['This field is required.'],
            'name': ['This field is required.'],
            'postal_code': ['This field is required.'],
            'seats': ['This field is required.'],
            'state_province': ['This field is required.'],
            'timezone': ['This field is required.']
        }
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_invalid_field(self):
        """
        Ensure we can't create a workplace with invalid fields.
        """
        self.client.force_authenticate(user=self.admin)
        # Tuples/wrong types everywhere to trigger one validation error
        # per field.
        data = {
            'name': ("invalid",),
            'seats': "invalid",
            'details': ("invalid",),
            'postal_code': (1,),
            'city': (1,),
            'address_line1': (1,),
            'country': (1,),
            'state_province': (1,),
            'timezone': ("invalid",),
            'place_name': (1,),
            'volunteers': (1,),
        }
        response = self.client.post(
            reverse('workplace-list'),
            data,
            format='json',
        )
        content = {
            'details': ['Not a valid string.'],
            'name': ['Not a valid string.'],
            'city': ['Not a valid string.'],
            'address_line1': ['Not a valid string.'],
            'postal_code': ['Not a valid string.'],
            'state_province': ['Not a valid string.'],
            'country': ['Not a valid string.'],
            'seats': ['A valid integer is required.'],
            'timezone': ['Unknown timezone'],
            'place_name': ['Not a valid string.'],
            'volunteers': [
                'Incorrect type. Expected URL string, received int.'
            ],
        }
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_update(self):
        """
        Ensure we can update a workplace.
        """
        self.client.force_authenticate(user=self.admin)
        data = {
            'name': "new_workplace",
            'seats': 200,
            'details': "new_short_description",
            'address_line1': 'new_address',
            'city': 'new_city',
            'country': 'Random_Country',
            'postal_code': 'NEW_CIT',
            'state_province': 'Random_State',
            'timezone': "America/Montreal",
        }
        response = self.client.put(
            reverse(
                'workplace-detail',
                kwargs={'pk': self.workplace.id},
            ),
            data,
            format='json',
        )
        content = {
            'details': 'new_short_description',
            'id': self.workplace.id,
            'longitude': None,
            'latitude': None,
            'address_line1': 'new_address',
            'address_line2': None,
            'city': 'new_city',
            'country': 'Random_Country',
            'postal_code': 'NEW_CIT',
            'state_province': 'Random_State',
            'name': 'new_workplace',
            'pictures': [],
            'seats': 200,
            'timezone': 'America/Montreal',
            'place_name': '',
            'volunteers': [],
            'url': f'http://testserver/workplaces/{self.workplace.id}'
        }
        self.assertEqual(
            remove_translation_fields(json.loads(response.content)),
            content
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_delete(self):
        """
        Ensure we can delete a workplace.
        """
        self.client.force_authenticate(user=self.admin)
        response = self.client.delete(
            reverse(
                'workplace-detail',
                kwargs={'pk': self.workplace.id},
            ),
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    def test_list(self):
        """
        Ensure we can list workplaces as an unauthenticated user.
        """
        response = self.client.get(
            reverse('workplace-list'),
            format='json',
        )
        content = {
            'count': 1,
            'next': None,
            'previous': None,
            'results': [{
                'details': 'short_description',
                'id': self.workplace.id,
                'latitude': None,
                'longitude': None,
                'address_line1': 'random_address_1',
                'address_line2': None,
                'city': 'random_city',
                'country': 'Random_Country',
                'postal_code': 'RAN_DOM',
                'state_province': 'Random_State',
                'name': 'Blitz',
                'pictures': [],
                'seats': 40,
                'timezone': 'America/Montreal',
                'place_name': '',
                'volunteers': [],
                'url': f'http://testserver/workplaces/{self.workplace.id}'
            }]
        }
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_read(self):
        """
        Ensure we can read a workplace as an unauthenticated user.
        """
        response = self.client.get(
            reverse(
                'workplace-detail',
                kwargs={'pk': self.workplace.id},
            ),
        )
        content = {
            'details': 'short_description',
            'id': self.workplace.id,
            'address_line1': 'random_address_1',
            'address_line2': None,
            'city': 'random_city',
            'country': 'Random_Country',
            'longitude': None,
            'latitude': None,
            'postal_code': 'RAN_DOM',
            'state_province': 'Random_State',
            'name': 'Blitz',
            'pictures': [],
            'seats': 40,
            'place_name': '',
            'timezone': 'America/Montreal',
            'volunteers': [],
            'url': f'http://testserver/workplaces/{self.workplace.id}'
        }
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_read_non_existent_workplace(self):
        """
        Ensure we get not found when asking for a workplace that doesn't exist.
        """
        response = self.client.get(
            reverse(
                'workplace-detail',
                kwargs={'pk': 999},
            ),
        )
        content = {'detail': 'Not found.'}
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| StarcoderdataPython |
9610784 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from scrapy.exporters import CsvItemExporter
from nlu_link_collector.items import NluLinkCollectorItem
class NluLinkCollectorPipeline:
    """Export every NluLinkCollectorItem scraped by a spider to a CSV file.

    One ``<spider_name>_links.csv`` file is opened per spider run; items
    of other types pass through untouched.
    """

    def open_spider(self, spider):
        """Open the per-spider CSV file and start the exporter."""
        self.file = open('{0}_links.csv'.format(spider.name),
                         'w+b')
        self.exporter = CsvItemExporter(self.file)
        # BUG FIX: the item-exporter protocol requires start_exporting()
        # to be called before any export_item() call; it was missing here
        # while finish_exporting() was already present in close_spider.
        self.exporter.start_exporting()

    def close_spider(self, spider):
        """Finish exporting and close the underlying file handle."""
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        """Export link items to CSV; always pass the item downstream."""
        if isinstance(item, NluLinkCollectorItem):
            self.exporter.export_item(item)
        return item
| StarcoderdataPython |
4995500 | import numpy as np
import sys
from random import randint
from copy import copy
from numpy import matrix
from seidel_algo import seidel_algo
def floyd_warshall_algo(input_graph):
    """All-pairs shortest paths via the Floyd-Warshall algorithm.

    Off-diagonal zeros in the adjacency matrix are treated as "no edge"
    and replaced by a large sentinel distance before relaxation. The
    input matrix is copied and not modified.
    """
    dist = copy(input_graph)
    size = dist.shape[0]
    no_edge = 10 ** 9 + 7
    # Mark absent edges (off-diagonal zeros) with the sentinel distance.
    for row in range(size):
        for col in range(size):
            if row != col and dist[row, col] == 0:
                dist[row, col] = no_edge
    # Classic triple loop: allow each vertex in turn as an intermediate.
    for mid in range(size):
        for row in range(size):
            for col in range(size):
                through_mid = dist[row, mid] + dist[mid, col]
                if through_mid < dist[row, col]:
                    dist[row, col] = through_mid
    return dist
def have_full_chain(used, n):
    """Return True when every vertex id in [0, n) appears in `used`."""
    return all(vertex in used for vertex in range(n))
def gen_test_case(n):
    """Build a random connected undirected graph on `n` vertices.

    Returns two adjacency matrices describing the same edge set:
    one using 0 for "no edge" (Seidel input) and one pre-filled with a
    large sentinel for missing edges (Floyd-Warshall input). The chain
    of visited vertices is printed for debugging.
    """
    seidel_graph = np.zeros((n, n), int)
    fw_graph = np.zeros((n, n), int)
    fw_graph.fill(10 ** 9 + 7)  # sentinel distance: no edge yet
    # Random-walk a chain of vertices until every vertex has appeared
    # at least once (guarantees connectivity).
    chain = []
    visited = set()
    while not have_full_chain(visited, n):
        candidate = randint(0, n - 1)
        if not chain or candidate != chain[-1]:
            chain.append(candidate)
            visited.add(candidate)
    # Lay down an undirected edge between each consecutive chain pair.
    for src, dst in zip(chain, chain[1:]):
        seidel_graph[src, dst] = seidel_graph[dst, src] = 1
        fw_graph[src, dst] = fw_graph[dst, src] = 1
    print(chain)
    return seidel_graph, fw_graph
class Tests:
    """Fixed-input regression tests comparing seidel_algo output against
    hand-computed all-pairs distance matrices."""
    def test_1(self):
        # 4-cycle (directed edges given, treated as a ring): every
        # distance is the number of hops around the cycle.
        graph = matrix("""
            0 1 0 0;
            0 0 1 0;
            0 0 0 1;
            1 0 0 0
            """)
        expected = matrix("""
            0 1 2 3;
            3 0 1 2;
            2 3 0 1;
            1 2 3 0
            """)
        assert np.all(expected == seidel_algo(graph))
    def test_2(self):
        # Small irregular 5-vertex graph with a known distance matrix.
        graph = matrix("""
            0 0 1 0 0;
            0 0 1 0 1;
            1 1 0 0 1;
            0 0 0 0 1;
            0 1 1 1 0
            """)
        expected = matrix("""
            0 2 1 3 2;
            2 0 1 2 1;
            1 1 0 2 1;
            3 2 2 0 1;
            2 1 1 1 0
            """)
        assert np.all(expected == seidel_algo(graph))
    def test_3(self):
        # Path graph on 7 vertices: distance equals index difference.
        graph = matrix("""
            0 1 0 0 0 0 0;
            1 0 1 0 0 0 0;
            0 1 0 1 0 0 0;
            0 0 1 0 1 0 0;
            0 0 0 1 0 1 0;
            0 0 0 0 1 0 1;
            0 0 0 0 0 1 0
            """)
        expected = matrix("""
            0 1 2 3 4 5 6;
            1 0 1 2 3 4 5;
            2 1 0 1 2 3 4;
            3 2 1 0 1 2 3;
            4 3 2 1 0 1 2;
            5 4 3 2 1 0 1;
            6 5 4 3 2 1 0
            """)
        assert np.all(seidel_algo(graph) == expected)
    def run_unit_test(self):
        # Execute every fixed test case; raises AssertionError on failure.
        self.test_1()
        self.test_2()
        self.test_3()
def unit_test():
    """Run the fixed test suite and print a confirmation on success."""
    Tests().run_unit_test()
    print("UNIT TEST: OK")
def stress_test(test_cases=1, n_mn=2, n_mx=3):
    """Cross-check seidel_algo against floyd_warshall_algo on random graphs.

    For each case, a random connected graph with between `n_mn` and
    `n_mx` vertices is generated and both algorithms must agree on the
    full distance matrix.
    """
    # Seidel's algorithm recurses; raise the limit for larger graphs.
    sys.setrecursionlimit(5000)
    for _ in range(test_cases):
        size = randint(n_mn, n_mx)
        seidel_graph, fw_graph = gen_test_case(size)
        assert np.all(seidel_algo(seidel_graph) == floyd_warshall_algo(fw_graph))
| StarcoderdataPython |
1865004 | """
loadColorTables.py
Purpose: Utility script that loads GEMPAK color table files and converts them to HootPy fconfig files.
Started: <NAME> on March 5, 2012
"""
def main():
    """Command-line entry point: parse arguments, build the fill list from
    the requested color table, and either write an fconfig file or print
    the fill intervals to stdout. (NOTE: this file is Python 2 code --
    see the `print f` statement below.)"""
    import argparse
    parser = argparse.ArgumentParser(prog='loadColorTables',description='Load a GEMPAK or matplotlib color table and convert it into an fconfig file.')
    parser.add_argument('cname',metavar='COLORNAME',
        help='Matplotlib colormap name or GEMPAK colortable file name')
    parser.add_argument('--type',choices=['M','G'],default='M',
        help='Specify whether colortable is from Matplotlib (M) or GEMPAK (G).')
    parser.add_argument('-s','--start',type=float,required=True,
        help='Starting value of interval')
    parser.add_argument('-e','--end',type=float,required=True,
        help='Ending value of interval')
    parser.add_argument('-i','--interval',type=float,default=0,
        help='Standard interval between contours. Do not include if you wish plot a specified number of contours')
    parser.add_argument('-n','--ncontours',type=int,default=0,
        help='Number of contours. Do not include if you want to have a standard interval.')
    parser.add_argument('-l','--sample',action='store_true',
        help='Sample colors evenly across the map.')
    parser.add_argument('-o','--offset',type=int,default=0,
        help='Colormap offset value. Use if you want your color range to start at a different value')
    parser.add_argument('-x','--extend',choices=['both','max','min','neither'],default='both',
        help='Specify whether you want to extend the colormap past the max, min, or both extrema')
    parser.add_argument('-w','--wrap',action='store_true',
        help='If specified, the colortable will wrap around to the beginning+offset')
    parser.add_argument('-u','--unit',default='',
        help='Units of contours')
    parser.add_argument('-f','--file',default=None,
        help='Output file name')
    parser.add_argument('-r','--reverse',action='store_true',
        help='Reverse colormap')
    args = parser.parse_args()
    fill = makeFConfigFill(args.cname,args.type,args.start,args.end,interval=args.interval,num_contours=args.ncontours,offset=args.offset,extend=args.extend,wrap=args.wrap,reverse=args.reverse,sample=args.sample)
    # With no output file, dump the fill intervals to stdout instead.
    if args.file is not None:
        writeFConfigFile(args.file,args.unit,fill)
    else:
        for f in fill:
            print f
def loadGempakColorTable(filename):
    """loadGempakColorTable
    Purpose: Read a GEMPAK colortable file and convert the colors to a list of hex strings
    Parameters: filename
    Returns: list of hex colors
    """
    import struct
    ct_file = open(filename)
    color_tuples = []
    hex_colors = []
    for line in ct_file:
        # Lines whose first character is '!' are GEMPAK comments.
        if '!' in line[0]:
            continue
        else:
            color_line = line.split()
            # Skip the leading color-name token(s); the RGB values are the
            # first run of digit fields on the line.
            color_start = [x.isdigit() for x in color_line].index(True)
            color_tuples.append(tuple([int(x) for x in color_line[color_start:color_start+3]]))
            # Python 2 idiom: pack the three bytes and hex-encode them.
            hex_colors.append('#' + struct.pack('BBB',*color_tuples[-1]).encode('hex'))
    return hex_colors
def loadMPColorTable(cname):
    """Load a matplotlib colormap by name and return its 256 entries as a
    list of hex color strings. Exits the process if the name is unknown."""
    import struct
    from matplotlib.colors import rgb2hex
    from pylab import get_cmap
    cmap = get_cmap(cname)
    if cmap is None:
        print 'Error: colormap %s does not exist' % cname
        exit()
    # Sample the colormap at every integer index 0..255.
    c_range = range(0,256)
    hex_colors = []
    for c in c_range:
        hex_colors.append(rgb2hex(cmap(c)))
    return hex_colors
def makeFConfigFill(cname,type,start,end,interval=0,num_contours=0,offset=0,extend='both',wrap=True,reverse=False,sample=False):
    """makeFConfigFill()
    Purpose: Load the color table file and convert it into the HootPy fill list
    Parameters:
        cname: Either GEMPAK colortable file name or matplotlib colortable name
        type: 'G' for GEMPAK color table or 'M' for matplotlib color table
        start: first fill value
        end: last fill value
        interval: if > 0, then the distance between values will be specified by the interval. Otherwise
            if < 1, then the values between start and end will be evenly spaced over the length of
            the colortable.
        extend: (Default 'both'): Can be 'both','min','max','neither'. Specifies whether the beginning and end colors
            should be used for all values outside the specified range or if they should be ignored.
        wrap: (Default True): If True, then the color table is reused from the beginning if more values are required.
    Returns: fill - a list of fill interval dictionaries describing the color and the upper and lower limits of its range.
    """
    import numpy as np
    fill = []
    # Pick the loader based on the colortable source.
    if type.upper()=='G':
        hex_colors = loadGempakColorTable(cname)
    elif type.upper()=='M':
        hex_colors = loadMPColorTable(cname)
    else:
        print 'Error: Improper type specified'
        exit()
    print hex_colors
    if reverse:
        hex_colors.reverse()
    # Contour values: explicit interval > fixed count > one per color.
    if interval > 0:
        values = np.arange(start,end+interval,interval)
    elif num_contours > 0:
        values = np.linspace(start,end,num_contours)
    else:
        values = np.linspace(start,end,len(hex_colors))
    # c is the cursor into hex_colors, starting at the requested offset.
    c = offset
    if sample:
        # Spread the available colors evenly across all contour bands.
        # NOTE(review): integer division under Python 2 -- becomes a float
        # index step if ever run under Python 3; confirm before porting.
        c_interval = len(hex_colors) / len(values)
    else:
        c_interval = 1
    for i,v in enumerate(values):
        # Below-range band (uses the current color, then advances).
        if (extend=='both' or extend=='min') and i==0:
            fill.append({'lower_bound':None,'upper_bound':np.round(values[i],2),'color':hex_colors[c]})
            c+= c_interval
        # Above-range band (no advance: last color is reused).
        elif (extend=='both' or extend=='max') and i==len(values) - 1:
            fill.append({'lower_bound':np.round(values[i],2),'upper_bound':None,'color':hex_colors[c]})
        # Interior band between consecutive contour values.
        if i < len(values) - 1:
            fill.append({'lower_bound':np.round(values[i],2),'upper_bound':np.round(values[i+1],2),'color':hex_colors[c]})
            # Advance the cursor unless it has hit the no-wrap sentinel (-1).
            if c >= 0 and c < len(hex_colors):
                c+= c_interval
            # Past the end of the table: wrap to the start, or pin to the
            # sentinel so the last color keeps being reused.
            if wrap and c == len(hex_colors):
                c = 0
            elif not wrap and c == len(hex_colors):
                c = -1
    return fill
def writeFConfigFile(config_filename, unit, fill):
    """Serialize a fill-interval list to an fconfig file.

    The file contains a header comment, a `units` assignment, and a
    `fill` list with one dictionary literal per line.
    """
    with open(config_filename, 'w') as out:
        out.write('#Auto-generated fconfig file\n')
        out.write('units = "%s"\n' % unit)
        out.write('\n')
        out.write('fill = [\n')
        # Every entry but the last gets a trailing comma.
        for entry in fill[:-1]:
            out.write('\t' + str(entry) + ',\n')
        out.write('\t' + str(fill[-1]) + '\n')
        out.write(']\n')
def testMain():
    """Ad-hoc smoke test using hard-coded GEMPAK table paths; exercises the
    loaders, fill construction and the fconfig writer."""
    ct_filename = '/usr/local/nawips/gempak/tables/luts/ir_drgb.tbl'
    hex_colors = loadGempakColorTable(ct_filename)
    # NOTE(review): makeFConfigFill's signature is (cname, type, start, end,
    # interval=...); these calls pass no `type` argument, so 50 lands in
    # `type` and type.upper() would fail -- confirm intended usage.
    fill = makeFConfigFill(ct_filename,50,400,10)
    fill2 = makeFConfigFill(ct_filename,50,400)
    writeFConfigFile('../config/ir_sat.fconfig','W m-2',fill2)
    for f in fill2:
        print f
    print hex_colors
    ct_filename2 = '/usr/local/nawips/gempak/tables/colors/coltbl.xwp'
    hex_colors2 = loadGempakColorTable(ct_filename2)
    print hex_colors2
# Command-line entry point.
if __name__=="__main__":
    main()
| StarcoderdataPython |
8116976 | <reponame>sorinsuciu-msft/openai-python
#!/usr/bin/env python
import argparse
import logging
import sys
import openai
from openai.cli import api_register, display_error, tools_register, wandb_register
# Root-logger setup for the CLI: timestamped records are emitted to stderr
# so they never interfere with command output written to stdout.
logger = logging.getLogger()
formatter = logging.Formatter("[%(asctime)s] %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
def main():
    """Parse CLI arguments, configure the openai client, and dispatch to
    the selected subcommand.

    Returns a process exit code: 0 on success, 1 on an OpenAI API error
    or a keyboard interrupt.
    """
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbosity",
        default=0,
        help="Set verbosity.",
    )
    parser.add_argument("-b", "--api-base", help="What API base url to use.")
    parser.add_argument("-k", "--api-key", help="What API key to use.")
    parser.add_argument(
        "-o",
        "--organization",
        help="Which organization to run as (will use your default organization if not specified)",
    )
    # Default action when no subcommand is given: show usage.
    def help(args):
        parser.print_help()
    parser.set_defaults(func=help)
    subparsers = parser.add_subparsers()
    sub_api = subparsers.add_parser("api", help="Direct API calls")
    sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
    sub_wandb = subparsers.add_parser("wandb", help="Logging with Weights & Biases")
    api_register(sub_api)
    tools_register(sub_tools)
    wandb_register(sub_wandb)
    args = parser.parse_args()
    # -v enables INFO; -vv (or more) enables DEBUG plus client debug mode.
    if args.verbosity == 1:
        logger.setLevel(logging.INFO)
    elif args.verbosity >= 2:
        logger.setLevel(logging.DEBUG)
        openai.debug = True
    # Command-line credentials/endpoints override environment defaults.
    if args.api_key is not None:
        openai.api_key = args.api_key
    if args.api_base is not None:
        openai.api_base = args.api_base
    if args.organization is not None:
        openai.organization = args.organization
    try:
        args.func(args)
    except openai.error.OpenAIError as e:
        display_error(e)
        return 1
    except KeyboardInterrupt:
        # Leave the terminal on a fresh line after Ctrl-C.
        sys.stderr.write("\n")
        return 1
    return 0
# Exit with main()'s return code so shell callers can detect failures.
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1679275 | from .base_model import BaseModel
class QuestionModel(BaseModel):
    """Data-access model for the questions table.

    SECURITY FIX: all user-supplied *values* are now passed to the
    database driver as bound parameters (``%s`` placeholders) instead of
    being interpolated into the SQL string, preventing SQL injection.
    Identifiers (table/column names) are still formatted in, but they
    only ever come from trusted code within this class.
    """
    table = 'questions'

    def save(self, data):
        """Insert a new question and return the created row."""
        query = ("INSERT INTO {} (title, body, meetup_id, user_id) "
                 "VALUES (%s, %s, %s, %s) RETURNING *").format(self.table)
        self.cur.execute(query, (data['title'], data['body'],
                                 data['meetup_id'], data['user_id']))
        result = self.cur.fetchone()
        self.conn.commit()
        return result

    def upvote(self, question_id):
        """Increment a question's vote count and return the updated row."""
        question = self.where('id', question_id)
        votes = question['votes'] + 1
        query = "UPDATE {} SET votes = %s WHERE id = %s RETURNING *".format(self.table)
        self.cur.execute(query, (votes, question_id))
        self.conn.commit()
        return self.cur.fetchone()

    def downvote(self, question_id):
        """Decrement a question's vote count and return the updated row."""
        question = self.where('id', question_id)
        votes = question['votes'] - 1
        query = "UPDATE {} SET votes = %s WHERE id = %s RETURNING *".format(self.table)
        self.cur.execute(query, (votes, question_id))
        self.conn.commit()
        return self.cur.fetchone()

    def exist(self, key, value):
        """Return True if at least one row has ``key == value``.

        ``key`` must be a trusted column name (it is interpolated into
        the SQL); ``value`` is bound safely.
        """
        query = "SELECT * FROM {} WHERE {} = %s".format(self.table, key)
        self.cur.execute(query, (value,))
        result = self.cur.fetchall()
        return len(result) > 0

    def where(self, key, value):
        """Return the first row where trusted column ``key`` equals ``value``."""
        query = "SELECT * FROM {} WHERE {} = %s".format(self.table, key)
        self.cur.execute(query, (value,))
        result = self.cur.fetchone()
        return result

    def getOne(self, id):
        """Fetch a single question by its primary key."""
        question = self.where('id', id)
        return question

    def getAll(self, id):
        """Fetch all questions belonging to the given meetup."""
        query = "SELECT * FROM {} WHERE meetup_id = %s".format(self.table)
        self.cur.execute(query, (id,))
        result = self.cur.fetchall()
        return result

    def delete(self, id):
        """Delete a question by its primary key."""
        # BUG FIX: every other method in this model keys the questions
        # table on ``id`` (see where/getOne/upvote); the previous version
        # filtered on a ``question_id`` column that does not match.
        query = "DELETE FROM {} WHERE id = %s".format(self.table)
        self.cur.execute(query, (id,))
        self.conn.commit()
        return True
| StarcoderdataPython |
11370647 | <filename>src/Genome/sequence/EulerPath.py<gh_stars>0
class Node:
    """A directed-graph vertex that tracks its incoming and outgoing edges."""

    def __init__(self, name):
        self.name = name
        self.ins = []   # source nodes of incoming edges
        self.outs = []  # destination nodes of outgoing edges


class EulerPath:
    """Builds a directed graph from an adjacency dict and finds an Euler
    path/circuit with Hierholzer's algorithm (edges are consumed as they
    are traversed)."""

    def __init__(self, adj):
        # adj maps a source name to a list of destination names.
        self.graph = {}
        for source, destinations in adj.items():
            source_node = self.getNode(source)
            for destination in destinations:
                destination_node = self.getNode(destination)
                source_node.outs.append(destination_node)
                destination_node.ins.append(source_node)

    def findFirstNode(self):
        # Prefer a node with more outgoing than incoming edges (the natural
        # start of an Euler *path*); otherwise fall back to the last node
        # seen during iteration.
        for node in self.graph.values():
            if len(node.outs) > len(node.ins):
                return node
        return node

    def getNode(self, name):
        # Fetch an existing node or lazily create and register a new one.
        try:
            return self.graph[name]
        except KeyError:
            node = self.graph[name] = Node(name)
            return node

    def eulerPath(self):
        stack = []  # nodes with possibly-unused outgoing edges
        trail = []  # finished vertices, collected in reverse order
        node = self.findFirstNode()
        while True:
            # Walk forward, consuming one unused edge per step.
            while node.outs:
                stack.append(node)
                node = node.outs.pop()
            # Dead end reached: retire this vertex and backtrack.
            trail.append(node.name)
            if len(stack) == 1:
                trail.append(stack.pop().name)
                break
            elif not stack:
                break
            node = stack.pop()
        return trail[::-1]
| StarcoderdataPython |
1860313 | <gh_stars>1-10
# Generated by Django 3.0.3 on 2020-02-21 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional free-form ``type``
    CharField to the events ``Registrant`` model."""

    dependencies = [
        ('events', '0013_auto_20190918_1438'),
    ]

    operations = [
        migrations.AddField(
            model_name='registrant',
            name='type',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| StarcoderdataPython |
4946828 | __author__ = "<NAME>"
import json
import urllib.request
if __name__ == "__main__":
    # Fetch GitHub's gemoji database and print the first ten emoji
    # together with their primary alias.
    request = urllib.request.Request(
        "https://raw.githubusercontent.com/github/gemoji/master/db/emoji.json")
    with urllib.request.urlopen(request) as response:
        emoji_entries = json.loads(response.read())
    for entry in emoji_entries[:10]:
        print(entry['emoji'], entry['aliases'][0])
| StarcoderdataPython |
135437 | import re
from recipes.site_listers.base import TwoLevelSitemapLister
class TheHappyFoodieLister(TwoLevelSitemapLister):
    """Recipe lister for thehappyfoodie.co.uk.

    Starts from the site's sitemap index; the regexes select the recipe
    sub-sitemaps and individual recipe page paths (crawling behavior is
    provided by ``TwoLevelSitemapLister``).
    """

    start_url = "https://thehappyfoodie.co.uk/sitemap_index.xml"
    # sub-sitemaps to follow, e.g. /recipes-sitemap1.xml
    sitemap_path_regex = re.compile(r"^/recipes-sitemap\d+\.xml$")
    # paths of individual recipe pages
    recipes_path_regex = re.compile(r"^/recipes/.+$")
1783752 | <reponame>nabin-info/hackerrank.com<filename>project_euler/n009.py<gh_stars>0
#!/usr/bin/python
import sys
# Python 2 input helpers: read a stripped line, whitespace-separated tokens,
# or n lines, converting each value with ``tr`` (default: leave as str).
def input_arg(tr=str): return tr(raw_input().strip())
def input_args(tr=str): return map(tr, list(input_arg().split(' ')))
def input_arglines(n,tr=str): return [input_arg(tr) for x in range(n)]
def cktri(n):
    # Project Euler 9 (HackerRank): among Pythagorean triples a < b < c with
    # a + b + c == n, return the largest product a*b*c, or -1 if none exists.
    # NOTE: Python 2 code -- the integer '/' below is floor division.
    g = -1
    # Start from the smallest candidate triple (3, 4, n-7).
    a,b,c = 3,4,(n-7)
    while a < b and b < c:
        if a**2 + b**2 == c**2:
            p = a*b*c
            if p > g:
                g = p
        a += 1
        # From a+b+c = n and a^2+b^2 = c^2: b = n(n-2a) / (2(n-a)), written
        # here as (a^2-(a-n)^2) / (2(a-n)). Floor division may yield an
        # inexact b; the Pythagorean check above rejects such candidates --
        # TODO confirm flooring can never produce a false positive.
        b = (a**2 - (a - n)**2)/(2*(a - n))
        c = n - b - a
    return g
# Read the number of test cases, then one perimeter per line (Python 2 I/O),
# printing the best triple product (or -1) for each.
T = input_arg(int)
N = input_arglines(T,int)
for n in N:
    print cktri(n)
| StarcoderdataPython |
172786 | from src.params.ParamsPING import *
from src.izhikevich_simulation.IzhikevichNetworkOutcome import *
from src.params.ParamsFrequencies import *
import numpy as np
from math import floor, pi
from scipy import fft
import matplotlib.pyplot as plt
from collections import Counter
from tqdm import tqdm
import warnings
class SpikingFrequencyComputer:
    """
    Computes dominant spiking frequencies of PING networks from simulated
    Izhikevich-network spike trains, via an FFT peak search or a
    Morlet-wavelet time-frequency representation (TFR).
    """

    def compute_for_all_pings(
            self, simulation_outcome: IzhikevichNetworkOutcome, params_freqs: ParamsFrequencies
    ) -> list[int]:
        """
        Compute, for every PING network in the simulated grid, the dominant
        oscillation frequency of its excitatory population.

        :param simulation_outcome: spikes and grid geometry produced by the
            Izhikevich network simulation.
        :param params_freqs: candidate frequency band and wavelet parameters.
        :return: one dominant frequency per PING network.
        """

        frequencies = []

        for ping_network in (pbar := tqdm(simulation_outcome.grid_geometry.ping_networks)):
            pbar.set_description("Frequency distribution per PING")

            # select ex neurons for a single ping network from spikes
            spikes_in_ping_mask = np.isin(
                np.array(simulation_outcome.spikes).T[1], ping_network.ids[NeuronTypes.EX]
            )
            # times when excitatory neurons fired
            spikes_times_in_ping = np.array(simulation_outcome.spikes)[spikes_in_ping_mask].T[0]

            # number of excitatory spikes at each simulation step
            spikes_ex_per_times = [
                np.count_nonzero(spikes_times_in_ping == t) for t in range(simulation_outcome.simulation_time)
            ]
            # NOTE(review): the first 299 steps are dropped, presumably to skip
            # the transient before the network settles -- confirm.
            signal = np.array(spikes_ex_per_times[299:])

            frequency = self.tfr_single_ping(
                signal=signal,
                simulation_time=simulation_outcome.simulation_time,
                params_freqs=params_freqs
            )
            frequencies.append(frequency)

        return frequencies

    def plot_ping_frequencies(self, frequencies):
        """
        Plot a histogram of per-PING dominant frequencies to
        ``../plots/test-freq-in-pings.png``.

        :param frequencies: dominant frequencies as returned by
            :meth:`compute_for_all_pings`.
        """
        # TODO:: make pretty
        print("Plotting current-frequency.....", end="")
        path = "../plots/test-freq-in-pings.png"
        fig, ax = plt.subplots(figsize=(30, 30))
        ax.tick_params(axis='both', which='major', labelsize=50)
        plt.hist(frequencies, color="#ACDDE7", rwidth=0.7)
        fig.savefig(path, bbox_inches='tight')
        print(end="\r", flush=True)
        print(f"Plotting ended, result: {path[3:]}")

    def fft_single_ping(
            self, signal: np.ndarray[int, int], params_freqs: ParamsFrequencies
    ) -> int:
        """
        Return the frequency (restricted to the configured band) whose FFT
        coefficient has the largest magnitude.

        :param signal: spike counts per time step (1 ms sampling assumed here).
        :param params_freqs: defines the frequency band of interest.
        :return: absolute frequency of the strongest in-band component.
        """

        fft_data = fft.fft(signal)
        # frequency bins for a 1 kHz sampling rate (1 ms steps)
        freqs = fft.fftfreq(len(signal), d=1 / 1000)
        # restrict the peak search to the configured (gamma) band
        gamma_indices = np.argwhere(
            (freqs >= params_freqs.frequencies[0]) &
            (freqs <= params_freqs.frequencies[-1])
        ).flatten()
        max_i = np.argmax(np.abs(fft_data[gamma_indices]))
        freq_max = freqs[gamma_indices][max_i]
        freq_max_abs = np.abs(freq_max)
        return np.abs(freq_max_abs)

    def tfr_single_ping(
            self, signal: np.ndarray[int, int], simulation_time: int, params_freqs: ParamsFrequencies
    ) -> int:
        """
        Determine the dominant frequency of a spike-count signal with a
        Morlet-wavelet time-frequency decomposition: the signal is convolved
        (via FFT) with one complex wavelet per candidate frequency, and the
        frequency with the largest mean absolute response over time wins.

        :param signal: spike counts per step (transient already removed).
        :param simulation_time: number of epochs the simulation ran for.
        :type simulation_time: int
        :return: the candidate frequency with the strongest mean response.
        :rtype: int
        """

        # time axis matching the trimmed signal (first 298 entries dropped,
        # mirroring the transient removal in compute_for_all_pings)
        t = [i / 0.001 for i in range(1, simulation_time+1)]
        t = t[298:]
        # the size of the data + zero padding
        nr_points = len(params_freqs.wt) + len(signal) - 1
        fft_data = fft.fft(signal, nr_points)

        tfr = np.zeros((len(params_freqs.frequencies), len(t)), dtype="complex_") * np.nan

        for fi in range(len(params_freqs.frequencies)):
            # frequency-domain convolution with the fi-th complex wavelet,
            # normalized by its peak coefficient
            fft_wavelet = fft.fft(params_freqs.complex_wavelets[fi], nr_points)
            fft_wavelet = fft_wavelet / max(fft_wavelet)

            tmp = fft.ifft(fft_wavelet * fft_data, nr_points)
            # trim the edges, these are the bits we included by zero padding
            tfr[
                np.argwhere(np.array(params_freqs.frequencies) == params_freqs.frequencies[fi]).flatten(), :
            ] = tmp[params_freqs.half_wave_size: -params_freqs.half_wave_size + 1]

        with warnings.catch_warnings():
            # nanmean over all-NaN slices emits a RuntimeWarning; suppress it
            warnings.simplefilter("ignore", category=RuntimeWarning)
            mx_i = int(np.argmax(np.nanmean(np.abs(tfr), 1)))

        return params_freqs.frequencies[mx_i]
| StarcoderdataPython |
6514764 | <filename>src/ipaparser/_code/definitions/brackets.py
from enum import Enum
__all__ = [
'BracketStrategy',
]
class BracketStrategy(str, Enum):
    """Strategy for handling bracketed segments during IPA parsing.

    NOTE(review): member semantics (keep/expand/strip) are inferred from
    their names -- confirm against the parser that consumes this enum.
    """
    KEEP = 'keep'
    EXPAND = 'expand'
    STRIP = 'strip'
| StarcoderdataPython |
4982138 | <gh_stars>0
from dataclasses import dataclass
from greenberry.types.blockchain_format.sized_bytes import bytes32
from greenberry.util.ints import uint32
from greenberry.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class PoolTarget(Streamable):
    """Immutable, serializable pool payout target: a puzzle hash together
    with the maximum block height up to which it is valid."""
    puzzle_hash: bytes32
    max_height: uint32  # A max height of 0 means it is valid forever
| StarcoderdataPython |
5170283 | <filename>myCSV.py
import numpy as np
import csv
class myCSV():
    """Minimal CSV helpers: load a file into a (header-index dict, rows)
    pair and project/convert selected columns.

    The methods use no instance state; call them on the class, e.g.
    ``dic, data = myCSV.open('file.csv')``.
    """

    @staticmethod
    def open(name, ct=-1, hasHead=True, delimiter=','):
        """Read up to ``ct`` data rows from the CSV file ``name``.

        Bug fix: the previous version called ``len()`` on the csv reader
        (which has no length) whenever ``ct`` was negative, so the default
        ``ct=-1`` always raised TypeError. A negative ``ct`` now means
        "read all rows", as originally intended.

        :param name: path of the CSV file.
        :param ct: maximum number of data rows to read; negative means all.
        :param hasHead: if True, the first row is a header and is returned
            as a column-name -> index mapping.
        :param delimiter: field separator.
        :return: tuple ``(dic, data)`` where ``dic`` maps header names to
            column indices (empty when ``hasHead`` is False) and ``data``
            is the list of data rows (lists of strings).
        """
        idx, data, dic = [], [], {}
        count = 0
        with open(name, newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=delimiter, quotechar='|')
            first = True
            for row in reader:
                if first and hasHead:
                    idx = row
                    first = False
                    continue
                first = False
                # A non-negative ct caps the number of data rows returned.
                if 0 <= ct <= count:
                    break
                data.append(row)
                count += 1
        if hasHead:
            for position, column in enumerate(idx):
                dic[column] = position
        return dic, data

    @staticmethod
    def select(dic, data, val, typTrans):
        """Project columns ``val`` (by header name) out of ``data``,
        converting every cell with ``typTrans``.

        :param dic: header-name -> column-index mapping from :meth:`open`.
        :param data: list of rows (lists of strings).
        :param val: sequence of column names to extract, in output order.
        :param typTrans: callable applied to each extracted cell (e.g. int).
        :return: list of converted-value rows.
        """
        return [[typTrans(row[dic[column]]) for column in val] for row in data]
224849 | # vim: fdm=marker
'''
author: <NAME>
date: 22/05/14
content: Build a coordinate map of the initial reference of a patient to an
external reference seq (e.g. HXB2). This is useful to quickly find
genes and stuff like that.
'''
# Modules
import os
import argparse
from operator import attrgetter
from collections import defaultdict
import numpy as np
from Bio import SeqIO
from hivwholeseq.utils.sequence import find_fragment
from hivwholeseq.reference import load_custom_reference
from hivwholeseq.patients.patients import load_patients, Patient
from hivwholeseq.patients.filenames import get_initial_reference_filename, \
get_foldername, get_coordinate_map_filename
# Function
def build_coordinate_map(refseq, patseq, VERBOSE=0, score_gapopen=-20, **kwargs):
    '''Build the coordinate map between a patient reference and an external one

    Parameters
       refseq: external reference sequence (e.g. HXB2); must expose .name
       patseq: patient initial reference sequence; must expose .name
       VERBOSE: verbosity level; >= 3 pretty-prints the pairwise alignment
       score_gapopen: gap-opening penalty passed to the global aligner
       **kwargs: passed to alignment function (e.g. alignment penalties)

    Returns: list of (position in refseq, position in patseq) pairs, one for
    every alignment column where neither sequence has a gap.
    '''
    from seqanpy import align_global
    (score, ali1, ali2) = align_global(refseq, patseq, score_gapopen=score_gapopen,
                                       **kwargs)
    # Columns before/after the patient sequence (leading/trailing gaps in ali2).
    # NOTE(review): using patseq_start as the initial reference position assumes
    # the reference is ungapped before the patient sequence starts -- confirm.
    patseq_start = len(ali2) - len(ali2.lstrip('-'))
    patseq_end = len(ali2.rstrip('-'))

    if VERBOSE >= 3:
        from hivwholeseq.utils.sequence import pretty_print_pairwise_ali
        pretty_print_pairwise_ali([ali1[patseq_start: patseq_end],
                                   ali2[patseq_start: patseq_end]],
                                  name1=refseq.name, name2=patseq.name)

    # Bijective map: walk the alignment columns, pairing positions only where
    # both sequences have a nucleotide (Python 2: xrange)
    mapbi = []
    pos_ref = patseq_start
    pos_ini = 0
    for col in xrange(patseq_start, patseq_end):
        nuc_ref = ali1[col]
        nuc_ini = ali2[col]
        if (nuc_ref != '-') and (nuc_ini != '-'):
            mapbi.append((pos_ref, pos_ini))
            pos_ref += 1
            pos_ini += 1
        elif (nuc_ref != '-'):
            pos_ref += 1
        elif (nuc_ini != '-'):
            pos_ini += 1

    return mapbi
def shift_mapco(mapco, refname, region):
    '''Shift coordinate map to the beginning of the reference sequence

    Mutates ``mapco`` in place: adds the region's start offset within the
    full reference to the reference-coordinate column (column 0).
    ``mapco`` must be a numpy integer array of shape (n, 2).
    '''
    from hivwholeseq.reference import load_custom_reference
    refseq = load_custom_reference(refname, format='gb')
    for feature in refseq.features:
        if feature.id == region:
            startref = feature.location.nofuzzy_start
            mapco[:, 0] += startref
            break
    # NOTE(review): if ``region`` is not among the reference's features the
    # map is silently left unshifted -- confirm this is intended.
# Script
# Python 2 script (print statement; ``map`` returns a list): builds a
# coordinate map from each patient's initial reference to an external
# reference for the requested regions, optionally saving to file.
if __name__ == '__main__':

    # Parse input args
    parser = argparse.ArgumentParser(description='Map patient coordinates to reference',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--reference', default='HXB2',
                        help='Select reference strain to align against')
    parser.add_argument('--patients', nargs='+',
                        help='Patient to analyze')
    parser.add_argument('--regions', nargs='+',
                        help='regions to make coordinate maps for (e.g. V3 F6)')
    parser.add_argument('--verbose', type=int, default=0,
                        help='Verbosity level [0-3]')
    parser.add_argument('--save', action='store_true',
                        help='Save the map to file')
    args = parser.parse_args()
    refname = args.reference
    pnames = args.patients
    regions = args.regions
    VERBOSE = args.verbose
    save_to_file = args.save

    # Restrict to the requested patients (default: all)
    patients = load_patients()
    if pnames is not None:
        patients = patients.loc[pnames]
    if VERBOSE >= 3:
        print 'patients', patients.index
    if not len(patients):
        raise ValueError('No patients found!')

    maps_coord = defaultdict(dict)
    for pname, patient in patients.iterrows():
        patient = Patient(patient)

        # Make maps for all annotations if not explicit
        if regions is None:
            patseqann = patient.get_reference('genomewide', format='gb')
            regionspat = map(attrgetter('id'), patseqann.features) + ['genomewide']
        else:
            regionspat = regions

        for region in regionspat:
            if VERBOSE >= 1:
                print pname, region

            refseq = load_custom_reference(refname, format='gb', region=region)
            patseq = patient.get_reference(region)

            mapco = build_coordinate_map(refseq, patseq, VERBOSE=VERBOSE)
            mapco = np.array(mapco, int)
            # shift from region-local to full-reference coordinates (in place)
            shift_mapco(mapco, refname, region)
            maps_coord[(region, pname)] = mapco

            if save_to_file:
                out_fn = get_coordinate_map_filename(pname, region, refname=refname)
                np.savetxt(out_fn, mapco, fmt='%d',
                           delimiter='\t',
                           header=refname+'\t'+pname+'_'+region)
                if VERBOSE:
                    print 'Saved to file:', pname, region
| StarcoderdataPython |
4874269 | #
# Potassium current (IK) toy model based on the model by Hodgkin & Huxley (HH).
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import numpy as np
import pints
from . import ToyModel
class HodgkinHuxleyIKModel(pints.ForwardModel, ToyModel):
    """
    Toy model based on the potassium current experiments used for Hodgkin and
    Huxley's 1952 model of the action potential of a squid's giant axon.

    A voltage-step protocol is created and applied to an axon, and the elicited
    potassium current is given as model output.

    The protocol is applied in the interval ``t = [0, 1200]``, so sampling
    outside this interval will not provide much new information.

    Example usage::

        model = HodgkinHuxleyIKModel()

        p0 = model.suggested_parameters()
        times = model.suggested_times()
        values = model.simulate(p0, times)

        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(times, values)

    Alternatively, the data can be displayed using the :meth:`fold()` method::

        plt.figure()
        for t, v in model.fold(times, values):
            plt.plot(t, v)
        plt.show()

    *Extends:* :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.

    References:

    [1] A quantitative description of membrane currents and its application to
    conduction and excitation in nerve
    Hodgkin, Huxley (1952d) Journal of Physiology
    """

    def __init__(self, initial_condition=0.3):
        # initial_condition: starting value of the gating variable n,
        # strictly between 0 and 1.
        super(HodgkinHuxleyIKModel, self).__init__()

        # Initial conditions
        self._n0 = float(initial_condition)
        if self._n0 <= 0 or self._n0 >= 1:
            raise ValueError('Initial condition must be > 0 and < 1.')

        # Reversal potential, in mV
        self._E_k = -88

        # Maximum conductance, in mS/cm^2
        self._g_max = 36

        # Voltage step protocol
        self._prepare_protocol()

    def fold(self, times, values):
        """
        Takes a set of times and values as return by this model, and "folds"
        the individual currents over each other, to create a very common plot
        in electrophysiology.

        Returns a list of tuples ``(times, values)`` for each different voltage
        step.
        """
        # Get modulus of times
        times = np.mod(times, self._t_both)
        # Remove all points during t_hold
        selection = times >= self._t_hold
        times = times[selection]
        values = values[selection]
        # Use the start of the step as t=0
        times -= self._t_hold
        # Find points to split arrays (wherever the folded time decreases)
        split = 1 + np.argwhere(times[1:] < times[:-1])
        split = split.reshape((len(split),))
        # Split arrays
        traces = []
        i = 0
        for j in split:
            traces.append((times[i:j], values[i:j]))
            i = j
        traces.append((times[i:], values[i:]))
        return traces

    def n_parameters(self):
        """ See :meth:`pints.ForwardModel.n_parameters()`. """
        return 5

    def _prepare_protocol(self):
        """
        Sets up a voltage step protocol for use with this model.

        The protocol consists of multiple steps, each starting with 90ms at a
        fixed holding potential, followed by 10ms at a varying step potential.

        NOTE(review): voltages are stored as ``-(V + 75)``, i.e. shifted and
        sign-flipped relative to the nominal membrane potential -- presumably
        the HH 1952 convention with a -75 mV resting potential; confirm.
        """
        self._t_hold = 90  # 90ms at v_hold
        self._t_step = 10  # 10ms at v_step
        self._t_both = self._t_hold + self._t_step
        self._v_hold = -(0 + 75)
        self._v_step = np.array([
            -(-6 + 75),
            -(-11 + 75),
            -(-19 + 75),
            -(-26 + 75),
            -(-32 + 75),
            -(-38 + 75),
            -(-51 + 75),
            -(-63 + 75),
            -(-76 + 75),
            -(-88 + 75),
            -(-100 + 75),
            -(-109 + 75),
        ])
        self._n_steps = len(self._v_step)

        # Protocol duration
        self._duration = len(self._v_step) * (self._t_hold + self._t_step)

        # Create list of times when V changes (not including t=0)
        self._events = np.concatenate((
            self._t_both * (1 + np.arange(self._n_steps)),
            self._t_both * np.arange(self._n_steps) + self._t_hold))
        self._events.sort()

        # List of voltages (not including V(t=0))
        self._voltages = np.repeat(self._v_step, 2)
        self._voltages[1::2] = self._v_hold

    def simulate(self, parameters, times):
        """ See :meth:`pints.ForwardModel.simulate()`. """
        if times[0] < 0:
            raise ValueError('All times must be positive.')
        times = np.asarray(times)

        # Unpack parameters
        p1, p2, p3, p4, p5 = parameters

        # Analytically calculate n, during a fixed-voltage step
        def calculate_n(v, n0, t0, times):
            # First-order gate kinetics: the alpha (a) and beta (b) rates
            # define a time constant tau and steady state inf; n relaxes
            # exponentially from n0 towards inf.
            a = p1 * (-(v + 75) + p2) / (np.exp((-(v + 75) + p2) / p3) - 1)
            b = p4 * np.exp((-v - 75) / p5)
            tau = 1 / (a + b)
            inf = a * tau
            return inf - (inf - n0) * np.exp(-(times - t0) / tau)

        # Output arrays
        ns = np.zeros(times.shape)
        vs = np.zeros(times.shape)

        # Iterate over the step, fill in the output arrays: the voltage is
        # piecewise constant, so n can be propagated analytically segment by
        # segment between consecutive protocol events.
        v = self._v_hold
        t_last = 0
        n_last = self._n0
        for i, t_next in enumerate(self._events):
            index = (t_last <= times) * (times < t_next)
            vs[index] = v
            ns[index] = calculate_n(v, n_last, t_last, times[index])
            n_last = calculate_n(v, n_last, t_last, t_next)
            t_last = t_next
            v = self._voltages[i]

        # Fill in any samples after the final event.
        index = times >= t_next
        vs[index] = v
        ns[index] = calculate_n(v, n_last, t_last, times[index])
        n_last = calculate_n(v, n_last, t_last, t_next)

        # Calculate and return current: I = g_max * n^4 * (V - E_k)
        return self._g_max * ns**4 * (vs - self._E_k)

    def suggested_duration(self):
        """
        Returns the duration of the experimental protocol modeled in this toy
        model.
        """
        return self._duration

    def suggested_parameters(self):
        """
        See :meth:`pints.toy.ToyModel.suggested_parameters()`.

        Returns an array with the original model parameters used by Hodgkin
        and Huxley.
        """
        p1 = 0.01
        p2 = 10
        p3 = 10
        p4 = 0.125
        p5 = 80
        return p1, p2, p3, p4, p5

    def suggested_times(self):
        """ See :meth:`pints.toy.ToyModel.suggested_times()`. """
        # Sample at 4 kHz over the full protocol duration.
        fs = 4
        return np.arange(self._duration * fs) / fs
| StarcoderdataPython |
3425261 | <filename>tensorflow/contrib/distribute/python/prefetching_ops_v2.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extension of prefetching_ops to support more than one device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
  """A replacement for @{tf.data.Iterator} that prefetches to another device.

  One function-buffering resource is created per target device, each placed
  on its device; `get_next()` returns a structure with one element per
  device, packed like the `devices` argument.
  """

  def __init__(self, input_dataset, devices, buffer_size):
    self._input_dataset = input_dataset
    self._get_next_call_count = 0
    self._devices = devices
    input_iterator = input_dataset.make_one_shot_iterator()
    input_iterator_handle = input_iterator.string_handle()

    @function.Defun(dtypes.string)
    def _prefetch_fn(handle):
      # Pulls one element from the upstream iterator, given its handle.
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          handle, input_iterator.output_types, input_iterator.output_shapes,
          input_iterator.output_classes)
      return remote_iterator.get_next()

    target_device = gen_dataset_ops.iterator_get_device(
        input_iterator._iterator_resource)
    self._buffering_resources = []
    # One buffering resource per flattened device, placed on that device.
    for device in nest.flatten(self._devices):
      with ops.device(device):
        buffer_resource_handle = prefetching_ops.function_buffering_resource(
            f=_prefetch_fn,
            target_device=target_device,
            string_arg=input_iterator_handle,
            buffer_size=buffer_size)
        self._buffering_resources.append(buffer_resource_handle)

  def get_next(self, name=None):
    """See @{tf.data.Iterator.get_next}."""
    self._get_next_call_count += 1
    if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)

    flat_result = []
    # TODO(priyag): This will fail if the input size (typically number of
    # batches) is not divisible by number of devices.
    # How do we handle that more gracefully / let the user know?
    for buffer_resource in self._buffering_resources:
      flat_ret = gen_dataset_ops.function_buffering_resource_get_next(
          buffer_resource,
          output_types=data_nest.flatten(sparse.as_dense_types(
              self.output_types, self.output_classes)), name=name)

      ret = sparse.deserialize_sparse_tensors(
          data_nest.pack_sequence_as(self.output_types, flat_ret),
          self.output_types, self.output_shapes, self.output_classes)

      # Restore static shape information lost through the buffering op.
      for tensor, shape in zip(
          data_nest.flatten(ret), data_nest.flatten(self.output_shapes)):
        if isinstance(tensor, ops.Tensor):
          tensor.set_shape(shape)
      flat_result.append(ret)

    return nest.pack_sequence_as(self._devices, flat_result)

  @property
  def output_classes(self):
    return self._input_dataset.output_classes

  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class _PrefetchToDeviceDataset(dataset_ops.Dataset):
  """A `Dataset` whose iterator prefetches elements to other device(s).

  Must be the final transformation in a pipeline; only one-shot iterators
  are supported.
  """

  def __init__(self, input_dataset, devices, buffer_size):
    self._input_dataset = input_dataset
    self._devices = devices
    # Default to buffering a single element per device.
    self._buffer_size = buffer_size if buffer_size is not None else 1

  def make_one_shot_iterator(self):
    return _PrefetchToDeviceIterator(self._input_dataset, self._devices,
                                     self._buffer_size)

  def make_initializable_iterator(self, shared_name=None):
    raise NotImplementedError("`prefetch_to_devices()` is not currently "
                              "compatible with initializable iterators. Use "
                              "`make_one_shot_iterator()` instead.")

  def _as_variant_tensor(self):
    # TODO(mrry): Raise this error earlier (e.g. when one of the Dataset
    # transformation methods is called.
    # TODO(mrry): Investigate support for chaining further transformations after
    # the prefetch, including GPU support.
    raise NotImplementedError("`prefetch_to_devices()` must be the last "
                              "transformation in a dataset pipeline.")

  # TODO(priyag): Fix the output types, shapes and classes to match the result
  # of get_next (which has the additional nesting layer of devices now).
  @property
  def output_types(self):
    return self._input_dataset.output_types

  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes

  @property
  def output_classes(self):
    return self._input_dataset.output_classes
def prefetch_to_devices(devices, buffer_size=None):
  """A transformation that prefetches dataset values to the given `devices`.

  NOTE: Although the transformation creates a @{tf.data.Dataset}, the
  transformation must be the final `Dataset` in the input pipeline.

  Args:
    devices: A nested structure of devices on which to prefetch the data. It can
      be a single device name, or a tuple or list of device names.
    buffer_size: (Optional.) The number of elements to buffer on each device.
      Defaults to an automatically chosen value.

  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.
  """
  def _apply_fn(dataset):
    # Deferred so the transformation composes with Dataset.apply().
    return _PrefetchToDeviceDataset(dataset, devices, buffer_size)
  return _apply_fn
| StarcoderdataPython |
11268610 | import torch
import vision_transformer
from TorchSUL import Model as M
import torch.nn as nn
import config
from vision_transformer import Block
class DepthToSpace(M.Model):
    """Rearranges channel blocks into spatial positions (pixel shuffle)."""

    def initialize(self, block_size):
        # Edge length of the square block moved from channels into space.
        self.block_size = block_size

    def forward(self, x):
        batch, channels, height, width = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        block = self.block_size
        assert channels % (block ** 2) == 0, 'DepthToSpace: Channel must be divided by square(block_size)'
        # Split channels into (remaining, block, block), interleave the block
        # dims with the spatial dims, then fold into the enlarged image.
        x = x.view(batch, -1, block, block, height, width)
        x = x.permute(0, 1, 4, 2, 5, 3)
        return x.reshape(batch, -1, height * block, width * block)
class UpSample(M.Model):
    """2x spatial upsampling block: conv stack -> 4x-channel conv +
    DepthToSpace(2) -> conv stack.

    NOTE: layer creation order matters here -- it fixes the parameter
    registration order and therefore checkpoint compatibility.
    """
    def initialize(self, upsample_layers, upsample_chn):
        self.prevlayers = nn.ModuleList()
        # Alternative transposed-convolution upsampling, kept for reference:
        #self.uplayer = M.DeConvLayer(3, upsample_chn, stride=2, activation=M.PARAM_PRELU, batch_norm=True, usebias=False)
        # 4x channels so DepthToSpace(2) can fold them into a 2x2 spatial block
        self.uplayer = M.ConvLayer(3, upsample_chn*4, activation=M.PARAM_PRELU, usebias=True)
        self.d2s = DepthToSpace(2)
        self.postlayers = nn.ModuleList()
        for i in range(upsample_layers):
            self.prevlayers.append(M.ConvLayer(3, upsample_chn, activation=M.PARAM_PRELU, usebias=False, batch_norm=True))
        for i in range(upsample_layers):
            self.postlayers.append(M.ConvLayer(3, upsample_chn, activation=M.PARAM_PRELU, usebias=False, batch_norm=True))
    def forward(self, x):
        for p in self.prevlayers:
            x = p(x)
        x = self.uplayer(x)
        x = self.d2s(x)
        # print('UPUP', x.shape)
        for p in self.postlayers:
            x = p(x)
        return x
class JointHead(M.Model):
    """Per-joint decoder: project ViT tokens to 192-d, apply one transformer
    block, reshape tokens to a 28x28 map, then upsample to a single-channel
    heatmap."""
    def initialize(self):
        # Project ViT-small tokens (384-d) down to 192-d.
        self.fc = nn.Linear(384, 192)
        self.block = Block(dim=192, num_heads=3, mlp_ratio=1, qkv_bias=True, qk_scale=None,
            drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=nn.LayerNorm)
        self.upsample = UpSample(1, 32)
        # 1x1 conv down to a single-channel heatmap.
        self.conv = M.ConvLayer(1, 1)
    def forward(self, x):
        x = self.fc(x)
        x = self.block(x)
        # 28 = 224 / patch size 8; assumes a 224x224 input -- TODO confirm
        x = x.view(-1, 28, 28, 192) # hard code here
        x = x.permute(0, 3,1,2).contiguous()
        x = self.upsample(x)
        x = self.conv(x)
        return x
class DinoNet(M.Model):
    """DINO ViT-small/8 backbone with one JointHead per keypoint
    (config.num_pts); outputs the per-joint heatmaps stacked on the
    channel dimension."""
    def initialize(self):
        self.backbone = vision_transformer.deit_small(patch_size=8)
        self.joints_head = nn.ModuleList()
        for i in range(config.num_pts):
            self.joints_head.append(JointHead())
    def forward(self, x):
        x = self.backbone.forward_2(x)
        res = []
        for h in self.joints_head:
            res.append(h(x))
        # concatenate per-joint heatmaps along the channel dimension
        res = torch.cat(res, dim=1)
        return res
def get_net():
    """Build a DinoNet, run a dummy forward pass, then load pretrained DINO
    weights into the backbone from ``dino_deitsmall8_pretrain.pth``."""
    net = DinoNet()
    # Dummy forward pass -- presumably needed so TorchSUL materializes the
    # layers before the state dict is loaded; confirm.
    x = torch.zeros(1, 3, 224, 224)
    y = net(x)
    print(y.shape)
    checkpoint = torch.load('dino_deitsmall8_pretrain.pth', map_location='cpu')
    net.backbone.load_state_dict(checkpoint, strict=True)
    print('Network initialized')
    return net
# Smoke test: build the network and run a dummy forward pass.
if __name__=='__main__':
    # Commented-out variant: load and test the backbone alone.
    # net = vision_transformer.deit_small(patch_size=8)
    # checkpoint = torch.load('dino_deitsmall8_pretrain.pth', map_location='cpu')
    # net.load_state_dict(checkpoint, strict=True)
    # x = torch.zeros(1, 3, 224, 224)
    # y = net.forward_2(x)
    # print(y.shape)
    x = torch.zeros(1, 3, 224, 224)
    net = get_net()
    y = net(x)
    print(y.shape)
| StarcoderdataPython |
9732587 | <filename>tests/misc/cursor_util_test.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# This file is part of AceQL Python Client SDK.
# AceQL Python Client SDK: Remote SQL access over HTTP with AceQL HTTP.
# Copyright (C) 2021, KawanSoft SAS
# (http://www.kawansoft.com). All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import unittest
from datetime import date, datetime
from aceql._private.cursor_util import CursorUtil
from aceql.sql_null_type import SqlNullType
class CursorUtilTest(unittest.TestCase):
    """Checks that ``CursorUtil.get_sql_type`` maps Python values to the
    expected SQL type names."""

    def test_A(self):
        # Sample values covering NULL, numeric, boolean, text and temporal types.
        the_datetime = datetime.now()
        the_date = date(2017, 10, 31)
        the_time = the_datetime.time()

        # for NULL values
        tup_null_integer = None, SqlNullType.INTEGER

        the_list = [tup_null_integer, 1, 12.53, True, "text", the_datetime, the_date, the_time]
        cpt = 0
        for x in the_list:
            print()
            print(str(x) + " / type: " + str(type(x)))
            sql_type = CursorUtil.get_sql_type(x)
            print("sql_type : " + sql_type)
            # Expected SQL type for each position in the_list above.
            if cpt == 0:
                self.assertEqual(sql_type, "TYPE_NULL4")
            if cpt == 1:
                self.assertEqual(sql_type, "INTEGER")
            if cpt == 2:
                self.assertEqual(sql_type, "REAL")
            if cpt == 3:
                self.assertEqual(sql_type, "BIT")
            if cpt == 4:
                self.assertEqual(sql_type, "VARCHAR")
            if cpt == 5:
                self.assertEqual(sql_type, "TIMESTAMP")
            if cpt == 6:
                self.assertEqual(sql_type, "DATE")
            if cpt == 7:
                self.assertEqual(sql_type, "TIME")
            cpt += 1
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
class Solution:
    def countAndSay(self, n: int) -> str:
        """Return the n-th term of the count-and-say sequence.

        The sequence starts at "1"; each term reads the previous one aloud
        as (run length, digit) pairs, e.g. "1211" -> "111221".

        Fixes: the previous version compared characters with ``is not``,
        which only works by accident of CPython's interning of one-character
        strings; it also built strings with quadratic ``+=`` and special-
        cased ``n == 1`` unnecessarily.
        """
        result = "1"
        for _ in range(n - 1):
            pieces = []
            current = result[0]
            count = 0
            for digit in result:
                if digit == current:
                    count += 1
                else:
                    # Run ended: emit "<length><digit>" and start a new run.
                    pieces.append(str(count) + current)
                    current = digit
                    count = 1
            pieces.append(str(count) + current)
            result = "".join(pieces)
        return result
| StarcoderdataPython |
6648171 | <gh_stars>0
from .account import (
Account,
)
from .chain import (
Chain,
)
from .meta import (
Meta,
)
from .module import (
Module,
)
from .state import (
State,
)
from .system import (
System,
)
__all__ = [
"Account",
"Chain",
"Meta",
"Module",
"State",
"System",
]
| StarcoderdataPython |
6676535 | <filename>pytorch_toolbelt/utils/torch_utils.py
"""Common functions to marshal data to/from PyTorch
"""
import collections
from typing import Optional, Sequence, Union, Dict
import numpy as np
import torch
from torch import nn
__all__ = [
"rgb_image_from_tensor",
"tensor_from_mask_image",
"tensor_from_rgb_image",
"count_parameters",
"transfer_weights",
"maybe_cuda",
"mask_from_tensor",
"logit",
"to_numpy",
"to_tensor",
]
def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:
    """
    Compute inverse of sigmoid of the input.
    Note: This function has not been tested for numerical stability.
    :param x: input tensor with values in [0, 1]
    :param eps: clamping margin keeping the log's argument away from 0 and 1
    :return: tensor of the same shape as ``x``
    """
    clamped = torch.clamp(x, eps, 1.0 - eps)
    return torch.log(clamped / (1.0 - clamped))
def count_parameters(model: nn.Module, keys: Optional[Sequence[str]] = None) -> Dict[str, int]:
    """
    Count total and trainable parameters of a model, plus per-block counts
    for any of the named top-level blocks the model exposes.
    :param model: A model
    :param keys: Optional list of top-level block names to report separately
    :return: dict with "total", "trainable", and one entry per present key
    """
    if keys is None:
        keys = ["encoder", "decoder", "logits", "head", "final"]
    totals = {
        "total": int(sum(p.numel() for p in model.parameters())),
        "trainable": int(sum(p.numel() for p in model.parameters() if p.requires_grad)),
    }
    for key in keys:
        # Same lookup as the original: nn.Module.__getattr__ resolves
        # registered parameters/buffers/submodules.
        if hasattr(model, key) and model.__getattr__(key) is not None:
            totals[key] = int(sum(p.numel() for p in model.__getattr__(key).parameters()))
    return totals
def to_numpy(x) -> np.ndarray:
    """
    Convert whatever to numpy array
    :param x: List, tuple, PyTorch tensor or numpy array
    :return: Numpy array
    """
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    if isinstance(x, np.ndarray):
        return x
    if isinstance(x, (list, tuple, int, float)):
        return np.array(x)
    raise ValueError("Unsupported type")
def to_tensor(x, dtype=None) -> torch.Tensor:
    """Convert the input to a torch tensor.

    :param x: torch.Tensor, numpy array, list or tuple.
    :param dtype: Optional torch dtype to cast the result to.
    :return: Tensor (numpy inputs share memory via ``torch.from_numpy``).
    :raises ValueError: For unsupported input types.
    """
    if isinstance(x, torch.Tensor):
        if dtype is not None:
            x = x.type(dtype)
        return x
    if isinstance(x, np.ndarray):
        x = torch.from_numpy(x)
        if dtype is not None:
            x = x.type(dtype)
        return x
    if isinstance(x, (list, tuple)):
        # Bug fix: the original used np.ndarray(x), which interprets x as a
        # *shape* and returns uninitialized memory; np.array(x) converts the
        # actual values.
        x = torch.from_numpy(np.array(x))
        if dtype is not None:
            x = x.type(dtype)
        return x
    raise ValueError("Unsupported input type" + str(type(x)))
def tensor_from_rgb_image(image: np.ndarray) -> torch.Tensor:
    """Convert an HWC numpy image into a CHW torch tensor (no value scaling)."""
    chw = np.ascontiguousarray(np.moveaxis(image, -1, 0))
    return torch.from_numpy(chw)


def tensor_from_mask_image(mask: np.ndarray) -> torch.Tensor:
    """Convert an HW or HWC mask into a CHW tensor, adding a trailing channel
    dimension first when the mask is 2-D."""
    if mask.ndim == 2:
        mask = np.expand_dims(mask, -1)
    return tensor_from_rgb_image(mask)
def rgb_image_from_tensor(image: torch.Tensor, mean, std, max_pixel_value=255.0, dtype=np.uint8) -> np.ndarray:
    """Denormalize a CHW tensor back into an HWC numpy image.

    Computes ``max_pixel_value * (image * std + mean)`` channel-wise and casts
    to ``dtype``. Note: values are not clipped before the cast.
    """
    hwc = np.moveaxis(to_numpy(image), 0, -1)
    denormalized = hwc * to_numpy(std) + to_numpy(mean)
    return (max_pixel_value * denormalized).astype(dtype)
def mask_from_tensor(mask: torch.Tensor, squeeze_single_channel=False, dtype=None) -> np.ndarray:
    """Convert a CHW mask tensor to an HWC numpy array.

    Optionally squeezes a single trailing channel dimension and casts to
    ``dtype`` when one is given.
    """
    result = np.moveaxis(to_numpy(mask), 0, -1)
    if squeeze_single_channel and result.shape[-1] == 1:
        result = np.squeeze(result, -1)
    return result if dtype is None else result.astype(dtype)
def maybe_cuda(x: Union[torch.Tensor, nn.Module]) -> Union[torch.Tensor, nn.Module]:
    """Move the given Tensor or Module to the CUDA device when one is
    available; otherwise return it unchanged.

    :param x: Tensor or Module.
    :return: ``x.cuda()`` when CUDA is available, else ``x``.
    """
    return x.cuda() if torch.cuda.is_available() else x
def transfer_weights(model: nn.Module, model_state_dict: collections.OrderedDict):
    """Copy weights into ``model`` one entry at a time, skipping incompatible
    layers.

    Helpful for model surgery: entries whose name or shape does not fit the
    target model are reported (printed) and ignored instead of aborting the
    whole load.

    :param model: Model to load weights into.
    :param model_state_dict: Model state dict to load weights from.
    :return: None
    """
    for name, value in model_state_dict.items():
        single_entry = collections.OrderedDict([(name, value)])
        try:
            model.load_state_dict(single_entry, strict=False)
        except Exception as e:  # broad on purpose: best-effort, layer-by-layer copy
            print(e)
| StarcoderdataPython |
def index_to_clearcontrol_filename(index: int):
    """Format ``index`` as a ClearControl raw-stack filename, e.g. ``5`` ->
    ``"000005.raw"``.

    The index is zero-padded to six digits; values with more than six digits
    keep only their last six characters (same truncation as the original
    implementation).
    """
    padded = "000000" + str(index)
    return padded[-6:] + ".raw"
9772513 | from abc import ABC, abstractmethod
from typing import Any
from tools37.tkfw._commented.events import Transmitter
__all__ = [
'Dynamic',
'DynamicContainer',
'DynamicBinder',
]
class Dynamic(Transmitter, ABC):
    """
    Base class for dynamic (observable) objects.

    NOTE(review): the original text called these "HasView" objects —
    presumably an earlier name for this class; confirm before reusing the
    term elsewhere. Subclasses must implement a ``view`` method which
    returns the current state of the data.
    """
    @abstractmethod
    def view(self) -> Any:  # result must be plain data, never a Dynamic instance
        """Return a static view of the object. The result must never contain
        any reference to Dynamic ("HasView") instances."""
class DynamicContainer(Dynamic, ABC):
    # Container flavour of Dynamic: owns child dynamics and wires their events.
    @abstractmethod
    def _setup_events(self) -> None:
        """Build all the required events transmissions."""
    @abstractmethod
    def _clear_events(self) -> None:
        """Remove all the events transmissions."""
    @abstractmethod
    def update_with(self, data: Any) -> None:
        """Update this container's content from ``data`` — presumably a static
        view as produced by ``view``; confirm against implementations."""
class DynamicBinder(Dynamic, ABC):
    # Binder flavour of Dynamic: adapts get/set access to a bound value.
    @abstractmethod
    def get(self) -> Any:
        """Return the bound value (exact semantics defined by the implementation)."""
    @abstractmethod
    def set(self, value: Any) -> None:
        """Replace the bound value (exact semantics defined by the implementation)."""
| StarcoderdataPython |
1683054 | # -*- coding: utf-8 -*-
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A tool to generate segmenter-code from human-readable rule file
"""
from __future__ import absolute_import
from __future__ import print_function
import codecs
import re
import sys
import six
HEADER = """
namespace {
const int kLSize = %d;
const int kRSize = %d;
bool IsBoundaryInternal(uint16 rid, uint16 lid) {
// BOS * or * EOS true
if (rid == 0 || lid == 0) { return true; }"""
FOOTER = """ return true; // default
}
} // namespace
"""
def ReadPOSID(id_file, special_pos_file):
  """Build a POS-name -> id-string mapping from the id and special-POS files.

  Lines in *id_file* look like ``<id> <pos-name>``. Special POS names are
  assigned fresh ids starting right after the largest regular id; blank lines
  and ``#`` comments in *special_pos_file* are skipped.
  """
  pos = {}
  max_seen = 0
  for line in codecs.open(id_file, "r", encoding="utf8"):
    pos_id, pos_name = line.split()[:2]
    pos[pos_name] = pos_id
    max_seen = max(int(pos_id), max_seen)
  next_id = max_seen + 1
  for line in codecs.open(special_pos_file, "r", encoding="utf8"):
    if len(line) <= 1 or line[0] == "#":
      continue
    pos[line.split()[0]] = "%d" % next_id
    next_id += 1
  return pos
def PatternToRegexp(pattern):
  """Translate a rule pattern into a regexp; ``*`` matches any run of
  non-comma characters."""
  return pattern.replace("*", "[^,]+")


def GetRange(pos, pattern, name):
  """Return a C++ boolean expression testing whether *name* falls in the id
  ranges of the POS entries matching *pattern*.

  Returns the empty string for the wildcard pattern ``*`` (matches
  everything). Exits the program when no POS entry matches, because that
  means the rule file references an unknown POS.
  """
  if pattern == "*":
    return ""
  pat = re.compile(PatternToRegexp(pattern))
  # Collect maximal runs of matching ids over the sorted POS names.
  # Note: ids are kept as strings; they are only compared against the -1
  # "unset" sentinel, never numerically.
  ranges = []
  lo = -1
  hi = -1
  for key in sorted(pos.keys()):
    pos_id = pos[key]
    if pat.match(key):
      if lo == -1:
        lo = pos_id
      hi = pos_id
    else:
      if lo != -1:
        ranges.append([lo, hi])
        lo = -1
  if lo != -1:
    ranges.append([lo, hi])
  conds = []
  for lo, hi in ranges:
    if lo == hi:
      conds.append("(%s == %s)" % (name, lo))
    else:
      conds.append("(%s >= %s && %s <= %s)" % (name, lo, name, hi))
  if not conds:
    # Bug fix: corrected the typo "fiind" in the fatal diagnostic.
    print("FATAL: No rule found for %s" % pattern)
    sys.exit(-1)
  return " || ".join(conds)
def main():
  """Generate the segmenter C++ code on stdout.

  argv[1]: POS id file, argv[2]: special POS file, argv[3]: boundary rule
  file with ``<left-pattern> <right-pattern> <TRUE/FALSE>`` lines.
  """
  pos = ReadPOSID(sys.argv[1], sys.argv[2])
  out = codecs.getwriter("utf8")(sys.stdout if six.PY2 else sys.stdout.buffer)
  print(HEADER % (len(list(pos.keys())), len(list(pos.keys()))), file=out)
  for line in codecs.open(sys.argv[3], "r", encoding="utf8"):
    if len(line) <= 1 or line[0] == "#":
      continue
    (l, r, result) = line.split()
    result = result.lower()
    # A "*" pattern yields an empty range expression, hence the "true" fallback.
    lcond = GetRange(pos, l, "rid") or "true"
    rcond = GetRange(pos, r, "lid") or "true"
    print(" // %s %s %s" % (l, r, result), file=out)
    print(
        " if ((%s) && (%s)) { return %s; }" % (lcond, rcond, result), file=out)
  print(FOOTER, file=out)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1603728 | <filename>Combined Neural Network/FAC/importData_image_quality.py
from __future__ import absolute_import
from __future__ import print_function
import pickle
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from scipy.misc import imresize
class importData(object):
    """Loader for the FAC image-quality dataset: absolute class labels plus
    pairwise comparison labels, per filter category.

    NOTE(review): relies on ``scipy.misc.imresize`` (removed in SciPy >= 1.3)
    and Keras image utilities — confirm the pinned environment before reuse.
    """
    def __init__(self, category=0, input_shape=(3, 224, 224), dir="./IMAGE_QUALITY_DATA"):
        # `dir` shadows the builtin; kept for interface compatibility.
        self.category = category
        self.input_shape = input_shape
        self.dir = dir
    def biLabels(self, labels):
        """
        This function will binarized labels.
        There are C classes {1,2,3,4,...,c} in the labels, the output would be c dimensional vector.
        Input:
            - labels: (N,) np array. The element value indicates the class index.
        Output:
            - biLabels: (N, C) array. Each row has and only has a 1, and the other elements are all zeros.
            - C: integer. The number of classes in the data.
        Example:
            The input labels = np.array([1,2,2,1,3])
            The binaried labels are np.array([[1,0,0],[0,1,0],[0,1,0],[1,0,0],[0,0,1]])
        """
        N = labels.shape[0]
        labels.astype(np.int)
        C = len(np.unique(labels))
        binarized = np.zeros((N, C))
        # One-hot: row n gets a 1 at column labels[n].
        # NOTE(review): assumes labels are already 0..C-1 — a label value >= C
        # would index out of bounds; confirm against the saved .npy files.
        binarized[np.arange(N).astype(np.int), labels.astype(np.int).reshape((N,))] = 1
        return binarized, C
    def load_data(self, set):
        '''
        set: train/ val/ test
        all_abs_labels: (image_name, row['class'])
        all_comp_labels: (image1_name, image2_name, +1) is 1 > 2
        '''
        np.random.seed(1)
        # load training data matrices
        all_abs_labels = np.load(self.dir + '/' + set + '_abs_cat_' + str(self.category) + '.npy')
        all_comp_labels = np.load(self.dir + '/' + set + '_comp_cat_' + str(self.category) + '.npy')
        ###################
        # downsample training data
        # if set == 'train' and num_unique_images < all_abs_labels.shape[0]:
        #     all_abs_labels, all_comp_labels = self.sample_train_data(num_unique_images, all_abs_labels, all_comp_labels)
        ###################
        # absolute images
        # load first image (seeds the (1, C, H, W) stack we concatenate onto)
        image_mtx = img_to_array(load_img(self.dir + all_abs_labels[0, 0])).astype(np.uint8)
        image_mtx = np.reshape(imresize(image_mtx, self.input_shape[1:]), self.input_shape)
        abs_imgs = image_mtx[np.newaxis, :, :, :]
        # load images
        for row in np.arange(1, all_abs_labels.shape[0]):
            image_mtx = img_to_array(load_img(self.dir + all_abs_labels[row, 0])).astype(np.uint8)
            image_mtx = np.reshape(imresize(image_mtx, self.input_shape[1:]), self.input_shape)[np.newaxis, :, :, :]
            abs_imgs = np.concatenate((abs_imgs, image_mtx), axis=0)
        # get corresponding labels
        if set == 'train':  # categorical due to softmax
            abs_labels, _ = self.biLabels(all_abs_labels[:, 1].astype(int))
        else:  # binary
            abs_labels = all_abs_labels[:, 1].astype(int)
        #####################
        # comparison images left
        # load first image
        image_mtx = img_to_array(load_img(self.dir + all_comp_labels[0, 0])).astype(np.uint8)
        image_mtx = np.reshape(imresize(image_mtx, self.input_shape[1:]), self.input_shape)
        comp_imgs_1 = image_mtx[np.newaxis, :, :, :]
        # load images
        for row in np.arange(1, all_comp_labels.shape[0]):
            image_mtx = img_to_array(load_img(self.dir + all_comp_labels[row, 0])).astype(np.uint8)
            image_mtx = np.reshape(imresize(image_mtx, self.input_shape[1:]), self.input_shape)[np.newaxis, :, :, :]
            comp_imgs_1 = np.concatenate((comp_imgs_1, image_mtx), axis=0)
        # comparison images right
        # load first image
        image_mtx = img_to_array(load_img(self.dir + all_comp_labels[0, 1])).astype(np.uint8)
        image_mtx = np.reshape(imresize(image_mtx, self.input_shape[1:]), self.input_shape)
        comp_imgs_2 = image_mtx[np.newaxis, :, :, :]
        # load images
        for row in np.arange(1, all_comp_labels.shape[0]):
            image_mtx = img_to_array(load_img(self.dir + all_comp_labels[row, 1])).astype(np.uint8)
            image_mtx = np.reshape(imresize(image_mtx, self.input_shape[1:]), self.input_shape)[np.newaxis, :, :, :]
            comp_imgs_2 = np.concatenate((comp_imgs_2, image_mtx), axis=0)
        # get corresponding labels (+1: left preferred, -1: right preferred)
        comp_labels = all_comp_labels[:, 2].astype(int)
        return abs_imgs, abs_labels, comp_imgs_1, comp_imgs_2, comp_labels
    def sample_train_data(self, num_unique_images, all_abs_labels, all_comp_labels):
        """Downsample training data to ``num_unique_images`` absolute labels,
        keeping every comparison that touches at least one retained image."""
        np.random.seed(1)
        # choose images
        abs_idx = np.random.permutation(np.arange(all_abs_labels.shape[0]))[:num_unique_images]
        # choose absolute labels
        new_abs_labels = all_abs_labels[abs_idx, :]
        new_imgs = new_abs_labels[:, 0]
        # choose comparison labels
        comp_idx = []
        for row_idx in range(all_comp_labels.shape[0]):
            # choose the comparison if the first or second image is in the absolute label set
            if all_comp_labels[row_idx, 0] in new_imgs or all_comp_labels[row_idx, 1] in new_imgs:
                comp_idx.append(row_idx)
        new_comp_labels = all_comp_labels[comp_idx, :]
        return new_abs_labels, new_comp_labels
    def create_partitions(self, valFold = 3, testFold = 4):
        """Split the raw pickled labels into 5 image folds and save per-split
        absolute/comparison label arrays as .npy files in the working dir."""
        abs_label_file = "/image_score.pkl"
        comp_label_file = "/pairwise_comparison.pkl"
        with open(self.dir + comp_label_file, 'rb') as f:
            comp_label_matrix = pickle.load(f)
        # data = pickle.load(open(partition_file_6000, 'rb'))
        with open(self.dir + abs_label_file, 'rb') as f:
            abs_label_matrix = pickle.load(f)
        #####################
        image_name_list = []
        np.random.seed(1)
        # get all unique images in category
        for row in comp_label_matrix:
            # category, f1, f2, workerID, passDup, imgId, ans
            if row['category'] == self.category:
                image1_name = '/' + row['f1'] + '/' + row['imgId'] + '.jpg'
                if image1_name not in image_name_list:
                    image_name_list.append(image1_name)
                image2_name = '/' + row['f2'] + '/' + row['imgId'] + '.jpg'
                if image2_name not in image_name_list:
                    image_name_list.append(image2_name)
        # divide images into folds (seeded shuffle above makes this reproducible)
        image_name_list = np.random.permutation(image_name_list)
        no_im_per_fold = int(len(image_name_list) / 5)
        image_name_list_by_fold = []
        for fold in range(5):
            image_name_list_by_fold.append(image_name_list[fold * no_im_per_fold:(fold + 1) * no_im_per_fold])
        #####################
        # get all comparison labels by fold
        train_comp_labels = []
        val_comp_labels = []
        test_comp_labels = []
        train_abs_labels = []
        val_abs_labels = []
        test_abs_labels = []
        for row in comp_label_matrix:
            if row['category'] == self.category:
                # category, f1, f2, workerID, passDup, imgId, ans
                image1_name = '/' + row['f1'] + '/' + row['imgId'] + '.jpg'
                image2_name = '/' + row['f2'] + '/' + row['imgId'] + '.jpg'
                # test: only pairs with BOTH images inside the test fold
                if image1_name in image_name_list_by_fold[testFold] and image2_name in image_name_list_by_fold[
                    testFold]:
                    # save comparison label
                    if row['ans'] == 'left':
                        test_comp_labels.append((image1_name, image2_name, +1))
                    elif row['ans'] == 'right':
                        test_comp_labels.append((image1_name, image2_name, -1))
                # validation
                elif image1_name in image_name_list_by_fold[valFold] and image2_name in image_name_list_by_fold[
                    valFold]:
                    # save comparison label
                    if row['ans'] == 'left':
                        val_comp_labels.append((image1_name, image2_name, +1))
                    elif row['ans'] == 'right':
                        val_comp_labels.append((image1_name, image2_name, -1))
                # train: pairs touching NEITHER the val nor the test fold
                elif image1_name not in image_name_list_by_fold[valFold] and \
                        image2_name not in image_name_list_by_fold[valFold] and \
                        image1_name not in image_name_list_by_fold[testFold] and \
                        image2_name not in image_name_list_by_fold[testFold]:
                    if row['ans'] == 'left':
                        train_comp_labels.append((image1_name, image2_name, +1))
                    elif row['ans'] == 'right':
                        train_comp_labels.append((image1_name, image2_name, -1))
        # get all absolute labels by fold
        for row in abs_label_matrix:
            # filterName, imgId, class, score
            image_name = '/' + row['filterName'] + '/' + row['imgId'] + '.jpg'
            # test
            if image_name in image_name_list_by_fold[testFold]:
                test_abs_labels.append((image_name, row['class']))
            elif image_name in image_name_list_by_fold[valFold]:
                val_abs_labels.append((image_name, row['class']))
            # train
            elif image_name in image_name_list:  # check category
                train_abs_labels.append((image_name, row['class']))
        train_abs_labels = np.array(train_abs_labels)
        val_abs_labels = np.array(val_abs_labels)
        test_abs_labels = np.array(test_abs_labels)
        train_comp_labels = np.array(train_comp_labels)
        val_comp_labels = np.array(val_comp_labels)
        test_comp_labels = np.array(test_comp_labels)
        # Persisted next to the script (not in self.dir); load_data expects
        # them under self.dir — NOTE(review): confirm files are moved/copied.
        np.save('train_abs_cat_' + str(self.category), train_abs_labels)
        np.save('val_abs_cat_' + str(self.category), val_abs_labels)
        np.save('test_abs_cat_' + str(self.category), test_abs_labels)
        np.save('train_comp_cat_' + str(self.category), train_comp_labels)
        np.save('val_comp_cat_' + str(self.category), val_comp_labels)
        np.save('test_comp_cat_' + str(self.category), test_comp_labels)
6568900 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Script to test and demonstrate methods for importing data from NODLE excel
template
@author: RIHY
"""
# Demo driver: exercises the NODLE Excel import helpers end-to-end.
import nodle
from common import read_block  # NOTE(review): imported but unused here — confirm before removing
import matplotlib.pyplot as plt
plt.close('all')
#%%
# Read coordinate (COO) and member (MEM) tables, then build and plot the mesh.
fname = 'NODLE_demo.xlsx'
COO_df = nodle.read_COO(fname)
print(COO_df)
MEM_df = nodle.read_MEM(fname)
print(MEM_df)
mesh_obj = nodle.read_mesh(fname)
mesh_obj.define_gauss_points(N_gp=2)
mesh_obj.plot(plot_gps=True)
#%%
#
#fname = 'bigger_model.xlsx'
#mesh_obj = nodle.read_mesh(fname)
#mesh_obj.define_gauss_points(N_gp=1)
#mesh_obj.plot(plot_gps=True)
#%%
# Attach displacement results from the .res file onto the mesh nodes.
results_fname = 'NODLE_demo.res'
nodle.read_DIS(results_fname,mesh_obj=mesh_obj)
#%%
# Spot-check results have been assigned to nodes properly
nodeID = 103
node_obj = mesh_obj.node_objs[nodeID]
print(node_obj.lcase_disp)
print(node_obj.modal_disp)
#%%
#%%
#mesh_obj.plot_results(results_type='disp',lcase=1,lcase_type=2)
| StarcoderdataPython |
3531679 | # External module dependencies
from typing import Callable, Optional, List, Dict, Set
from threading import Thread
from queue import Queue, Empty
from time import sleep
# Internal module dependencies
from .util import SingleWriteMultipleReadLock
from . import log
###############################################################################
# Exceptions
###############################################################################
class TaskError(Exception):
    """Raised when a task's work fails; carries a short description plus a
    detail message for logging."""
    def __init__(self, description : str, message : str):
        super().__init__()
        self.description, self.message = description, message
###############################################################################
# Classes
###############################################################################
Work = Callable[[], Optional[str]]

class Task:
    """Node in a dependency graph wrapping a work callable plus per-flow stages.

    The ``_deps``/``_refs`` sets are kept symmetric: ``a in b.get_deps()``
    exactly when ``b in a.get_refs()``. A ``force`` task always reports
    invalid and active, so it is re-run unconditionally.
    """
    def __init__(self,
        flows : Dict[str, int],
        work : Work,
        force : bool = False
        ):
        self._valid : bool = False
        self._active : bool = True
        self._force : bool = force
        self._deps : Set['Task'] = set()
        self._refs : Set['Task'] = set()
        self._flows : Dict[str, int] = flows
        self._work : Work = work

    def __hash__(self):
        # Identity hash: two distinct tasks never collide on purpose.
        return hash(id(self))

    def get_deps(self) -> Set['Task']:
        return self._deps

    def get_refs(self) -> Set['Task']:
        return self._refs

    def get_flows(self):
        return self._flows

    def get_valid(self):
        return False if self._force else self._valid

    def set_valid(self, valid : bool):
        self._valid = valid

    def get_active(self):
        return True if self._force else self._active

    def set_active(self, active : bool):
        self._active = active

    def add_dependency(self, other : 'Task'):
        # A task may only depend on same-or-earlier stages of shared flows.
        for flow, stage in self._flows.items():
            other_stage = other._flows.get(flow)
            if other_stage is not None and stage < other_stage:
                raise RuntimeError(
                    'Tasks can not depend on tasks of '
                    'later stages in the same workflow!'
                )
        self._deps.add(other)
        other._refs.add(self)

    def add_dependencies(self, others : List['Task']):
        for other in others:
            self.add_dependency(other)

    def remove_dependency(self, other : 'Task'):
        if other in self._deps:
            self._deps.remove(other)
            other._refs.remove(self)

    def remove_dependencies(self, others : List['Task']):
        for other in others:
            self.remove_dependency(other)

    def clear_dependencies(self):
        # Pop one by one so each dependency's back-reference is unlinked too.
        while self._deps:
            dep = self._deps.pop()
            dep._refs.remove(self)

    def perform(self) -> Optional[str]:
        return self._work()
class Worker(Thread):
    """Thread that repeatedly asks ``dequeue`` for task sequences and runs them."""

    def __init__(self,
        dequeue : Callable[[Callable[[List[Task]], None]], None],
        index : int
        ):
        super().__init__()
        self._dequeue = dequeue
        self._index = index
        self._running = False

    def _work(self, sequence : List[Task]):
        """Perform each task in order, logging any textual output it returns."""
        for task in sequence:
            message = task.perform()
            if message:
                log.debug('Worker %d: %s' % (self._index, message))

    def run(self):
        """Thread main loop: keep requesting work until ``stop`` is called."""
        self._running = True
        while self._running:
            self._dequeue(self._work)

    def stop(self):
        """Ask the loop in ``run`` to exit after its current dequeue call."""
        self._running = False
Batch = List[Task]
Program = List[List[Batch]]
class Evaluator:
    """Runs a program (list of batches of task sequences) on a worker pool.

    Batches execute in order; the sequences of one batch run concurrently
    across workers. ``pause``/``resume`` gate the workers through a
    single-write/multiple-read lock so the program can be swapped safely.
    """
    def __init__(self, worker_count : int):
        self._running : bool = False
        self._paused : bool = False
        self._program : Program = []
        self._queue : Queue[List[Task]] = Queue()
        self._exception : Queue[TaskError] = Queue()
        self._pause = SingleWriteMultipleReadLock()
        self._workers = [
            Worker(self._dequeue, index + 1)
            for index in range(worker_count)
        ]
    def _dequeue(self, perform : Callable[[List[Task]], None]):
        """Worker callback: take one sequence off the queue and perform it.

        Task failures surface as TaskError from ``perform`` and are parked on
        the exception queue for the main loop; an empty queue just sleeps.
        """
        self._pause.acquire_read()
        try:
            work = self._queue.get(timeout=1)
            perform(work)
            self._queue.task_done()
        except Empty: sleep(1)
        except TaskError as e:
            self._exception.put(e)
            self._queue.task_done()
        finally:
            self._pause.release_read()
    def _check(self) -> bool:
        """Return False (after dispatching to on_task_error) when a worker
        reported a TaskError; True when all is well."""
        try:
            error = self._exception.get(block = False)
            self._exception.task_done()
            self.on_task_error(error)
            return False
        except Empty: return True
    def on_task_error(self, error : TaskError) -> None:
        """Error hook; the default simply re-raises. Override to log/recover."""
        raise error
    def start(self):
        """Blocking main loop: feed batches to the workers until ``stop``."""
        log.debug('Evaluator.start()')
        if self._running:
            raise RuntimeError('Evaluator can not start if already running!')
        self._running = True
        # Main loop
        for worker in self._workers: worker.start()
        try:
            while self._running:
                # On a task error, drop the remaining program (default hook
                # re-raises inside _check before this branch is reached).
                if not self._check() and len(self._program) != 0:
                    self.pause()
                    self.deprogram()
                    self.resume()
                if len(self._program) == 0: sleep(1); continue
                batch = self._program.pop(0)
                for sequence in batch: self._queue.put(sequence)
                # Wait for the whole batch before starting the next one.
                self._queue.join()
        finally:
            self._running = False
            for worker in self._workers: worker.stop()
            for worker in self._workers: worker.join()
    def pause(self):
        """Block the workers (writer side of the pause lock)."""
        log.debug('Evaluator.pause()')
        if self._paused:
            raise RuntimeError('Evaluator can not pause if already paused!')
        self._pause.acquire_read()
        self._paused = True
    def reprogram(self, program : Program):
        """Replace the scheduled program; must be called while paused."""
        log.debug('Evaluator.reprogram()')
        if self._running and not self._paused:
            raise RuntimeError('Evaluator must be paused prior to reprogramming!')
        # Clear existing program
        while not self._queue.empty():
            try: self._queue.get()
            except Empty: break
            self._queue.task_done()
        # Set the new schedule
        self._program = program
    def deprogram(self):
        """Drop the scheduled program and queued work; must be called while paused."""
        log.debug('Evaluator.deprogram()')
        if self._running and not self._paused:
            raise RuntimeError('Evaluator must be paused prior to deprogramming!')
        # Clear existing program
        while not self._queue.empty():
            try: self._queue.get()
            except Empty: break
            self._queue.task_done()
        # Clear schedule
        self._program = list()
    def resume(self):
        """Unblock the workers after a pause."""
        log.debug('Evaluator.resume()')
        if not self._paused:
            raise RuntimeError('Evaluator can not resume if not paused!')
        self._paused = False
        self._pause.release_read()
    def stop(self):
        """Signal the main loop in ``start`` to wind down and join workers."""
        log.debug('Evaluator.stop()')
        if not self._running:
            raise RuntimeError('Evaluator can not stop if already not running!')
        self._running = False
| StarcoderdataPython |
11305966 | <gh_stars>0
"""
This module contains our Django helper functions for the "tutor" application.
"""
import json
import re
import time
import urllib
import websockets
from django.contrib.auth.models import User
import core.models
from accounts.models import UserInformation
from core.models import Lesson
from data_analysis.models import DataLog
from tutor.py_helper_functions.mutation import reverse_mutate
def user_auth(request):
    """function user_auth This function handles the auth logic for a user in both django users and UserInformation
    Args:
        request (HTTPRequest): A http request object created automatically by Django.
    Returns:
        Boolean: A boolean to signal if the user has been found in our database
    """
    if request.user.is_authenticated:
        # NOTE(review): User.objects.get assumes exactly one account per email;
        # it raises DoesNotExist/MultipleObjectsReturned otherwise — confirm.
        user = User.objects.get(email=request.user.email)
        if UserInformation.objects.filter(user=user).exists():
            return True
    return False
def alternate_set_check(current_lesson, alternate_type):
    """function alternate_set_check This function handles the logic for a if a lesson has an alternate
    Args:
        current_lesson: a Lesson that is currently being completed
        alternate_type: type of lesson to use for lookup, enum found in core.models.LessonAlternate. Supports None,
        in which case it will simply return None for the alternate lesson.
    Returns:
        LessonAlternate model, or None if no redirect needed
    """
    if alternate_type is None:
        # Nothing triggered, so nothing to activate
        return None
    try:
        # Obvious attempt: exact (lesson, type) alternate
        return core.models.LessonAlternate.objects.get(lesson=current_lesson, type=alternate_type)
    except core.models.LessonAlternate.DoesNotExist:
        if alternate_type != core.models.AlternateType.DEFAULT:
            try:
                # If what I was searching for type-wise doesn't exist as an alternate option, try the default
                print("Lesson", current_lesson, "activated a type of", alternate_type, "but didn't supply a lesson to "
                                                                                       "redirect to!")
                return core.models.LessonAlternate.objects.get(lesson=current_lesson,
                                                               type=core.models.AlternateType.DEFAULT)
            except core.models.LessonAlternate.DoesNotExist:
                pass
        # If all else fails, don't redirect
        return None
def check_type(current_lesson, submitted_code):
    """function check_type This function finds the type of alternate to look for.
    Only to be called on incorrect answers.
    Args:
        current_lesson (Lesson): lesson that is currently being completed
        submitted_code (String): all the code submitted to RESOLVE, mutated in the form presented to user
    Returns:
        type: type of lesson to use for lookup (integer enumeration). Default if no incorrect answers were triggered.
    """
    # Un-mutate the submission, then scan its Confirm/ensures lines; the first
    # line matching a known IncorrectAnswer for this lesson decides the type.
    for answer in get_confirm_lines(reverse_mutate(submitted_code)):
        try:
            return core.models.IncorrectAnswer.objects.get(answer_text=answer, lesson=current_lesson).type
        except core.models.IncorrectAnswer.DoesNotExist:
            continue
    return core.models.AlternateType.DEFAULT
def browser_response(current_lesson, current_assignment, current_user, submitted_answer, status, lines, unlock_next,
                     alt_activated):
    """function browser_response This function finds the feedback to show to the user
    Args:
        current_lesson: a Lesson that is currently being completed
        current_assignment: The assignment containing the lesson
        current_user: The UserInfo that is attempting the lesson
        submitted_answer: string of code that user submitted
        status: string of result from compiler
        lines: array of confirms and their statuses
        unlock_next: boolean for unlocking next button
        alt_activated: boolean changing feedback for whether a wrong answer has activated an alternate
    Returns:
        dict that should be send to front-end JS

    NOTE(review): current_assignment and current_user are currently unused —
    confirm whether they are kept for interface stability or can be dropped.
    """
    if not alt_activated:
        if status == 'success':
            headline = 'Correct'
            text = current_lesson.correct_feedback
        else:
            try:
                # Parsons (drag-the-fragments) lessons get canned feedback;
                # other lessons look up feedback keyed by the incorrect-answer type.
                if current_lesson.is_parsons:
                    headline = "Try Again!"
                    if status == "error":
                        text = "The code fragments are producing a syntax error. Ensure that if/else statments and loops have and end statement to complete them and they have content."
                    else:
                        text = "Code fragments in your program are wrong, or in wrong order. Move, remove, or replace fragments to meet the all of the confirm statements."
                else:
                    feedback = current_lesson.feedback.get(feedback_type=check_type(current_lesson, submitted_answer))
                    headline = feedback.headline
                    text = feedback.feedback_text
            except core.models.Feedback.DoesNotExist:
                headline = "Try Again!"
                text = "Did you read the reference material?"
    else:
        # Placeholder copy shown when an alternate lesson was triggered.
        headline = "ALT!"
        text = "[explanation about alt, directions to hit next lesson]"
    return {'resultsHeader': headline,
            'resultDetails': text,
            'status': status,
            'lines': lines,
            'unlock_next': unlock_next
            }
def align_with_previous_lesson(user, code):
    """function align_with_previous_lesson This function changes the mutation to match the last lesson they did
    Args:
        user: user model using tutor
        code: code that user submitted in last lesson
    Returns:
        code: code with variables matching letters of that from their last lesson
    """
    # Most recent submission for this user; assumes at least one DataLog exists
    # (callers guard via replace_previous) — confirm before reuse elsewhere.
    last_attempt = DataLog.objects.filter(user_key=User.objects.get(email=user)).order_by('-id')[0].code
    occurrence = 3  # NOTE(review): unused — presumably leftover; confirm before removing
    original = ["I", "J", "K"]
    fruit = ["Apple", "Banana", "Orange"]
    variables = []
    index = 0
    # Pull the variable names out of the "Var ...:" declaration of the last attempt.
    start = last_attempt.find("Var", index) + 3
    end = last_attempt.find(":", start)
    variables = last_attempt[start:end]
    variables = variables.split()
    for i in range(0, len(variables)):
        variables[i] = variables[i][:1]
    # Two-step rename (I/J/K -> fruit -> target letters) avoids collisions when
    # a target letter equals one of the source letters.
    for i in range(0, len(variables)):
        code = code.replace(original[i], fruit[i])
    for i in range(0, len(variables)):
        code = code.replace(fruit[i], variables[i])
    # Repair keywords accidentally hit by the plain-text replace of "I"
    # ("Integer" and "If" contain the letter I).
    change = variables[0] + "nteger"
    code = code.replace(change, "Integer")
    change = variables[0] + "f"
    code = code.replace(change, "If")
    return code
def replace_previous(user, code, is_alt):
    """function replace_previous This function changes the previous lesson code
    Args:
        user: user model using tutor
        code: code that user submitted in last lesson
        is_alt: boolean for if alternate lesson
    Returns:
        code: lesson code with Confirm statements carried over from the user's
        last attempt when the lesson contains a "/*previous" marker
    """
    if not DataLog.objects.filter(user_key=User.objects.get(email=user)).exists():
        print("There is no datalog")
        return code
    if is_alt:
        code = align_with_previous_lesson(user, code)
    last_attempt = DataLog.objects.filter(user_key=User.objects.get(email=user)).order_by('-id')[0].code
    # Checks if there is code to be replaced
    present = code.find('/*previous')
    if present != -1:
        print("present")
        # Up to 10 Confirm statements (each uses two indices: start and
        # one-past-the-semicolon). NOTE(review): the loop step of 2 combined
        # with two appends per iteration looks fragile — confirm index math.
        occurrence = 20
        indices1 = []
        indices2 = []
        index1 = 0
        index2 = 0
        # Has to identify the starting and ending index for each confirm statement. The format does differ
        # between the old and new.
        for i in range(0, occurrence, 2):
            if last_attempt.find('Confirm', index1) != -1:
                indices1.append(last_attempt.find('Confirm', index1))
                index1 = indices1[i] + 1
                indices1.append(last_attempt.find(';', index1) + 1)
                index1 = indices1[i + 1] + 1
                indices2.append(code.find('Confirm', index2))
                index2 = indices2[i] + 1
                indices2.append(code.find(';', index2) + 1)
                index2 = indices2[i + 1] + 1
        old_strings = []
        new_strings = []
        for i in range(0, len(indices1), 2):
            old_strings.append(last_attempt[indices1[i]:indices1[i + 1]])
            new_strings.append(code[indices2[i]:indices2[i + 1]])
        # Swap each new Confirm for the matching one from the last attempt.
        for i in range(0, len(old_strings)):
            code = code.replace(new_strings[i], old_strings[i])
    return code
async def send_to_verifier(code):
    """
    Sends a string to RESOLVE verifier and interprets its response.
    @param code: A string that the user submitted through the browser
    @return: Tuple defined as (status string, lines dict, vcs dict, time taken)
    """
    async with websockets.connect(
            'wss://resolve.cs.clemson.edu/teaching/Compiler?job=verify2&project=Teaching_Project', ping_interval=None) \
            as ws:
        start_time = time.time()
        await ws.send(encode(code))
        vcs = {}  # vc ID to 'success' or 'failure'
        vcs_info = {}  # vc IDs to actual strings of results for data logging
        # Stream messages until a terminal 'error'/'complete' (or malformed) one.
        while True:
            response = json.loads(await ws.recv())
            if response.get('status') == 'error':
                # Need to do some crazy stuff because of the way RESOLVE's errors work:
                # decode each nested message and report each line at most once.
                lines = []
                for error_dict in response['errors']:
                    for error_dict_sub in error_dict['errors']:
                        error_dict_sub['error']['msg'] = decode(error_dict_sub['error']['msg'])
                        unique = True
                        line_num = error_dict_sub['error']['ln']
                        for line in lines:
                            if line['lineNum'] == line_num:
                                unique = False
                                break
                        if unique:
                            lines.append({'lineNum': line_num, 'status': 'failure'})
                return 'error', lines, response['errors'], time.time() - start_time
            if response.get('status') is None:
                # Message without a status — treat as a verifier failure.
                return 'failure', None, '', time.time() - start_time
            if response['status'] == 'processing':
                # One VC verdict at a time; a result starting with "Proved" passes.
                result = response['result']
                vcs_info[result['id']] = result['result']
                if re.search(r"^Proved", result['result']):
                    vcs[result['id']] = 'success'
                else:
                    vcs[result['id']] = 'failure'
            if response['status'] == 'complete':
                response['result'] = decode_json(response['result'])
                lines = overall_status(response, vcs)
                join_vc_info(response['result']['vcs'], vcs_info)
                return response['status'], lines, response['result']['vcs'], time.time() - start_time
def join_vc_info(vcs, vcs_info):
    """
    Attach the per-VC result strings gathered from RESOLVE's processing
    responses onto the VC entries of its final response (for data logging).
    @param vcs: VC dicts from RESOLVE's final response (each carries a 'vc' id)
    @param vcs_info: Dict of VC IDs to VC info generated from processing responses
    @return: None
    """
    for entry in vcs:
        entry['result'] = vcs_info[entry.get('vc')]
def encode(data):
    """
    Wrap user code in the JSON envelope the Resolve Web API currently
    expects. Yes, the shape is odd — it mirrors the API as-is; if you want
    to fix this, PLEASE DO.
    """
    payload = {
        'name': 'BeginToReason',
        'pkg': 'User',
        'project': 'Teaching_Project',
        'content': urllib.parse.quote(data),
        'parent': 'undefined',
        'type': 'f',
    }
    return json.dumps(payload)
def decode(data):
    """
    Undo the layered URL-encoding RESOLVE applies (ported straight from
    editorUtils.js); also strips the <vcFile> wrapper and all newlines.
    """
    text = urllib.parse.unquote(data)
    text = re.sub(r"%20", " ", text)
    text = re.sub(r"%2B", "+", text)
    text = re.sub(r"<vcFile>(.*)</vcFile>", r"\1", text)
    text = urllib.parse.unquote(urllib.parse.unquote(text))
    return re.sub(r"\n", "", text)
def decode_json(data):
    """Decode RESOLVE's layered url-encoding, then parse the result as JSON."""
    return json.loads(decode(data))
def overall_status(data, vcs):
    """
    Fold the per-VC verdicts into an overall 'success'/'failure' status.

    Mutates ``data['status']`` in place and returns per-line statuses; a line
    with at least one failed VC is reported as failed.
    @param data: RESOLVE's final response
    @param vcs: dict of VC id -> 'success'/'failure' built from the processing responses
    @return: array of {'lineNum', 'status'} dicts
    """
    overall = 'success'
    line_status = {}
    for vc in data['result']['vcs']:
        verdict = vcs.get(vc.get('vc'))
        if verdict != 'success':
            overall = 'failure'
        line = vc.get('lineNum')
        if line_status.get(line) != 'failure':  # never overwrite a failed line
            line_status[line] = verdict
    # Update response, then flatten the per-line dict into an array of dicts.
    data['status'] = overall
    return [{"lineNum": line, "status": status} for line, status in line_status.items()]
def get_confirm_lines(code):
    """
    Extract every ``Confirm``/``ensures`` statement from code submitted to the
    RESOLVE verifier, keeping the trailing semicolon, dropping the keyword
    itself, and removing all spaces.
    @param code: All code submitted to RESOLVE verifier
    @return: List of confirm/ensures statements with all spaces removed
    """
    # [^;]* = any run of non-semicolon characters, so each alternative grabs
    # one statement up to and including its terminating semicolon.  findall
    # returns a 2-tuple per match (one slot per capture group); the unmatched
    # slot is the empty string and is filtered out below.
    pattern = re.compile("Confirm ([^;]*;)|ensures ([^;]*;)")
    return [group.replace(" ", "")
            for match in pattern.findall(code)
            for group in match
            if group]
def clean_variable(variable):
    """
    Make a string safe for an HTML template by escaping newline characters.
    @param variable: A string (most likely code submitted by user)
    @return: Escaped string
    """
    # CRLF first, so a "\r\n" pair becomes one escaped pair rather than
    # being caught piecemeal by the single-character replacements below.
    variable = variable.replace("\r\n", r"\r\n")
    variable = variable.replace("\r", r"\r")
    return variable.replace("\n", r"\n")
9652793 | <reponame>kuis-isle3hw/simple_assembler
import os
import sys
def read_data():
    """
    Read the source file named by the first command line argument and return
    its lines, stripped of surrounding whitespace, as a list.

    Prints a usage message and exits with status 1 when no input file was
    given on the command line.
    """
    if len(sys.argv) < 2:
        print("usage: python3 assembler.py input-file [output-file]", file=sys.stderr)
        exit(1)
    path_in = sys.argv[1]
    # 'with' guarantees the handle is closed even if reading fails
    # (the original leaked the descriptor on a read error).
    with open(path_in) as fin:
        return [line.strip() for line in fin]
def preproc(line):
    """
    Split one assembly source line into its mnemonic and argument list.

    Arguments are comma separated; a ``d(Rb)`` operand contributes two
    integers, ``d`` then ``Rb``.  Raises ValueError when any argument is
    not an integer.
    """
    head, _, tail = line.partition(" ")
    cmd = head.upper()
    pieces = [s.strip() for s in tail.split(",") if s != ""]
    args = []
    for piece in pieces:
        if "(" in piece and ")" in piece:
            # displacement(base-register) form: two integers.
            displacement = piece[: piece.find("(")].strip()
            base = piece[piece.find("(") + 1 : piece.find(")")].strip()
            try:
                args.append(int(displacement))
                args.append(int(base))
            except Exception:
                raise ValueError
        else:
            try:
                args.append(int(piece))
            except Exception:
                raise ValueError
    return cmd, args
def to_binary(num, digit, signed=False):
    """
    Render *num* as a *digit*-character binary string.

    Unsigned values are zero padded; signed values are emitted as
    *digit*-bit two's complement.  Raises ValueError when the value does
    not fit in *digit* bits.
    """
    spec = "0{}b".format(digit)
    if signed:
        if not -(2 ** (digit - 1)) <= num < 2 ** (digit - 1):
            raise ValueError(num)
        # Masking converts a negative value to its two's complement bits.
        return format(num & (2 ** digit - 1), spec)
    if not 0 <= num < 2 ** digit:
        raise ValueError(num)
    return format(num, spec)
# Function-field encodings for register-register ALU instructions:
# 11 | Rb(3) | Ra(3) | funct(4) | 0000
_ALU_FUNCT = {"ADD": "0000", "SUB": "0001", "AND": "0010", "OR": "0011",
              "XOR": "0100", "CMP": "0101", "MOV": "0110"}
# Shift instructions: 11 | 000 | Rd(3) | funct(4) | amount(4)
_SHIFT_FUNCT = {"SLL": "1000", "SLR": "1001", "SRL": "1010", "SRA": "1011"}
# Memory access opcode prefixes: prefix(2) | Ra(3) | Rb(3) | disp(8, signed)
_MEM_PREFIX = {"LD": "00", "ST": "01"}
# Branch condition fields: 10 | cond(6) | offset(8, signed)
_BRANCH_COND = {"B": "100" + "000", "BE": "111" + "000", "BLT": "111" + "001",
                "BLE": "111" + "010", "BNE": "111" + "011"}


def _encode(cmd, args):
    """Encode one (mnemonic, args) pair as a 16-character bit string.

    Returns None for an unknown mnemonic; propagates ValueError when an
    operand does not fit its field.
    """
    if cmd in _ALU_FUNCT:
        return ("11" + to_binary(args[1], 3) + to_binary(args[0], 3)
                + _ALU_FUNCT[cmd] + "0000")
    if cmd in _SHIFT_FUNCT:
        return ("11" + "000" + to_binary(args[0], 3)
                + _SHIFT_FUNCT[cmd] + to_binary(args[1], 4))
    if cmd == "IN":
        return "11" + "000" + to_binary(args[0], 3) + "1100" + "0000"
    if cmd == "OUT":
        return "11" + to_binary(args[0], 3) + "000" + "1101" + "0000"
    if cmd == "HLT":
        return "11" + "000" + "000" + "1111" + "0000"
    if cmd in _MEM_PREFIX:
        return (_MEM_PREFIX[cmd] + to_binary(args[0], 3) + to_binary(args[2], 3)
                + to_binary(args[1], 8, signed=True))
    if cmd == "LI":
        return ("10" + "000" + to_binary(args[0], 3)
                + to_binary(args[1], 8, signed=True))
    if cmd in _BRANCH_COND:
        return "10" + _BRANCH_COND[cmd] + to_binary(args[0], 8, signed=True)
    return None


def assemble(data):
    """Assemble source lines into a list of 16-bit binary word strings.

    On any error (bad argument, unknown mnemonic, out-of-range value) an
    error message naming the offending 1-based line is printed to stderr
    and the program exits with status 1.
    """
    result = []
    for i in range(len(data)):
        try:
            cmd, args = preproc(data[i])
        except ValueError:
            print(str(i + 1) + "行目: 命令の引数が不正です", file=sys.stderr)
            exit(1)
        try:
            word = _encode(cmd, args)
            if word is None:
                print(str(i + 1) + "行目:コマンド名が正しくありません", file=sys.stderr)
                exit(1)
            result.append(word)
        except ValueError as e:
            print(str(i + 1) + "行目 " + str(e) + ": 値の大きさが不正です", file=sys.stderr)
            exit(1)
    return result
def write_result(result):
    """
    Write the assembled words as an Altera/Quartus .mif memory image.

    If a second command line argument was given, output goes to that file;
    otherwise it goes to stdout.  Word width is 16 and depth is 256.
    Addresses are written in decimal, data in binary.
    """
    def emit(out):
        # Shared writer so the file and stdout paths produce identical text.
        out.write("WIDTH=16;\n")
        out.write("DEPTH=256;\n")
        out.write("ADDRESS_RADIX=DEC;\n")
        out.write("DATA_RADIX=BIN;\n")
        out.write("CONTENT BEGIN\n")
        for i in range(len(result)):
            out.write("\t" + str(i) + " : " + result[i] + ";\n")
        out.write("END;\n")

    if len(sys.argv) >= 3:
        # 'with' ensures the output file is flushed and closed on all paths.
        with open(sys.argv[2], mode="w") as fout:
            emit(fout)
    else:
        emit(sys.stdout)
# Script entry point: read the input file, assemble it, and emit the .mif
# image (to the optional second argument, else stdout).  Runs on import as
# well, since there is no __main__ guard.
data = read_data()
result = assemble(data)
write_result(result)
| StarcoderdataPython |
4882998 | <reponame>abcdefg-dev-dd/asxdcvfg<filename>main.py
from __future__ import division
import argparse
from dataset.pems_d import *
from utils.metrics import *
from utils.process import *
import os
from trainer.ctrainer import CTrainer
from trainer.rtrainer import RTrainer
from nets.traverse_net import TraverseNet, TraverseNetst
from nets.stgcn_net import STGCNnet
from nets.graphwavenet import gwnet
from nets.astgcn_net import ASTGCNnet
from nets.dcrnn_net import DCRNNModel
import pickle
import dgl
import json
import random
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
import numpy as np
torch.set_num_threads(3)
def str_to_bool(value):
    """Coerce a yes/no style string (or an actual bool) to a bool.

    Raises ValueError for strings that are neither truthy nor falsy.
    """
    if isinstance(value, bool):
        return value
    normalized = value.lower()
    if normalized in {'true', 't', '1', 'yes', 'y'}:
        return True
    if normalized in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError(f'{value} is not a valid boolean value')
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Build a LambdaLR schedule whose multiplicative factor climbs linearly
    from 0 to 1 over ``num_warmup_steps``, then decays linearly back to 0
    at ``num_training_steps`` (clamped at 0 afterwards).

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Warm-up: fraction of the warm-up period completed so far.
            return float(current_step) / float(max(1, num_warmup_steps))
        remaining = float(num_training_steps - current_step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def gpu_setup(use_gpu, gpu_id):
    """Pin CUDA device selection through env vars and return a torch device.

    Falls back to CPU when CUDA is unavailable or ``use_gpu`` is False.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    # NOTE(review): this message also prints when CUDA exists but use_gpu
    # is False -- preserved as-is.
    print('cuda not available')
    return torch.device("cpu")
def run(dataloader, device,params,net_params, adj_mx=None):
    """Train the model named by net_params['model'] and report its metrics.

    Builds the requested network and trainer, trains for params['epochs']
    epochs (checkpointing every epoch and keeping the best-validation
    weights), then evaluates the best checkpoint on train/val/test.

    Returns a 9-tuple: (train MAE, MAPE, RMSE, valid MAE, MAPE, RMSE,
    test MAE, MAPE, RMSE).
    """
    scaler = dataloader['scaler']
    if net_params['model']=='traversenet':
        file = open(params['graph_path'], "rb")
        graph = pickle.load(file)
        file.close()  # close the handle once loaded (matches the -ab branch)
        relkeys = graph.keys()
        print([t[1] for t in graph.keys()])
        graph = dgl.heterograph(graph)
        graph = graph.to(device)
        model = TraverseNet(net_params, graph, relkeys)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
        num_training_steps = dataloader['train_loader'].num_batch*params['epochs']
        num_warmup_steps = int(num_training_steps*0.1)
        print('num_training_step:', num_training_steps)
        #lr_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps)
        trainer = CTrainer(model, optimizer, masked_mae, dataloader, params, net_params['seq_out_len'], scaler, device)
    elif net_params['model']=='traversenet-ab':
        #traversenet-ab is a model setting in ablation study that interleaves temporal graphs with spatial graphs.
        file = open(params['graph_path'], "rb")
        graph = pickle.load(file)
        file.close()
        relkeys = graph.keys()
        print([t[1] for t in graph.keys()])
        graph = dgl.heterograph(graph)
        graph = graph.to(device)
        file1 = open(params['graph_path1'], "rb")
        graph1 = pickle.load(file1)
        file1.close()
        relkeys1 = graph1.keys()
        print([t[1] for t in graph1.keys()])
        graph1 = dgl.heterograph(graph1)
        graph1 = graph1.to(device)
        model = TraverseNetst(net_params, graph, graph1, relkeys, relkeys1)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
        trainer = CTrainer(model, optimizer, masked_mae, dataloader, params, net_params['seq_out_len'], scaler, device)
    elif net_params['model']=='stgcn':
        # STGCN uses a symmetrically-normalized (dense) adjacency matrix.
        adj_mx = sym_adj((adj_mx+adj_mx.transpose())/2)
        adj_mx = torch.Tensor(adj_mx.todense()).to(device)
        model = STGCNnet(net_params, adj_mx)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
        trainer = CTrainer(model, optimizer, masked_mae, dataloader, params, net_params['seq_out_len'], scaler, device)
    elif net_params['model']=='graphwavenet':
        # Graph WaveNet expects forward and backward transition matrices.
        supports = [torch.Tensor(asym_adj(adj_mx).todense()).to(device),torch.Tensor(asym_adj(np.transpose(adj_mx)).todense()).to(device)]
        model = gwnet(net_params, device, supports)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
        trainer = CTrainer(model, optimizer, masked_mae, dataloader, params, net_params['seq_out_len'], scaler, device)
    elif net_params['model']=='astgcn':
        # ASTGCN works on Chebyshev polynomials of the scaled graph Laplacian.
        L_tilde = scaled_Laplacian(adj_mx)
        cheb_polynomials = [torch.from_numpy(i).type(torch.FloatTensor).to(device) for i in
                            cheb_polynomial(L_tilde, net_params['K'])]
        model = ASTGCNnet(cheb_polynomials, net_params, device)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
        trainer = CTrainer(model, optimizer, masked_mae, dataloader, params, net_params['seq_out_len'], scaler, device)
    elif net_params['model']=='dcrnn':
        model = DCRNNModel(adj_mx, device, net_params)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], eps=params['epsilon'])
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30, 40, 50], gamma=0.1)
        trainer = RTrainer(model, optimizer, lr_scheduler, masked_mae, dataloader, params, net_params, scaler, device)
    elif net_params['model']=='gru':
        #the GRU model is equivalent to a DCRNN model with identity graph adjacency matrix.
        adj_mx = np.eye(net_params['num_nodes'])
        model = DCRNNModel(adj_mx, device, net_params)
        optimizer = optim.Adam(model.parameters(), lr=params['lr'], eps=params['epsilon'])
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30, 40, 50], gamma=0.1)
        trainer = RTrainer(model, optimizer, lr_scheduler, masked_mae, dataloader, params, net_params, scaler, device)
    else:
        print("model is not defined.")
        # BUG FIX: was a bare `exit` (a no-op expression) -- actually exit here
        # instead of crashing later with NameError on `model`.
        exit(1)
    nParams = sum([p.nelement() for p in model.parameters()])
    print('Number of model parameters is', nParams)
    print("start training...",flush=True)
    his_loss, train_time, val_time = [], [], []
    minl = 1e5
    for i in range(params['epochs']):
        train_loss,train_mape,train_rmse, traint = trainer.train_epoch()
        train_time.append(traint)
        valid_loss, valid_mape, valid_rmse, valt = trainer.val_epoch()
        val_time.append(valt)
        his_loss.append(valid_loss)
        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch, Valid Time: {:.4f}/epoch'
        print(log.format(i, train_loss, train_mape, train_rmse, valid_loss, valid_mape, valid_rmse, traint, valt),flush=True)
        out_dir = params['out_dir']
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # Checkpoint every epoch; keep a separate copy of the best-validation weights.
        torch.save(model.state_dict(), '{}.pkl'.format(out_dir + "/epoch_" + str(i)))
        if valid_loss<minl:
            torch.save(trainer.model.state_dict(), '{}.pkl'.format(out_dir + "/epoch_best"))
            minl = valid_loss
    print("Average Training Time: {:.4f} secs/epoch".format(np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))
    bestid = np.argmin(his_loss)
    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid],4)))
    # Reload the best checkpoint before the final evaluation pass.
    trainer.model.load_state_dict(torch.load('{}.pkl'.format(out_dir + "/epoch_best")))
    trmae, trmape, trrmse = trainer.ev_valid('train')
    vmae, vmape, vrmse = trainer.ev_valid('val')
    tmae, tmape, trmse = trainer.ev_valid('test')
    print('test', tmae,tmape,trmse)
    return trmae, trmape, trrmse, vmae, vmape, vrmse, tmae, tmape, trmse
def main():
    """Parse CLI args, merge them over the JSON config, and run the benchmark.

    Any CLI value that is not None overrides the corresponding entry in
    config['params'] / config['net_params'].  Runs the experiment
    params['runs'] times and prints mean/std MAE, RMSE and MAPE for the
    train, valid and test splits.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='./config/traversenet.json')
    parser.add_argument('--seed', type=int, help='random seed')
    parser.add_argument('--epochs', type=int, help='number of epochs')
    parser.add_argument('--runs', type=int, help='number of runs')
    parser.add_argument('--batch_size', type=int, help='batch size')
    parser.add_argument('--gpuid', type=int, help='device')
    parser.add_argument('--dataset', type=str, help='dataset')
    parser.add_argument('--graph_path', type=str, help='preprocessed graph for graph traversenet')
    parser.add_argument('--graph_path1', type=str, help='another preprocessed graph for a variant of graph traversenet in ablation study')
    parser.add_argument('--in_dim', type=int, help='dimension of inputs')
    parser.add_argument('--out_dim', type=int, help='dimension of outputs')
    parser.add_argument('--dim', type=int, help='hidden feature dimsion')
    parser.add_argument('--dropout', type=float, help='dropout rate')
    parser.add_argument('--lr', type=float, help='learning rate')
    parser.add_argument('--weight_decay', type=float, help='weight decay rate')
    parser.add_argument('--heads', type=int, help='number of heads for the attention mechanism')
    parser.add_argument('--num_layers', type=int, help='number of layers')
    parser.add_argument('--num_nodes', type=int, help='number of nodes')
    parser.add_argument('--num_rel', type=int, help='number of relation types in the preprocessed heterogenous graph')
    parser.add_argument('--seq_in_len', type=int, help='input sequence length')
    parser.add_argument('--seq_out_len', type=int, help='output sequence length')
    parser.add_argument('--out_dir', type=str, help='model save path')
    parser.add_argument('--model', type=str, help='model name')
    parser.add_argument('--out_level', type=int, default=0, help='output level, 0 for traffic flow prediction, 2 for traffic speed prediction.(only applicable to PEMS-04 and PEMS-08)')
    args = parser.parse_args()
    print(args)

    with open(args.config) as f:
        config = json.load(f)
    if args.gpuid is not None:
        config['gpu']['id'] = args.gpuid

    def _override(section, keys):
        # Copy every CLI value that was actually supplied into the config dict.
        for key in keys:
            value = getattr(args, key)
            if value is not None:
                section[key] = value

    params = config['params']
    _override(params, ('seed', 'epochs', 'runs', 'batch_size', 'dataset',
                       'graph_path', 'graph_path1', 'out_dir', 'out_level',
                       'lr', 'weight_decay'))
    net_params = config['net_params']
    _override(net_params, ('model', 'dim', 'in_dim', 'out_dim', 'num_layers',
                           'num_nodes', 'seq_in_len', 'seq_out_len', 'num_rel',
                           'dropout', 'heads'))

    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # Seed every RNG source for reproducibility.
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    dataloader = load_data(params['batch_size'], "data/"+params['dataset']+".pkl", device)
    # Add self-loops before handing the adjacency to the model builders.
    adj_mx = np.array(dataloader['adj']+torch.eye(net_params['num_nodes']))

    trmae, trmape, trrmse = [], [], []
    vmae, vmape, vrmse = [], [], []
    mae, mape, rmse = [], [], []
    for i in range(params['runs']):
        tm1, tm2, tm3, vm1, vm2, vm3, m1, m2, m3 = run(dataloader, device, params, net_params, adj_mx)
        trmae.append(tm1)
        trmape.append(tm2)
        trrmse.append(tm3)
        vmae.append(vm1)
        vmape.append(vm2)
        vrmse.append(vm3)
        mae.append(m1)
        mape.append(m2)
        rmse.append(m3)

    def _report(label, maes, rmses, mapes):
        # Emit one split's summary in the exact legacy output format.
        print(label + '\tMAE\tRMSE\tMAPE')
        log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
        print(log.format(np.mean(maes), np.mean(rmses), np.mean(mapes)))
        log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
        print(log.format(np.std(maes), np.std(rmses), np.std(mapes)))
        print('\n\n')

    print('\n\nResults for {:d} runs\n\n'.format(params['runs']))
    _report('train', trmae, trrmse, trmape)
    _report('valid', vmae, vrmse, vmape)
    _report('test', mae, rmse, mape)
main()
| StarcoderdataPython |
1775035 | <filename>solthiruthi/resources.py
## -*- coding: utf-8 -*-
## This file is part of Open-Tamil project.
## (C) 2015,2020 <NAME>
##
from __future__ import print_function
import os
def _make_dict_with_path( srcfiles ):
    """Map each source filename (minus its ``.txt`` suffix) to its absolute path."""
    return {name.split(u".txt")[0]: mk_path(name) for name in srcfiles}
def get_data_dir():
    """Return the absolute path of the ``data`` directory next to this module."""
    module_dir = os.path.split(os.path.abspath(__file__))[0]
    return os.path.sep.join([module_dir, u'data'])
def get_data_dictionaries( ):
    """Map each dictionary name to the absolute path of its word-list file."""
    sources = {'tamilvu': 'tamilvu_dictionary_words.txt',
               'projmad': 'proj-madurai-040415.txt',
               'wikipedia': 'wikipedia_full_text_032915.txt',
               'english': 'english_dictionary_words.txt',
               'parallel': 'parallel_dictionary.txt',
               'vatamozhi': 'monierwilliams_dictionary_words.txt'}
    return {name: mk_path(filename) for name, filename in sources.items()}
def get_data_categories( ):
    """Map each word-category name to the absolute path of its data file."""
    # add new elements to end
    category_files = ['peyargal.txt',
                      'capitals-n-countries.txt',
                      'maligaiporul.txt',
                      'mooligaigal.txt',
                      'nagarangal.txt',
                      'palam.txt',
                      'vilangugal.txt',
                      'TamilStopWords.txt']
    return _make_dict_with_path(category_files)
# Absolute data directory, resolved once at import time; mk_path() depends on it.
DATADIR = get_data_dir()
def mk_path( srcfile ):
    """Return the absolute path of *srcfile* inside the module's DATADIR."""
    pieces = [DATADIR, srcfile]
    return os.path.sep.join(pieces)
# Lookup tables built once at import time: logical name -> absolute file path.
CATEGORY_DATA_FILES = get_data_categories( )
DICTIONARY_DATA_FILES = get_data_dictionaries( )
| StarcoderdataPython |
8072854 | <reponame>noahf100/sitemap-generator<filename>scrapy_sitemap.py
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class RecipeSpider(CrawlSpider):
    """Crawls hellofresh.com and yields the URLs of recipe pages."""
    name = 'recipe_spider'

    def __init__(self):
        # NOTE(review): super().__init__() is not called; rules are injected
        # via _rules directly -- verify against scrapy's CrawlSpider.
        self.start_urls = ['https://www.hellofresh.com']
        self.wanted_prefixes = ['https://www.hellofresh.com/recipes']
        self.allowed_domains = ['www.hellofresh.com']
        # Follow every extracted link; parse_item filters which URLs to emit.
        self._rules = (Rule(LinkExtractor(), callback=self.parse_item, follow=True),)

    def _MatchesPrefix(self, url):
        # True when the URL lives under one of the wanted prefixes.
        return any(url.startswith(prefix) for prefix in self.wanted_prefixes)

    def parse_item(self, response):
        url = response.url
        print(url)
        if self._MatchesPrefix(url):
            return {'url': url}
4883598 | # Generated by Django 2.2.3 on 2020-04-14 06:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Celebrity.celebrity_name: CharField(max_length=50) with a Chinese verbose name."""

    dependencies = [
        ('Filmmakers', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='celebrity',
            name='celebrity_name',
            field=models.CharField(max_length=50, verbose_name='明星名字'),
        ),
    ]
| StarcoderdataPython |
5042788 | <reponame>cs-nerds/lishebora-shipping-service
from geopy import distance
def get_distance_km(point1: tuple, point2: tuple) -> float:
    """Return the geodesic distance in kilometres between two (lat, lon) points."""
    measurement = distance.distance(point1, point2)
    return measurement.km
5198162 | from __future__ import absolute_import, print_function
import collections
import unittest
import huffman
class TestCodebookGeneration(unittest.TestCase):
    """Checks huffman.codebook against hand-computed prefix codes."""

    def test_basic(self):
        codes = huffman.codebook([("A", 2), ("B", 4), ("C", 1), ("D", 1)])
        self.assertEqual(codes, {"A": "10", "B": "0", "C": "110", "D": "111"})

    def test_counter(self):
        # Frequencies from a short phrase, sorted so the input is deterministic.
        freqs = sorted(collections.Counter("man the stand banana man").items())
        codes = huffman.codebook(freqs)
        self.assertEqual(
            codes,
            {
                " ": "111",
                "a": "10",
                "b": "0101",
                "d": "0110",
                "e": "11000",
                "h": "0100",
                "m": "0111",
                "n": "00",
                "s": "11001",
                "t": "1101",
            },
        )
| StarcoderdataPython |
9728345 | #!/opt/local/bin/python
# Checked to see which directions we'll be dealing with in the input with the following:
'''
computer:12 paul$ grep L 12_input.txt | tr -d L | sort | uniq -c
26 180
7 270
62 90
computer:12 paul$ grep R 12_input.txt | tr -d R | sort | uniq -c
22 180
5 270
58 90
'''
# So, we're only dealing with simple NSEW directions, but can turn three steps in each direction.
# Input selection: the test file's expected answer is 25; the second
# assignment switches to the real puzzle input.
input_file="12_test.txt" # expected Manhattan distance: 25
input_file="12_input.txt"
# Compass points in clockwise order, so turning is index arithmetic mod 4.
headings = ["N", "E", "S", "W"]
# Valid movement actions ("F" = forward along the current heading).
movements = ["N", "E", "S", "W", "F"]
# Valid turn actions.
turns = ["L", "R"]
# Ship state: current heading and position (x east/west, y north/south).
heading = "E"
x = 0
y = 0
def load_instructions(filename):
    """Parse the puzzle input into a list of (action, amount) tuples.

    Each line is a single action letter followed by an integer amount,
    e.g. "F10" -> ("F", 10).
    """
    instructions = []
    # 'with' guarantees the file is closed (the original never closed it).
    with open(filename) as myfile:
        for line in myfile:
            instructions.append((line[0], int(line[1:])))
    return instructions
def interpret_instruction(instruction):
    """Dispatch one (action, amount) instruction to the turn or move handler."""
    action = instruction[0]
    if action in turns:
        turn_ship(instruction)
    elif action in movements:
        move_ship(instruction)
    else:
        print("Invalid instruction!")
def turn_ship(instruction):
    """Rotate the global heading left or right by a multiple of 90 degrees."""
    global heading
    # Turns are always multiples of 90 degrees, so each step is one index
    # in the clockwise `headings` table.
    quarter_turns = int(instruction[1] / 90)
    if instruction[0] == "L":
        target_idx = (headings.index(heading) - quarter_turns) % 4
    elif instruction[0] == "R":
        target_idx = (headings.index(heading) + quarter_turns) % 4
    else:
        # NOTE(review): on an invalid direction, target_idx is unbound and
        # the lookup below raises NameError (preserved from the original).
        print("Invalid heading!")
    heading = headings[target_idx]
def move_ship(instruction):
    """Move the ship, updating the global (x, y) position.

    "F" moves along the current heading; N/E/S/W move in that compass
    direction.  Odd heading indices (E, W) change x; even ones (N, S)
    change y; the second half of the compass (S, W) moves negatively.
    """
    global x
    global y
    direction = movements.index(instruction[0])
    amount = instruction[1]
    if instruction[0] == "F":
        # Forward: translate to a concrete compass direction and fall through.
        direction = movements.index(heading)
    # (The unused `shift` computation from the original has been removed.)
    if direction % 2:
        # East (1) / West (3): horizontal movement; West is negative.
        if int(direction / 2):
            amount *= -1
        x += amount
    else:
        # North (0) / South (2): vertical movement; South is negative.
        if int(direction / 2):
            amount *= -1
        y += amount
# Drive the simulation: execute every instruction, then report the final
# position and its Manhattan distance from the origin (the puzzle answer).
instlist = load_instructions(input_file)
for i in instlist:
    interpret_instruction(i)
print("Final position:", x, y)
print("Manhattan Distance:", abs(x) + abs(y))
| StarcoderdataPython |
11235480 | <reponame>mipsu/Kiny-Painel<gh_stars>1-10
#---------------------------------------#
# NOTE(review): `global` at module scope is a no-op -- these names are module
# globals already.  Kept byte-identical.  ANSI color / style escape codes used
# throughout the menus.
global R,B,C,G
R='\033[1;31m';B='\033[1;34m';C='\033[1;37m';G='\033[1;32m';Format="\033[0m";Letra="\033[38;5;15m";Fundo="\033[48;5;19m"
from os import system
from os import execl
from sys import executable
from sys import argv
from os import name
from time import sleep
#---------------------------------------#
def clear():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    system('cls' if name == 'nt' else 'clear')


def restart():
    """Re-exec the current interpreter with the same arguments."""
    execl(executable, executable, *argv)


def sair():
    """Print the goodbye banner and flag the main menu loop to stop."""
    # BUG FIX: without `global`, the assignment below created a *local* Sair
    # and the menu loop never terminated.
    global Sair
    system('rm -rf __pycache__ && clear')
    print(f'{banner}{G}_ ! _ {C}Até logo. {G} _ ! _{C}')
    Sair = True
#---------------------------------------#
# Bootstrap: if `requests` is missing, upgrade pip, install the project
# requirements and re-exec the script.  NOTE(review): the bare except also
# swallows unrelated import errors.
try: from requests import get
except: system('python3 -m pip install --upgrade pip && pip3 install -r requirements.txt');restart()
#---------------------------------------#
def show():
    """Display the LICENSE file, then wait for Enter before returning to the menu."""
    # 'with' guarantees the handle closes even if the read fails.
    with open('LICENSE', 'r') as file:
        print(file.read())
    input(f'\n{B}<{C} Aperte Enter para retornar ao menu {B}>{C}')
#---------------------------------------#
# Load the stored username; on first run (file missing), prompt for a name,
# persist it, and restart so the happy path picks it up.
try:
    user = open('username','r')
    user2 = str(user.read())
    user.close()
except:
    clear();user1=input(f'{C}Digite seu nome de usuário{B} >>>{C} ')
    user=open('username','w+')
    user.write(user1)
    user.close()
    restart()
user=user2
#---------------------------------------#
banner=f'''{B}
__ __ __ __ __ __ __
/\ \/ / /\ \ /\ "-.\ \ /\ \_\ \
\ \ _"-. \ \ \ \ \ \-. \ \ \____ \
\ \_\ \_\ \ \_\ \ \_\\"\_\ \/\_____\
\/_/\/_/ \/_/ \/_/ \/_/ \/_____/
{C}Coded By: {B}Kiny{C}\n{Fundo}{Letra}Hello, {user}!{Format} {Fundo}{C}{Letra}Version: 4.0{Format}{C} {Fundo}{C}{Letra} PIX: (61) 9603-5417{Format}{C}\n\n{Fundo}{C}{Letra}_ ! _ Esse programa foi disponiblizado gratuitamente, se você pagou, foi enganado._ ! _{Format}{C}\n\n'''
#---------------------------------------#
# Fetch a public HTTP proxy, then pull the remote API catalogue (`v`) through
# it.  NOTE(review): any failure restarts the whole script -- offline this
# loops forever.
try: api=get('http://pubproxy.com/api/proxy').json();ip=api['data'][0]['ip']+':'+api['data'][0]['port'];v=get(url='https://raw.githubusercontent.com/Kiny-Kiny/Kiny-Painel/main/source/apis.json', proxies={'http': ip}).json()
except: restart()
#---------------------------------------#
# Per-feature availability flags from the remote catalogue, rendered as
# colored ON/OFF labels for the menu.
num_status = (f'{G}ON{C}' if "ON" in v['numero'][1] else f'{R}OFF{C}')
cpf_status = (f'{G}ON{C}' if "ON" in v['cpf'][1] else f'{R}OFF{C}')
nome_status = (f'{G}ON{C}' if "ON" in v['nome'][1] else f'{R}OFF{C}')
cnpj_status = (f'{G}ON{C}' if "ON" in v['cnpj'][1] else f"{R}OFF{C}")
placa_status = (f'{G}ON{C}' if "ON" in v['placa'][1] else f"{R}OFF{C}")
ip_status = (f'{G}ON{C}' if "ON" in v['ip'][1] else f'{R}OFF{C}')  # BUG FIX: was a plain string, printed literal {R}OFF{C}
cep_status = (f'{G}ON{C}' if "ON" in v['cep'][1] else f'{R}OFF{C}')  # BUG FIX: missing f-prefix
covid_status = (f'{G}ON{C}' if "ON" in v['covid'][1] else f'{R}OFF{C}')  # BUG FIX: missing f-prefix
bin_status = (f'{G}ON{C}' if "ON" in v['bin'][1] else f'{R}OFF{C}')  # BUG FIX: missing f-prefix
banco_status = (f'{G}ON{C}' if "ON" in v['banco'][1] else f'{R}OFF{C}')  # BUG FIX: missing f-prefix
#---------------------------------------#
def rds():
    """List the social-media links from the remote catalogue, then wait for Enter."""
    clear()
    print(f'{banner}')
    for link in v['r_social']:
        print(f'{B}_ ! _{C} ', link)
    input(f'\n{B}<{C} Aperte Enter para retornar ao menu {B}>{C}')
#---------------------------------------#
# Main menu loop: repeats until Sair is flipped to True (option 0).
Sair=False
while Sair==False:
 # init() runs one lookup: maps the chosen menu option to the matching API
 # endpoint and prompt, queries it through the proxy, shows the result, and
 # recurses if the user wants another lookup.
 def init():
  if op == 1:
   r_msg = f'{B}_ ! _{C} Exemplo: 18996166070{B}_ ! _{C}\nDigite o número que irá consultar'
   req= v['numero'][0]
  elif op == 2:
   r_msg=f'{B}_ ! _{C} Exemplo: 00000000272{B}_ ! _{C}\nDigite o CPF que irá consultar '
   req= v['cpf'][0]
  elif op == 3:
   r_msg=f'{B}_ ! _{C} Exemplo: <NAME>{B}_ ! _{C}\nDigite o nome que irá consultar '
   req= v['nome'][0]
  elif op == 4:
   r_msg=f'{B}_ ! _{C} Exemplo: 01944765000142{B}_ ! _{C}\nDigite o CNPJ que irá consultar '
   req=v['cnpj'][0]
  elif op == 5:
   r_msg=f'{B}_ ! _{C} Exemplo: bpm9099{B}_ ! _{C}\nDigite a placa que irá consultar '
   req=v['placa'][0]
  elif op == 6:
   r_msg=f'{B}_ ! _{C} Exemplo: 192.168.127.12{B}_ ! _{C}\nDigite o IP que irá consultar '
   req=v['ip'][0]
  elif op ==7:
   r_msg=f'{B}_ ! _{C} Exemplo: 13218840{B}_ ! _{C}\nDigite o CEP que irá consultar '
   req=v['cep'][0]
  elif op ==8:
   r_msg=f'{B}_ ! _{C} Exemplo: RJ{B}_ ! _{C}\nDigite o UF que irá consultar '
   req=v['covid'][0]
  elif op ==9:
   r_msg=f'{B}_ ! _{C} Exemplo: 45717360{B}_ ! _{C}\nDigite a BIN que irá consultar '
   req=v['bin'][0]
  elif op ==10:
   r_msg=f'{B}_ ! _{C} Exemplo: 237{B}_ ! _{C}\nDigite o código bancario que irá consultar '
   req=v['banco'][0]
  else:
   print(f'{R}- ! -{C} Opção Inválida {R}- ! -{C}');sleep(2);restart()
  clear();r=input(f'{banner}{C}{r_msg}{B}>>>{C} ')
  # Some endpoints expect a '/json' suffix after the query value.
  # NOTE(review): 'placa' is tested twice in this condition -- one of the
  # two was probably meant to be a different endpoint; verify.
  if 'placa' in req or 'placa' in req or 'cep' in req:
   msg1=req+r+'/json'
  else:
   msg1=req+r
  try:
   msg= get(msg1, proxies={'http': ip}, verify=False).text.replace('<br>', '\n').replace('<p>', '').replace('{', '').replace('}', '').replace(',', '\n').replace(':', ' ')
  except:
   msg=f'{R}- ! -{C} API OFFLINE OU SERVIDOR FORA DO AR{R}- ! -{C}'
  clear();sub=int(input(f'{banner}\n{msg}\n{B}- ! -{C} Deseja fazer uma nova consulta? {B}- ! -{C}\n[{B}1{C}] Sim\n[{B}2{C}] Nao\n{B}===> {C}'))
  if sub == 1: init()
  elif sub == 2: sair()
  else: pass
 #---------------------------------------#
 # Show the menu and route the chosen option.  NOTE(review): the bare
 # `except: pass` hides crashes (e.g. non-numeric input) silently.
 try:
  clear();op=int(input(f'{banner} [{B}1{C}] Número [{num_status}]\n [{B}2{C}] CPF [{cpf_status}]\n [{B}3{C}] Nome [{nome_status}]\n [{B}4{C}] CNPJ [{cnpj_status}]\n [{B}5{C}] Placa [{placa_status}]\n [{B}6{C}] IP [{ip_status}]\n [{B}7{C}] CEP [{cep_status}]\n [{B}8{C}] COVID [{covid_status}]\n [{B}9{C}] BIN [{bin_status}]\n [{B}10{C}] Banco [{banco_status}]\n\n [{B}97{C}] LICENSE\n [{B}98{C}] Redes Sociais\n [{B}99{C}] Trocar nome\n [{R}0{C}] Sair\n{B} ===>{C} '))
  if op == 0: Sair=True
  elif op == 97: show()
  elif op == 98: rds()
  elif op == 99: system('rm -rf username');restart()
  else: init()
 except: pass
 # NOTE(review): sair() runs at the end of *every* iteration, not only on
 # exit; combined with sair()'s local Sair assignment this loop never stops
 # on its own -- verify intent.
 sair()
#---------------------------------------#
| StarcoderdataPython |
8187364 | <filename>labs/backend/migrations/0003_auto_20200117_1317.py
# Generated by Django 2.2.6 on 2020-01-17 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace ThreadTask.task with a required 'name' TextField.

    Existing rows receive the one-off default 'old'; preserve_default=False
    keeps that default out of the model definition going forward.
    """

    dependencies = [
        ('backend', '0002_threadtask'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='threadtask',
            name='task',
        ),
        migrations.AddField(
            model_name='threadtask',
            name='name',
            field=models.TextField(default='old'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
8042605 | import os
import glob
import json
from collections import OrderedDict
from colorama import Fore
from toolset.databases import databases
from toolset.utils.output_helper import log
class Metadata:
supported_dbs = []
for name in databases:
supported_dbs.append((name, '...'))
def __init__(self, benchmarker=None):
self.benchmarker = benchmarker
def gather_languages(self):
'''
Gathers all the known languages in the suite via the folder names
beneath FWROOT.
'''
lang_dir = os.path.join(self.benchmarker.config.lang_root)
langs = []
for dir in glob.glob(os.path.join(lang_dir, "*")):
langs.append(dir.replace(lang_dir, "")[1:])
return langs
def gather_language_tests(self, language):
'''
Gathers all the test names from a known language
'''
try:
dir = os.path.join(self.benchmarker.config.lang_root, language)
tests = map(lambda x: os.path.join(language, x), os.listdir(dir))
return filter(lambda x: os.path.isdir(
os.path.join(self.benchmarker.config.lang_root, x)), tests)
except Exception:
raise Exception(
"Unable to locate language directory: {!s}".format(language))
def get_framework_config(self, test_dir):
'''
Gets a framework's benchmark_config from the given
test directory
'''
dir_config_files = glob.glob("{!s}/{!s}/benchmark_config.json".format(
self.benchmarker.config.lang_root, test_dir))
if len(dir_config_files):
return dir_config_files[0]
else:
raise Exception(
"Unable to locate tests in test-dir: {!s}".format(test_dir))
def gather_tests(self, include=None, exclude=None):
'''
Given test names as strings, returns a list of FrameworkTest objects.
For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
variables for checking the test directory, the test database os, and
other useful items.
With no arguments, every test in this framework will be returned.
With include, only tests with this exact name will be returned.
With exclude, all tests but those excluded will be returned.
'''
# Help callers out a bit
include = include or []
exclude = exclude or []
# Search for configuration files
config_files = []
if self.benchmarker.config.test_lang:
self.benchmarker.config.test_dir = []
for lang in self.benchmarker.config.test_lang:
self.benchmarker.config.test_dir.extend(
self.gather_language_tests(lang))
if self.benchmarker.config.test_dir:
for test_dir in self.benchmarker.config.test_dir:
config_files.append(self.get_framework_config(test_dir))
else:
config_files.extend(
glob.glob("{!s}/*/*/benchmark_config.json".format(
self.benchmarker.config.lang_root)))
tests = []
for config_file_name in config_files:
config = None
with open(config_file_name, 'r') as config_file:
try:
config = json.load(config_file)
except ValueError:
log("Error loading config: {!s}".format(config_file_name),
color=Fore.RED)
raise Exception("Error loading config file")
# Find all tests in the config file
config_tests = self.parse_config(config,
os.path.dirname(config_file_name))
# Filter
for test in config_tests:
if hasattr(test, "tags"):
if "broken" in test.tags:
continue
if self.benchmarker.config.tag:
for t in self.benchmarker.config.tag:
if t in test.tags and test.name not in exclude:
tests.append(test)
break
if len(include) > 0:
if test.name in include:
tests.append(test)
elif test.name not in exclude and not self.benchmarker.config.tag:
tests.append(test)
# Ensure we were able to locate everything that was
# explicitly included
if len(include):
names = {test.name for test in tests}
if len(set(include) - set(names)):
missing = list(set(include) - set(names))
raise Exception("Unable to locate tests %s" % missing)
tests = list(set(tests))
tests.sort(key=lambda x: x.name)
return tests
def tests_to_run(self):
'''
Gathers all tests for current benchmark run.
'''
return self.gather_tests(self.benchmarker.config.test,
self.benchmarker.config.exclude)
def gather_frameworks(self, include=None, exclude=None):
'''
Return a dictionary mapping frameworks->[test1,test2,test3]
for quickly grabbing all tests in a grouped manner.
Args have the same meaning as gather_tests
'''
tests = self.gather_tests(include, exclude)
frameworks = dict()
for test in tests:
if test.framework not in frameworks:
frameworks[test.framework] = []
frameworks[test.framework].append(test)
return frameworks
def has_file(self, test_dir, filename):
'''
Returns True if the file exists in the test dir
'''
path = test_dir
if not self.benchmarker.config.lang_root in path:
path = os.path.join(self.benchmarker.config.lang_root, path)
return os.path.isfile("{!s}/{!s}".format(path, filename))
@staticmethod
def test_order(type_name):
"""
This sort ordering is set up specifically to return the length
of the test name. There were SO many problems involved with
'plaintext' being run first (rather, just not last) that we
needed to ensure that it was run last for every framework.
"""
return len(type_name)
def parse_config(self, config, directory):
"""
Parses a config file into a list of FrameworkTest objects
"""
from toolset.benchmark.framework_test import FrameworkTest
tests = []
# The config object can specify multiple tests
# Loop over them and parse each into a FrameworkTest
for test in config['tests']:
tests_to_run = [name for (name, keys) in test.iteritems()]
if "default" not in tests_to_run:
log("Framework %s does not define a default test in benchmark_config.json"
% config['framework'],
color=Fore.YELLOW)
# Check that each test configuration is acceptable
# Throw exceptions if a field is missing, or how to improve the field
for test_name, test_keys in test.iteritems():
# Validates and normalizes the benchmark_config entry
test_keys = Metadata.validate_test(test_name, test_keys,
config['framework'], directory)
# Map test type to a parsed FrameworkTestType object
runTests = dict()
# TODO: remove self.benchmarker.config.types
for type_name, type_obj in self.benchmarker.config.types.iteritems():
try:
# Makes a FrameWorkTestType object using some of the keys in config
# e.g. JsonTestType uses "json_url"
runTests[type_name] = type_obj.copy().parse(test_keys)
except AttributeError:
# This is quite common - most tests don't support all types
# Quitely log it and move on (debug logging is on in travis and this causes
# ~1500 lines of debug, so I'm totally ignoring it for now
# log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
pass
# We need to sort by test_type to run
sortedTestKeys = sorted(
runTests.keys(), key=Metadata.test_order)
sortedRunTests = OrderedDict()
for sortedTestKey in sortedTestKeys:
sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
# Prefix all test names with framework except 'default' test
# Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
if test_name == 'default':
test_name = config['framework']
else:
test_name = "%s-%s" % (config['framework'], test_name)
# By passing the entire set of keys, each FrameworkTest will have a member for each key
tests.append(
FrameworkTest(test_name, directory, self.benchmarker,
sortedRunTests, test_keys))
return tests
def to_jsonable(self):
'''
Returns an array suitable for jsonification
'''
all_tests = self.gather_tests()
return map(lambda test: {
"project_name": test.project_name,
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus,
"tags": hasattr(test, "tags") and test.tags or []
}, all_tests)
def list_test_metadata(self):
'''
Prints the metadata for all the available tests
'''
all_tests_json = json.dumps(self.to_jsonable())
with open(
os.path.join(self.benchmarker.results.directory,
"test_metadata.json"), "w") as f:
f.write(all_tests_json)
@staticmethod
def validate_test(test_name, test_keys, project_name, directory):
"""
Validate and normalizes benchmark config values for this test based on a schema
"""
recommended_lang = directory.split('/')[-2]
windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/issues/1038"
schema = {
'language': {
# Language is the only key right now with no 'allowed' key that can't
# have a "None" value
'required':
True,
'help': ('language',
'The language of the framework used, suggestion: %s' %
recommended_lang)
},
'webserver': {
'help':
('webserver',
'Name of the webserver also referred to as the "front-end server"'
)
},
'classification': {
'allowed': [('Fullstack', '...'), ('Micro', '...'),
('Platform', '...')]
},
'database': {
'allowed':
Metadata.supported_dbs +
[('None',
'No database was used for these tests, as is the case with Json Serialization and Plaintext'
)]
},
'approach': {
'allowed': [('Realistic', '...'), ('Stripped', '...')]
},
'orm': {
'required_with':
'database',
'allowed':
[('Full',
'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'
),
('Micro',
'Has basic database driver capabilities such as establishing a connection and sending queries.'
),
('Raw',
'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.'
)]
},
'platform': {
'help':
('platform',
'Name of the platform this framework runs on, e.g. Node.js, PyPy, hhvm, JRuby ...'
)
},
'framework': {
# Guaranteed to be here and correct at this point
# key is left here to produce the set of required keys
},
'os': {
'allowed':
[('Linux',
'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
),
('Windows',
'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
% windows_url)]
},
'database_os': {
'required_with':
'database',
'allowed':
[('Linux',
'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
),
('Windows',
'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
% windows_url)]
}
}
# Check the (all optional) test urls
Metadata.validate_urls(test_name, test_keys)
def get_test_val(k):
return test_keys.get(k, "none").lower()
def throw_incorrect_key(k, acceptable_values, descriptors):
msg = (
"`%s` is a required key for test \"%s\" in framework \"%s\"\n"
% (k, test_name, project_name))
if acceptable_values:
msg = (
"Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
% (k, test_name, project_name))
helpinfo = ('\n').join([
" `%s` -- %s" % (v, desc)
for (v, desc) in zip(acceptable_values, descriptors)
])
msg = msg + helpinfo + "\n"
raise Exception(msg)
# Check values of keys against schema
for key in schema.keys():
val = get_test_val(key)
test_keys[key] = val
acceptable_values = None
descriptors = None
if 'allowed' in schema[key]:
allowed = schema[key].get('allowed', [])
acceptable_values, descriptors = zip(*allowed)
acceptable_values = [a.lower() for a in acceptable_values]
if val == "none":
# incorrect if key requires a value other than none
if schema[key].get('required', False):
throw_incorrect_key(key, acceptable_values, descriptors)
# certain keys are only required if another key is not none
if 'required_with' in schema[key]:
if get_test_val(schema[key]['required_with']) != "none":
throw_incorrect_key(key, acceptable_values, descriptors)
# if we're here, the key needs to be one of the "allowed" values
elif acceptable_values and val not in acceptable_values:
throw_incorrect_key(key, acceptable_values, descriptors)
test_keys['project_name'] = project_name
return test_keys
@staticmethod
def validate_urls(test_name, test_keys):
"""
Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is
empty or does not start with a "/" character. There is no validation done to ensure the url conforms to
the suggested url specifications, although those suggestions are presented if a url fails validation here.
"""
example_urls = {
"json_url":
"/json",
"db_url":
"/mysql/db",
"query_url":
"/mysql/queries?queries= or /mysql/queries/",
"fortune_url":
"/mysql/fortunes",
"update_url":
"/mysql/updates?queries= or /mysql/updates/",
"plaintext_url":
"/plaintext",
"cached_query_url":
"/mysql/cached_queries?queries= or /mysql/cached_queries"
}
for test_url in [
"json_url", "db_url", "query_url", "fortune_url", "update_url",
"plaintext_url", "cached_query_url"
]:
key_value = test_keys.get(test_url, None)
if key_value is not None and not key_value.startswith('/'):
errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
Example `%s` url: \"%s\"
""" % (test_url, test_name, key_value, test_url,
example_urls[test_url])
raise Exception(errmsg)
| StarcoderdataPython |
172086 | <filename>riptable/rt_dataset.py
# -*- coding: utf-8 -*-
__all__ = ['Dataset', ]
from collections import abc, Counter, namedtuple
import operator
import os
from typing import Any, Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, Union, TYPE_CHECKING
import warnings
import numpy as np
from .rt_struct import Struct
from .rt_fastarray import FastArray
from .rt_enum import TypeId, DS_DISPLAY_TYPES, NumpyCharTypes, DisplayDetectModes, INVALID_DICT, MATH_OPERATION, \
ApplyType, SDSFileType, ColHeader, TOTAL_LONG_NAME, CategoryMode
from .Utils.rt_display_properties import format_scalar
from .rt_hstack import hstack_any
from .rt_groupby import GroupBy
from .rt_timers import GetTSC
from .rt_numpy import full, lexsort, reindex_fast, unique, ismember, empty, arange, tile, ones, zeros
from .rt_numpy import sum, mean, var, std, argmax, argmin, min, max, median, cumsum, putmask, mask_ori, mask_andi
from .rt_numpy import nanargmin, nanargmax, nansum, nanmean, nanvar, nanstd, nanmedian, nanmin, nanmax
from .rt_numpy import isnan, isnanorzero, isnotfinite, bool_to_fancy, hstack, vstack, combine2keys, cat2keys
from .rt_grouping import combine2groups
from .rt_display import DisplayTable, DisplayDetect, DisplayString
from .rt_misc import build_header_tuples
from . import rt_merge
from .rt_sort_cache import SortCache
from .rt_utils import describe, quantile, is_list_like, get_default_value, _possibly_convert_rec_array, sample
from .rt_sds import save_sds, compress_dataset_internal, COMPRESSION_TYPE_NONE, COMPRESSION_TYPE_ZSTD, _sds_path_single, _write_to_sds, load_sds
from .Utils.rt_metadata import MetaData
from .rt_mlutils import normalize_minmax, normalize_zscore
from .rt_itemcontainer import ItemContainer
from .rt_imatrix import IMatrix
if TYPE_CHECKING:
from datetime import timedelta
from .rt_accum2 import Accum2
from .rt_categorical import Categorical
from .rt_multiset import Multiset
# pandas is an optional dependency.
try:
import pandas as pd
except ImportError:
pass
# pyarrow is an optional dependency.
try:
import pyarrow as pa
except ImportError:
pass
ArrayCompatible = Union[list, abc.Iterable, np.ndarray]
class Dataset(Struct):
"""
The Dataset class is the workhorse of riptable; it may be considered as an NxK array of values (of mixed type,
constant by column) where the rows are integer indexed and the columns are indexed by name (as well as
integer index). Alternatively it may be regarded as a dictionary of arrays, all of the same length.
The Dataset constructor takes dictionaries (dict, OrderedDict, etc...), as well as single instances of
Dataset or Struct (if all entries are of the same length).
Dataset() := Dataset({}).
The constructor dictionary keys (or element/column names added later) must be legal Python
variable names, not starting with '_' and not conflicting with any Dataset member names.
**Column indexing behavior**::
>>> st['b'] # get a column (equiv. st.b)
>>> st[['a', 'e']] # get some columns
>>> st[[0, 4]] # get some columns (order is that of iterating st (== list(st))
>>> st[1:5:2] # standard slice notation, indexing corresponding to previous
>>> st[bool_vector_len5] # get 'True' columns
In all of the above: ``st[col_spec] := st[:, colspec]``
**Row indexing behavior**::
>>> st[2, :] # get a row (all columns)
>>> st[[3, 7], :] # get some rows (all columns)
>>> st[1:5:2, :] # standard slice notation (all columns)
>>> st[bool_vector_len5, :] # get 'True' rows (all columns)
>>> st[row_spec, col_spec] # get specified rows for specified columns
Note that because ``st[spec] := st[:, spec]``, to specify rows one *must* specify columns
as well, at least as 'the all-slice': e.g., ``st[row_spec, :]``.
Wherever possible, views into the original data are returned. Use
:meth:`~rt.rt_dataset.Dataset.copy` where necessary.
Examples
--------
A Dataset with six integral columns of length 10::
>>> import string
>>> ds = rt.Dataset({_k: list(range(_i * 10, (_i + 1) * 10)) for _i, _k in enumerate(string.ascii_lowercase[:6])})
Add a column of strings (stored internally as ascii bytes)::
>>> ds.S = list('ABCDEFGHIJ')
Add a column of non-ascii strings (stored internally as a Categorical column)::
>>> ds.U = list('ℙƴ☂ℌøἤ-613')
>>> print(ds)
# a b c d e f S U
- - -- -- -- -- -- - -
0 0 10 20 30 40 50 A ℙ
1 1 11 21 31 41 51 B ƴ
2 2 12 22 32 42 52 C ☂
3 3 13 23 33 43 53 D ℌ
4 4 14 24 34 44 54 E ø
5 5 15 25 35 45 55 F ἤ
6 6 16 26 36 46 56 G -
7 7 17 27 37 47 57 H 6
8 8 18 28 38 48 58 I 1
9 9 19 29 39 49 59 J 3
>>> ds.get_ncols()
8
>>> ds.get_nrows()
10
``len`` applied to a Dataset returns the number of rows in the Dataset.
>>> len(ds)
10
>>> # Not too dissimilar from numpy/pandas in many ways.
>>> ds.shape
(10, 8)
>>> ds.size
80
>>> ds.head()
>>> ds.tail(n=3)
>>> assert (ds.c == ds['c']).all() and (ds.c == ds[2]).all()
>>> print(ds[1:8:3, :3])
# a b c
- - -- --
0 1 11 21
1 4 14 24
2 7 17 27
>>> ds.newcol = np.arange(100, 110) # okay, a new entry
>>> ds.newcol = np.arange(200, 210) # okay, replace the entry
>>> ds['another'] = 6 # okay (scalar is promoted to correct length vector)
>>> ds['another'] = ds.another.astype(np.float32) # redefines type of column
>>> ds.col_remove(['newcol', 'another'])
Fancy indexing for get/set::
>>> ds[1:8:3, :3] = ds[2:9:3, ['d', 'e', 'f']]
Equivalents::
>>> for colname in ds: print(colname, ds[colname])
>>> for colname, array in ds.items(): print(colname, array)
>>> for colname, array in zip(ds.keys(), ds.values()): print(colname, array)
>>> for colname, array in zip(ds, ds.values()): print(colname, array)
>>> if key in ds:
... assert getattr(ds, key) is ds[key]
Context manager::
>>> with Dataset({'a': 1, 'b': 'fish'}) as ds0:
... print(ds0.a)
[1]
>>> assert not hasattr(ds0, 'a')
Dataset cannot be used in a boolean context ``(if ds: ...)``,
use ``ds.any(axis='all')`` or ``ds.all(axis='all')`` instead::
>>> ds1 = ds[:-2] # Drop the string columns, Categoricals are 'funny' here.
>>> ds1.any(axis='all')
True
>>> ds1.all(axis='all')
False
>>> ds1.a[0] = -99
>>> ds1.all(axis='all')
True
>>> if (ds2 <= ds3).all(axis='all'): ...
Do math::
>>> ds1 += 5
>>> ds1 + 3 * ds2 - np.ones(10)
>>> ds1 ** 5
>>> ds.abs()
>>> ds.sum(axis=0, as_dataset=True)
# a b c d e f
- -- --- --- --- --- ---
0 39 238 338 345 445 545
>>> ds.sum(axis=1)
array([ 51, 249, 162, 168, 267, 180, 186, 285, 198, 204])
>>> ds.sum(axis=None)
1950
"""
def __init__(
self,
inputval: Optional[Union[ArrayCompatible, dict, Iterable[ArrayCompatible], Iterable[Tuple[str, ArrayCompatible]], 'ItemContainer']] = None,
base_index: int = 0,
sort: bool = False,
unicode: bool = False):
if inputval is None:
inputval = dict()
self._pre_init(sort=sort)
# fast track for itemcontainer from dataset/subclass
if isinstance(inputval, ItemContainer):
self._init_from_itemcontainer(inputval)
elif isinstance(inputval, list):
# dataset raises an error, pdataset does not
raise TypeError(
'Dataset can be created from list or iterable of values with Dataset.concat_rows(), Dataset.concat_columns, Dataset.from_rows() or Dataset.from_tagged_rows().')
# all other initializers will be flipped to a dictionary, or raise an error
else:
inputval = self._init_columns_as_dict(inputval, base_index=base_index, sort=sort, unicode=unicode)
self._init_from_dict(inputval, unicode=unicode)
self._post_init()
# ------------------------------------------------------------
def _init_columns_as_dict(self, columns, base_index=0, sort=True, unicode=False):
"""
Most methods of dataset construction will be turned into a dictionary before
setting dataset columns. This will return the resulting dictionary for each type
or raise an error.
"""
if isinstance(columns, dict):
pass
# TODO: pull out itemcontainer
elif isinstance(columns, Struct):
columns = columns._as_dictionary()
# check for pandas without importing
elif columns.__class__.__name__ == 'DataFrame':
columns = self._init_from_pandas_df(columns, unicode=unicode)
# record arrays have a void dtype
elif isinstance(columns, np.ndarray):
if columns.dtype.char == 'V':
columns = _possibly_convert_rec_array(columns)
else:
raise TypeError(f"Can only initialize datasets from arrays that are numpy record arrays.")
# If we get an Iterable of 2-tuples (a string key and a list/iterable/array)
# or an iterable of arrays (where we'll generate names like 'col_0', 'col_1', etc.).
# NOTE: The latter one shouldn't go here; it should go in Dataset.from_rows() or similar instead.
elif isinstance(columns, abc.Iterable) and not isinstance(columns, (str, bytes)):
raise NotImplementedError("Need to implement support for creating a Dataset from an iterable.")
else:
raise TypeError('Unexpected argument in Dataset.__init__', type(columns))
return columns
# ------------------------------------------------------------
def _init_from_itemcontainer(self, columns):
"""
Store the itemcontainer and set _nrows.
"""
self._all_items = columns
self._nrows = len(list(self._all_items.values())[0][0])
# ------------------------------------------------------------
    def _pre_init(self, sort=False):
        """
        Chained pre-construction hook; Dataset-only setup layered on
        Struct._pre_init.

        `sort` records whether columns should be displayed in sorted order.
        NOTE(review): Struct._pre_init is assumed to set up the attribute
        machinery that the assignment below relies on, so the call order
        here should not be changed -- confirm against Struct.
        """
        super()._pre_init()
        self._sort_display = sort
# ------------------------------------------------------------
    def _post_init(self):
        """
        Chained post-construction hook; currently just delegates to
        Struct._post_init.  Kept as an override point for init steps that
        only Dataset needs.
        """
        super()._post_init()
# ------------------------------------------------------------
def _possibly_convert_array(self, v, name, unicode=False):
"""
If an array contains objects, it will attempt to flip based on the type of the first item.
By default, flip any numpy arrays to FastArray. (See UseFastArray flag)
The constructor will warn the user whenever object arrays appear, and raise an error if conversion
was unsuccessful.
Examples
--------
String objects:
>>> ds = rt.Dataset({'col1': np.array(['a','b','c'], dtype=object)})
>>> ds.col1
FastArray([b'a', b'b', b'c'], dtype='|S1')
Numeric objects:
>>> ds = rt.Dataset({'col1': np.array([1.,2.,3.], dtype=object)})
>>> ds.col1
FastArray([1., 2., 3.])
Mixed type objects:
>>> ds = rt.Dataset({'col1': np.array([np.nan, 'str', 1], dtype=object)})
ValueError: could not convert string to float: 'str'
TypeError: Cannot handle a numpy object array of type <class 'float'>
Note: depending on the order of mixed types in an object array, they may be converted to strings.
for performance, only the type of the first item is examined
Mixed type objects starting with string:
>>> ds = rt.Dataset({'col1': np.array(['str', np.nan, 1], dtype=object)})
>>> ds.col1
FastArray([b'str', b'nan', b'1'], dtype='|S3')
"""
if self.UseFastArray:
# flip value to FastArray
if not isinstance(v, TypeRegister.Categorical):
if isinstance(v, np.ndarray):
c = v.dtype.char
if c == 'O':
# make sure, scalar type so no python objects like dicts come through
# try float, but most objects will flip to bytes or unicode
# TODO: Simplify to use np.isscalar() here?
if isinstance(v[0], (str, np.str_, bytes, np.bytes_, int, float, bool, np.integer, np.floating, np.bool_)):
try:
# attempt to autodetect based on first element
# NOTE: if the first element is a float and Nan.. does that mean keep looking?
if isinstance(v[0], (str, np.str_)):
# NOTE this might get converted to 'S' if unicode is False for FastArrays
v=v.astype('U')
elif isinstance(v[0], (bytes, np.bytes_)):
v=v.astype('S')
elif isinstance(v[0], (int, np.integer)):
v=v.astype(np.int64)
elif isinstance(v[0], (bool, np.bool_)):
v=v.astype(np.bool_)
else:
v = v.astype(np.float64)
except:
v = self._object_as_string(name, v)
else:
raise TypeError(f'Cannot convert object array {v} containing {type(v[0])}')
elif c == 'M':
# handle numpy datetime, will be in UTC
v = TypeRegister.DateTimeNano(v, from_tz='GMT', to_tz='GMT')
# numpy arrays with bytes will be converted here unless unicode was requested
# fast arrays will not be flipped, even if unicode
if not isinstance(v, FastArray):
v = FastArray(v, unicode=unicode)
else:
if isinstance(v, FastArray):
v = v._np
# possible expanson of scalars or arrays of 1
if v.shape[0]== 1 and self._nrows is not None and self._nrows > 1:
# try to use repeat to solve mismatch problem
v = v.repeat(self._nrows)
return v
# ------------------------------------------------------------
def _object_as_string(self, name, v):
"""
After failing to convert objects to a numeric type, or when the first item is
a string or bytes, try to flip the array to a bytes array, then unicode array.
"""
try:
v = v.astype('S')
except (UnicodeEncodeError, SystemError):
try:
v = v.astype('U')
except:
raise ValueError(f"Object strings could not be converted to bytestrings or unicode for {name!r}. First item was {type(v[0])}")
return v
# ------------------------------------------------------------
def _possibly_convert(self, name, v, unicode=False):
"""
Input: any data type that can be added to a dataset
Returns: a numpy based array
"""
if not isinstance(v, np.ndarray):
# pandas Series containing Categorical
if hasattr(v, 'cat'):
v = TypeRegister.Categorical(v.values)
# pandas Categorical
elif hasattr(v, '_codes'):
v = TypeRegister.Categorical(v)
elif isinstance(v, (tuple, Struct)):
raise TypeError(f'Cannot create a Dataset column out of a {type(v).__name__}.')
elif not isinstance(v, list):
# convert scalar to list then to array
v = np.asanyarray([v])
else:
# convert list to an array
v = np.asanyarray(v)
v = self._ensure_vector(v)
v = self._possibly_convert_array(v, name, unicode=unicode)
return v
# ------------------------------------------------------------
def _ensure_vector(self, vec):
if len(vec.shape) != 1:
vec = vec.squeeze()
if len(vec.shape) == 0:
vec = vec.reshape((1,))
return vec
# ------------------------------------------------------------
    def _check_addtype(self, name, value):
        """
        Normalize `value` into a 1-D array suitable for storing as the column
        `name`, expanding scalars/length-1 values to _nrows and validating
        row-count compatibility.  Also sets _nrows when the Dataset is empty.
        """
        # TODO use _possibly_convert -- why are these two routines different?
        if not isinstance(value, np.ndarray):
            if isinstance(value,set):
                raise TypeError(f'Cannot create Dataset column {name!r} out of tuples or sets {value!r}.')
            # following pandas
            if self._nrows is None:
                # first column determines the row count
                if isinstance(value, (list, tuple)):
                    self._nrows = len(value)
                else:
                    # how to get here:
                    # ds=Dataset()
                    # ds[['g','c']]=3
                    self._nrows = 1
            if isinstance(value, (list, tuple)):
                rowlen = len(value)
                # length-1 sequences are allowed: they are broadcast below
                if self._nrows != rowlen and rowlen !=1:
                    raise TypeError("Row mismatch in Dataset._check_addtype", self._nrows, len(value), value)
                value = np.asanyarray(value)
                if value.shape[0] ==1 and self._nrows != 1:
                    # for when user types in a list of 1 item and wants it to repeat
                    value = value.repeat(self._nrows)
            else:
                # if they try to add a dataset to a single column
                # then if the dataset has one column, use that
                if isinstance(value, Dataset):
                    if self._nrows != value._nrows:
                        raise TypeError("Row mismatch in Dataset._check_addtype. Tried to add Dataset of different lengths", self._nrows, value._nrows)
                    if value._ncols==1:
                        return value[0]
                    else:
                        # skip over groupbykeys
                        labels = value.label_get_names()
                        count =0
                        first = None
                        # loop over all columns, not including labels
                        for c in value.keys():
                            if c not in labels:
                                first = c
                                count += 1
                        # exactly one non-label column: unambiguous, use it
                        if count == 1:
                            return value[first]
                        else:
                            # perhaps see if we can find the same name?
                            raise TypeError(f"Cannot determine which column of Dataset to add to the Dataset column {name!r}.")
                if callable(getattr(value, 'repeat', None)):
                    # for when user types in a list of 1 item and wants it to repeat to match dataset row length
                    value = value.repeat(self._nrows)
                else:
                    try:
                        # NOT an array, or a list, tuple, or Dataset at this point
                        value = full(self._nrows, value)
                    except Exception as ex:
                        raise TypeError(f'Cannot create a single Dataset column {name!r} out of type {type(value)!r}. Error {ex}')
        value = self._ensure_vector(value)
        # this code will add the name
        value = self._possibly_convert_array(value, name)
        self._check_add_dimensions(value)
        return value
# ------------------------------------------------------------
def _init_from_pandas_df(self, df, unicode=False):
"""
Pulls data from pandas dataframes. Uses get attribute, so does not need to import pandas.
"""
df_dict = {}
for k in df.columns:
col = df[k]
# categoricals will be preserved in _possibly_convert
if hasattr(col, 'cat'):
pass
# series column (added with underlying array)
elif hasattr(col, 'values'):
col = col.values
else:
raise TypeError(f"Cannot initialize column of type {type(col)}")
#col = self._possibly_convert(k, col, unicode=unicode)
df_dict[k] = col
return df_dict
# ------------------------------------------------------------
    def _init_from_dict(self, dictionary, unicode=False):
        """
        Final stage of construction: convert each dictionary value to a
        column array, store it, and validate every column's dimensions
        (which also establishes _nrows).
        """
        # all __init__ paths funnel into this
        allnames = Struct.AllNames
        self._validate_names(dictionary)
        self._nrows = None
        self._ncols = 0
        if allnames:
            # AllNames mode: every name (including underscore names) is stored
            for colname, arr in dictionary.items():
                arr = self._possibly_convert(colname, arr, unicode=unicode)
                self._add_allnames(colname, arr, 0)
        else:
            # normal mode: leading-underscore names are silently skipped
            for colname, arr in dictionary.items():
                if colname[0] != '_':
                    # many different types of data can be passed in here
                    arr = self._possibly_convert(colname, arr, unicode=unicode)
                    # add the array to this class
                    self._superadditem(colname, arr)

        # pull the items so getattr doesn't need to be called
        items = self._all_items.get_dict_values()
        for i in items:
            # dict values are in a list
            col = i[0]
            self._check_add_dimensions(col)
        # as in pandas DataFrame, these are attributes that must be updated when modifying columns/rows
        # self._superadditem('columns', list(self.keys()))
# ------------------------------------------------------------
def _check_add_dimensions(self, col):
"""
Used in _init_from_dict and _replaceitem.
If _nrows has not been set, it will be here.
"""
if col.ndim > 0:
if self._nrows is None:
self._nrows = col.shape[0]
else:
if self._nrows != col.shape[0]:
raise ValueError(f"Column length mismatch in Dataset constructor: Dataset had {self._nrows}, cannot add column with length {col.shape[0]} and ndims {col.ndim} col : {col}")
else:
raise ValueError(f"Datasets only support columns of 1 or more dimensions. Got {col.ndim} dimensions.")
# ------------------------------------------------------------
    def __del__(self):
        """Notify the sort cache that this dataset's cached sorts can be dropped."""
        # print("**Tell the sort cache we are gone")
        # print(f"dataset size deleted")
        # import traceback
        # traceback.print_stack()
        try:
            SortCache.invalidate(self._uniqueid)
        except AttributeError:
            # _uniqueid may be missing on a partially constructed instance
            # (or attributes may already be gone during interpreter teardown)
            pass
# --------------------------------------------------------
def _copy_attributes(self, ds, deep=False):
"""
After constructing a new dataset or pdataset, copy over attributes for sort, labels, footers, etc.
Called by Dataset._copy(), PDataset._copy()
"""
# copy over the sort list
if self._col_sortlist is not None:
new_sortlist = [_k for _k in self._col_sortlist if _k in ds]
if len(new_sortlist) > 0:
ds._col_sortlist = new_sortlist
# reassign labels
ds.label_set_names(self.label_get_names())
# copy footers
# TODO NW The _footers is now deprecated, I think, and should be removed throughout
if hasattr( self, '_footers' ):
footers = {}
for f, item in self._footers.items():
footers[f] = item.copy() if (deep and item) else item
ds._footers = footers
return ds
# --------------------------------------------------------
def _copy(self, deep=False, rows=None, cols=None, base_index=0, cls=None):
"""
Bracket indexing that returns a dataset will funnel into this routine.
deep : if True, perform a deep copy on column array
rows : row mask
cols : column mask
base_index : used for head/tail slicing
cls : class of return type, for subclass super() calls
First argument must be deep. Deep cannnot be set to None. It must be True or False.
"""
if cls is None:
cls = type(self)
newcols = self._as_itemcontainer(deep=deep, rows=rows, cols=cols, base_index=base_index)
# newcols is either an ItemContainer or a dictionary
ds = cls(newcols, base_index=base_index)
ds = self._copy_attributes(ds, deep=deep)
## # ! TO DO fixup sortkeys, this block would change type of self._col_sortlist from [] to {}.
## if self._col_sortlist is not None:
## # copy the dictionary
## # TODO: turn these keys into new_sort or active sort if there wasn't one
## keylist = {_k: _v for _k, _v in self._col_sortlist.items()}
## # also copy keylist here
## keylist = self._copy_from_dict(keylist, copy=deep, rows=rows, cols=cols)
## ds._col_sortlist = keylist
return ds
# --------------------------------------------------------
    def _as_itemcontainer(self, deep=False, rows=None, cols=None, base_index=0):
        """
        Returns an ItemContainer object for quick reconstruction or slicing/indexing of a dataset.
        Will perform a deep copy if requested and necessary.

        Parameters
        ----------
        deep : bool
            If True, ensure the returned columns do not share memory with this
            dataset's columns.
        rows : optional
            Row mask (slice, boolean mask, or fancy index); None selects all rows.
        cols : optional
            Column selection; None selects all columns.
        base_index : int
            Accepted for interface symmetry with _copy; not used in this body.
        """
        def apply_rowmask(arr, mask):
            # callback for applying mask/slice to columns;
            # preserves the column's name across the indexing operation
            name = arr.get_name()
            arr = arr[mask]
            arr.set_name(name)
            return arr
        if rows is None:
            # item container copy, with or without a column selection
            newcols = self._all_items.copy(cols=cols)
        else:
            # get array data, slice, send back to item container for copy
            # slice will take a view of array (same memory)
            # boolean/fancy index will always make copy
            # will also slice/restore FastArray subclasses
            newcols = self._all_items.copy_apply(apply_rowmask, rows, cols=cols)
        # only slices, full arrays need a deep copy
        # (boolean/fancy indexing above already produced fresh arrays)
        if deep and (isinstance(rows, slice) or rows is None):
            for v in newcols.iter_values():
                # v is a list: [column_array, *item_attributes]
                name = v[0].get_name()
                v[0] = v[0].copy()
                v[0].set_name(name)
                # deep copy item_attributes
                for i, vn in enumerate(v[1:]):
                    v[i+1] = vn.copy() if hasattr(vn, 'copy') else vn
        return newcols
# --------------------------------------------------------
def _autocomplete(self) -> str:
return f'Dataset{self.shape}'
# --------------------------------------------------------
def copy(self, deep=True):
"""
Make a copy of the Dataset.
Parameters
----------
deep : bool
Indicates whether the underlying data should be copied too. Defaults to True.
Returns
-------
Dataset
Examples
--------
>>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':3*['A', 'B'], 'c':3*[True, False]})
>>> ds
# a b c
- -- - -----
0 -3 A True
1 -2 B False
2 -1 A True
3 0 B False
4 1 A True
5 2 B False
>>> ds1 = ds.copy()
>>> ds.a = ds.a + 1
>>> ds1
# a b c
- -- - -----
0 -3 A True
1 -2 B False
2 -1 A True
3 0 B False
4 1 A True
5 2 B False
Even though we have changed ds, ds1 remains unchanged.
"""
return self._copy(deep)
# --------------------------------------------------------
def filter(self, rowfilter: np.ndarray, inplace:bool=False) -> 'Dataset':
"""
Use a row filter to make a copy of the Dataset.
Parameters
----------
rowfilter: array, fancy index or boolean mask
inplace : bool
When set to True will reduce memory overhead. Defaults to False.
Examples
--------
Filter a Dataset using the least memory possible
>>> ds = rt.Dataset({'a': rt.arange(10_000_000), 'b': rt.arange(10_000_000.0)})
>>> f = rt.logical(rt.arange(10_000_000) % 2)
>>> ds.filter(f, inplace=True)
# a b
------- ------- ---------
0 1 1.00
1 3 3.00
2 5 5.00
... ... ...
4999997 9999995 1.000e+07
4999998 9999997 1.000e+07
4999999 9999999 1.000e+07
<BLANKLINE>
[5000000 rows x 2 columns] total bytes: 57.2 MB
"""
if inplace:
# normalize rowfilter
if np.isscalar(rowfilter):
rowfilter=np.asanyarray([rowfilter])
elif not isinstance(rowfilter, np.ndarray):
rowfilter=np.asanyarray(rowfilter)
self._all_items.copy_inplace(rowfilter)
# check for boolean array
if rowfilter.dtype.char == '?':
newlen = np.sum(rowfilter)
else:
newlen = len(rowfilter)
self._nrows = newlen
return self
else:
return self._copy(False, rowfilter)
def get_nrows(self):
"""
Get the number of elements in each column of the Dataset.
Returns
-------
int
The number of elements in each column of the Dataset.
"""
return self._nrows
## -------------------------------------------------------
#def save_uncompressed(self, path, name):
# """
# *not implemented*
# """
# self.save(self, path, name, compress=False)
# -------------------------------------------------------
    def save(self, path: Union[str, os.PathLike] = '', share: Optional[str] = None, compress:bool=True, overwrite:bool=True, name: Optional[str] = None, onefile:bool=False,
             bandsize: Optional[int] = None, append: Optional[str] = None, complevel: Optional[int] = None):
        """
        Save a dataset to a single .sds file or shared memory.

        Parameters
        ----------
        path : str or os.PathLike
            full path to save location + file name (if no .sds extension is included, it will be added)
        share : str, optional
            Shared memory name. If set, dataset will be saved to shared memory and NOT to disk
            when shared memory is specified, a filename must be included in path. only this will be used,
            the rest of the path will be discarded.
        compress : bool
            Use compression when saving the file. Shared memory is always saved uncompressed.
        overwrite : bool
            Defaults to True. If False, prompt the user when overwriting an existing .sds file;
            mainly useful for Struct.save(), which may call Dataset.save() multiple times.
        name : str, optional
            Passed through to save_sds.
        onefile : bool
            Defaults to False. Passed through to save_sds.
        bandsize : int, optional
            If set to an integer > 10000 it will compress column data every bandsize rows
        append : str, optional
            If set to a string it will append to the file with the section name.
        complevel : int, optional
            Compression level from 0 to 9. 2 (default) is average. 1 is faster, less compressed, 3 is slower, more compressed.

        Examples
        --------
        >>> ds = rt.Dataset({'col_'+str(i): rt.arange(5) for i in range(3)})
        >>> ds.save('my_data')
        >>> os.path.exists('my_data.sds')
        True
        >>> ds.save('my_data', overwrite=False)
        my_data.sds already exists and is a file. Overwrite? (y/n) n
        No file was saved.
        >>> ds.save('my_data', overwrite=True)
        Overwriting file with my_data.sds
        >>> ds.save('shareds1', share='sharename')
        >>> os.path.exists('shareds1.sds')
        False

        See Also
        --------
        Dataset.load(), Struct.save(), Struct.load(), load_sds(), load_h5()
        """
        # a share name requires an explicit file name to key the shared-memory segment
        if share is not None:
            if path=='':
                raise ValueError(f'Must provide single .sds file name for item with share name {share}. e.g. my_ds.save("dataset1.sds", share="{share}")')
        save_sds(path, self, share=share, compress=compress, overwrite=overwrite, name=name, onefile=onefile, bandsize=bandsize, append=append, complevel=complevel)
# -------------------------------------------------------
@classmethod
def load(cls, path: Union[str, os.PathLike] = '', share=None, decompress:bool=True, info:bool=False, include: Optional[Sequence[str]] = None,
filter: Optional[np.ndarray] = None, sections: Optional[Sequence[str]] = None, threads: Optional[int] = None):
"""
Load dataset from .sds file or shared memory.
Parameters
----------
path : str
full path to load location + file name (if no .sds extension is included, it will be added)
share : str, optional
shared memory name. loader will check for dataset in shared memory first. if it's not there, the
data (if file found on disk) will be loaded into the user's workspace AND shared memory. a sharename
must be accompanied by a file name. (the rest of a full path will be trimmed off internally)
decompress : bool
**not implemented. the internal .sds loader will detect if the file is compressed
info : bool
Defaults to False. If True, load information about the contained arrays instead of loading them from file.
include : sequence of str, optional
Defaults to None. If provided, only load certain columns from the dataset.
filter : np.ndarray of int or np.ndarray of bool, optional
sections : sequence of str, optional
threads : int, optional
Defaults to None. Request certain number of threads during load.
Examples
--------
>>> ds = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
>>> ds.save('my_data')
>>> rt.Dataset.load('my_data')
# col_0 col_1 col_2
- ----- ----- -----
0 0.94 0.88 0.87
1 0.95 0.93 0.16
2 0.18 0.94 0.95
3 0.41 0.60 0.05
4 0.53 0.23 0.71
>>> ds = rt.Dataset.load('my_data', share='sharename')
>>> os.remove('my_data.sds')
>>> os.path.exists('my_data.sds')
False
>>> rt.Dataset.load('my_data', share='sharename')
# col_0 col_1 col_2
- ----- ----- -----
0 0.94 0.88 0.87
1 0.95 0.93 0.16
2 0.18 0.94 0.95
3 0.41 0.60 0.05
4 0.53 0.23 0.71
"""
return load_sds(path, share=share, info=info, include=include, filter=filter, sections=sections, threads=threads)
# -------------------------------------------------------
@property
def size(self) -> int:
"""
Number of elements in the Dataset (nrows x ncols).
Returns
-------
int
The number of elements in the Dataset (nrows x ncols).
"""
return self._ncols * self._nrows
### We can recreate this once we have a non-display transpose() method.
## @property
## def T(self):
## return self.transpose()
# -------------------------------------------------------
def _add_allnames(self, colname, arr, nrows) -> None:
'''
Internal routine used to add columns only when AllNames is True.
'''
if nrows == 0 or nrows == self.get_nrows():
if self._all_items.item_exists(colname):
self._replaceitem_allnames(colname, arr)
else:
self._addnewitem_allnames(colname, arr)
else:
raise NotImplementedError(f'Cannot set {colname!r} because rows are different lengths.')
# -------------------------------------------------------
def __setitem__(self, fld, value):
"""
Parameters
----------
fld : (rowspec, colspec) or colspec (=> rowspec of :)
value : scalar, sequence or dataset value
* scalar is always valid
* if (rowspec, colspec) is an NxK selection:
(1xK), K>1: allow |sequence| == K
(Nx1), N>1: allow |sequence| == N
(NxK), N, K>1: allow only w/ |dataset| = NxK
* sequence can be list, tuple, np.ndarray, FastArray
Raises
------
IndexError
"""
def setitem_mask(arr, mask, value):
arr[mask] = value
def setitem_fill(value, nrows):
return full
col_idx, row_idx, ncols, nrows, row_arg = self._extract_indexing(fld)
if col_idx is None:
col_idx = list(self.keys())
# BUG: set item with dataset for only one column
#print('col_idx',col_idx)
#print('row_idx',row_idx)
#print('ncols',ncols)
#print('row_arg',row_arg)
if ncols <= 1:
# this path is also for when the dataset is empty
if not isinstance(col_idx, str): col_idx = col_idx[0]
if col_idx in self:
if row_idx is None:
self.__setattr__(col_idx, value)
#self._superadditem(col_idx, value)
#setattr(self, col_idx, value)
else:
# apply row mask
arr=getattr(self, col_idx)
# setting a single col dataset from a dataset
if isinstance(value, Dataset):
arr[row_idx] = value[0]
else:
arr[row_idx] = value
elif Struct.AllNames:
self._add_allnames(col_idx, value, nrows)
elif self.is_valid_colname(col_idx):
if nrows == self.get_nrows() or nrows ==0:
self.__setattr__(col_idx, value)
else:
raise NotImplementedError(f'Cannot set {col_idx!r} because rows are different lengths.')
elif col_idx in ['True','False','None']:
col_idx = col_idx.lower()
if nrows == self.get_nrows() or nrows ==0:
self.__setattr__(col_idx, value)
else:
raise NotImplementedError(f'Cannot set {col_idx!r} because rows are different lengths.')
else:
raise IndexError(f'Invalid column name: {col_idx!r}')
elif nrows == 1:
if not all(self.col_exists(colname) for colname in col_idx):
raise IndexError('If creating a new column can only do one at a time.')
if np.isscalar(value):
self._all_items.apply(setitem_mask, row_idx, value, cols=col_idx)
elif isinstance(value, Dataset) and value.shape == (1, len(col_idx)):
# this case comes up crucially in ds[3, :] /= 2, for example
for colname, _cn in zip(col_idx, value):
getattr(self, colname)[row_idx] = value[_cn][0]
elif len(value) == len(col_idx):
for colname, array in zip(col_idx, value):
getattr(self, colname)[row_idx] =array
else:
raise ValueError('Must have equal len keys and value when setting with a sequence.')
else:
if np.isscalar(value):
#if not all(self.col_exists(_k) for _k in col_idx):
# raise IndexError('If creating a new column can only do one at a time.')
if row_idx is not None:
self._all_items.apply(setitem_mask, row_idx, value, cols=col_idx)
else:
# fill column with scalar
for colname in col_idx:
setattr(self, colname, value)
elif isinstance(value, Dataset):
# TJD 10.2018 - the row mask appears to have already been applied to value
# NOTE: if the row mask is a boolean, we could sum it to get the count
# NOTE: if the row mask is fancy indexing, we could get length
if row_idx is not None and col_idx is not None:
# both row and col mask
for i,c in enumerate(col_idx):
# inplace operation
#self[i][row_idx] = value[i]
getattr(self, c)[row_idx]=value[i]
elif row_idx is not None:
#no col mask
for i in range(ncols):
# inplace operation
self[i][row_idx] = value[i]
elif col_idx is not None:
#no row mask
# example: ds[['g','c']]=Dataset({'a':arange(10),'b':arange(10.0)}):
for i,c in enumerate(col_idx):
setattr(self, c, value[i])
else:
#no row and no col mask
for i in range(ncols):
self[i] = value[i]
else:
raise ValueError(f'Must have same-shape Dataset when setting {nrows}x{ncols} sub-Dataset. Type: {type(value)}')
return
# -------------------------------------------------------
def __getitem__(self, index):
"""
Parameters
----------
index : (rowspec, colspec) or colspec
Returns
-------
the indexed row(s), cols(s), sub-dataset or single value
Raises
------
IndexError
When an invalid column name is supplied.
TypeError
"""
def single_array(col_idx, row_idx):
# will either return or return an error
try:
np_arr = self.col_get_value(col_idx)
except:
raise IndexError(f"Could not find column named: {col_idx}")
if row_idx is not None:
# array indexing takes place early here
return np_arr[row_idx]
else:
return np_arr
# optimization for default case
if isinstance(index, str):
return self.col_get_value(index)
col_idx, row_idx, ncols, nrows, row_arg = self._extract_indexing(index)
# check for a single string which selects a single column
if isinstance(col_idx, str):
return single_array(col_idx, row_idx)
# if a single integer specified, make a list of one number for fancy column indexing
if isinstance(row_arg, (int, np.integer)):
row_idx = [row_arg]
return self._copy(deep=False, rows=row_idx, cols=col_idx)
# ------------------------------------------------------------
def _dataset_compare_check(self, func_name, lhs):
# comparison function will be called by an array the size of the indexes, either
# interperetted as integers, or as categorical strings
# if compared to string, make sure the string matches the string type in categories
if isinstance(lhs, Dataset):
nrows = self.get_nrows()
if lhs.get_nrows() != nrows:
# Allow is length is 1 so that broadcasting applies?
# N.B. Right now this causes a DeprecationWarning in numpy, not sure what type it will be.
raise ValueError("The two Datasets have different lengths and cannot be compared")
else:
# returns a new dataset
newds = {}
# for all columns that match
for colname in self.keys():
# if the lhs dataset has the same column name, compare
if hasattr(lhs, colname):
# get the function reference for the comparison operator
func = getattr(self[colname], func_name)
# add the boolean array to the new dataset
newds[colname] = func(lhs[colname])
else:
newds[colname] = np.array([False] * nrows)
for colname in lhs:
if colname not in newds:
newds[colname] = np.array([False] * nrows)
return type(self)(newds)
else:
raise TypeError(f'Cannot compare a Dataset to type {type(lhs).__name__}.')
# ------------------------------------------------------------
    # Rich comparisons delegate to _dataset_compare_check, which returns a new
    # Dataset of boolean arrays (column-wise comparison matched by name).
    def __ne__(self, lhs):
        return self._dataset_compare_check('__ne__', lhs)
    def __eq__(self, lhs):
        return self._dataset_compare_check('__eq__', lhs)
    def __ge__(self, lhs):
        return self._dataset_compare_check('__ge__', lhs)
    def __gt__(self, lhs):
        return self._dataset_compare_check('__gt__', lhs)
    def __le__(self, lhs):
        return self._dataset_compare_check('__le__', lhs)
    def __lt__(self, lhs):
        return self._dataset_compare_check('__lt__', lhs)
# ------------------------------------------------------------
def __len__(self):
# Debated October 2019
# For Dataset we will return the number of rows for length
rows= self._nrows
if rows is None:
rows = 0
return rows
# ------------------------------------------------------------
def putmask(self, mask, values):
"""
Call riptable ``putmask`` routine which is faster than ``__setitem__`` with bracket indexing.
Parameters
----------
mask : ndarray of bools
boolean numpy array with a length equal to the number of rows in the dataset.
values : rt.Dataset or ndarray
* Dataset: Corresponding column values will be copied, must have same shape as calling dataset.
* ndarray: Values will be copied to each column, must have length equal to calling dataset's nrows.
Returns
-------
None
Examples
--------
>>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':np.arange(6), 'c':np.arange(10,70,10)})
>>> ds
# a b c
- -- - --
0 -3 0 10
1 -2 1 20
2 -1 2 30
3 0 3 40
4 1 4 50
5 2 5 60
>>> ds1 = ds.copy()
>>> ds.putmask(ds.a < 0, np.arange(100,106))
>>> ds
# a b c
- --- --- ---
0 100 100 100
1 101 101 101
2 102 102 102
3 0 3 40
4 1 4 50
5 2 5 60
>>> ds.putmask(np.array([True, True, False, False, False, False]), ds1)
>>> ds
# a b c
- --- --- ---
0 -3 0 10
1 -2 1 20
2 102 102 102
3 0 3 40
4 1 4 50
5 2 5 60
"""
if not(isinstance(mask, np.ndarray) and mask.dtype.char == '?' and len(mask) == self._nrows):
raise ValueError(f"Mask must be a boolean numpy array of the same length as the number of rows in the dataset.")
if isinstance(values, Dataset):
if self.shape == values.shape:
col_src = list(values.values())
col_dst = list(self.values())
for i in range(self._ncols):
putmask( col_dst[i], mask, col_src[i] )
else:
raise ValueError(f"Dataset put values must have same shape as other dataset. Got {self.shape} vs. {values.shape}")
elif isinstance(values, np.ndarray):
if len(values) == self._nrows:
col_dst = list(self.values())
for i in range(self._ncols):
putmask( col_dst[i], mask, values )
else:
raise ValueError(f"Array put values must have a length equal to dataset's rows. Got {len(values)} vs. {self._nrows}")
else:
raise TypeError(f"Cannot call dataset putmask with type {type(values)}.")
## ------------------------------------------------------------
#def iterrows(self):
# """
# NOTE: This routine is slow
# It returns a struct with scalar values for each row.
# It does not preserve dtypes.
# Do not modify anything you are iterating over.
# Example:
# --------
# >>> ds=Dataset({'test':arange(10)*3, 'test2':arange(10.0)/2})
# >>> temp=[*ds.iterrows()]
# >>> temp[2]
# (2,
# # Name Type Size 0 1 2
# - ----- ------- ---- --- - -
# 0 test int32 0 27
# 1 test2 float64 0 4.5
# [2 columns])
# """
# mykeys = self.keys()
# temp_struct = TypeRegister.Struct({colname:0 for colname in mykeys})
# # for all the rows in the dataset
# for rownum in range(self._nrows):
# # for all the columns
# for colname in mykeys:
# temp_struct[colname]=self[colname][rownum]
# yield rownum, temp_struct
# ------------------------------------------------------------
    def iterrows(self):
        """
        NOTE: This routine is slow

        It returns a struct with scalar values for each row.
        It does not preserve dtypes.
        Do not modify anything you are iterating over.

        Yields
        ------
        (int, Struct)
            The row number and a *reused* Struct whose values are swapped in
            place for each row -- do not keep references across iterations.

        Examples
        --------
        >>> ds = rt.Dataset({'test': rt.arange(10)*3, 'test2': rt.arange(10.0)/2})
        >>> temp=[*ds.iterrows()]
        >>> temp[2]
        (2,
        #   Name    Type      Size   0     1   2
        -   -----   -------   ----   ---   -   -
        0   test    int32     0      27
        1   test2   float64   0      4.5
        <BLANKLINE>
        [2 columns])
        """
        full_columns = tuple(self.values())
        temp_struct = TypeRegister.Struct({})
        # make shallow copies of all lists containing column data, so original columns don't swapped out
        temp_items = self._all_items._items.copy()
        # NOTE(review): this assignment appears redundant -- it is repeated
        # below after the per-key value lists are copied; confirm before removing
        temp_struct._all_items._items = temp_items
        for k, v in temp_items.items():
            temp_items[k] = v.copy()
        # manually set item dict, number of columns
        temp_struct._all_items._items = temp_items
        temp_struct._ncols = self._ncols
        # these values will be swapped internally
        temp_vals = temp_struct._all_items.get_dict_values()
        # check if any there are any array/fastarray subclasses in the columns
        np_safe = True
        for v in full_columns:
            if TypeRegister.is_array_subclass(v):
                np_safe = False
                break
        # if there are no subclasses in the dataset, we take the fast path and call np getitem directly
        if np_safe:
            # faster to store function pointer
            npget = np.ndarray.__getitem__
            # for each row, swap out the item values in the temporary struct's item container
            for rownum in range(self._nrows):
                for ci in range(self._ncols):
                    temp_vals[ci][0] = npget(full_columns[ci],rownum)
                yield rownum, temp_struct
        else:
            # for each row, swap out the item values in the temporary struct's item container
            for rownum in range(self._nrows):
                for ci in range(self._ncols):
                    temp_vals[ci][0] = full_columns[ci][rownum]
                yield rownum, temp_struct
# ------------------------------------------------------------
def isin(self, values):
"""
Call :meth:`~rt.rt_fastarray.FastArray.isin` for each column in the `Dataset`.
Parameters
----------
values : scalar or list or array_like
A list or single value to be searched for.
Returns
-------
Dataset
Dataset of boolean arrays with the same column headers as the original dataset.
True indicates that the column element occurred in the provided values.
Notes
-----
Note: different behavior than pandas DataFrames:
* Pandas handles object arrays, and will make the comparison for each element type in the provided list.
* Riptable favors bytestrings, and will make conversions from unicode/bytes to match for operations as necessary.
* We will also accept single scalars for values.
Examples
--------
>>> data = {'nums': rt.arange(5), 'strs': rt.FA(['a','b','c','d','e'], unicode=True)}
>>> ds = rt.Dataset(data)
>>> ds.isin([2, 'b'])
# nums strs
- ----- -----
0 False False
1 False True
2 False False
3 False False
4 False False
>>> df = pd.DataFrame(data)
>>> df.isin([2, 'b'])
nums strs
0 False False
1 False True
2 True False
3 False False
4 False False
See Also
--------
pandas.DataFrame.isin()
"""
# this is repeat code from FastArray isin, but this way, the values only need to be converted once for each column
#x = values
#if isinstance(values, (bool, np.bool_, bytes, str, int, np.integer, float, np.floating)):
# x = np.array([x])
## numpy will find the common dtype (strings will always win)
#elif isinstance(x, list):
# x = np.array(x)
data = {}
for name, col in self.items():
data[name] = col.isin(values)
return type(self)(data)
# -------------------------------------------------------
@property
def imatrix(self) -> Optional[np.ndarray]:
"""
Returns the 2d array created from `imatrix_make`.
Returns
-------
imatrix : np.ndarray, optional
If `imatrix_make` was previously called, returns the 2D array created and cached internally
by that method. Otherwise, returns ``None``.
Examples
--------
>>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':np.arange(6), 'c':np.arange(10,70,10)})
>>> ds
# a b c
- -- - --
0 -3 0 10
1 -2 1 20
2 -1 2 30
3 0 3 40
4 1 4 50
5 2 5 60
>>> ds.imatrix # returns nothing since we have not called imatrix_make
>>> ds.imatrix_make()
FastArray([[-3, 0, 10],
[-2, 1, 20],
[-1, 2, 30],
[ 0, 3, 40],
[ 1, 4, 50],
[ 2, 5, 60]])
>>> ds.imatrix
FastArray([[-3, 0, 10],
[-2, 1, 20],
[-1, 2, 30],
[ 0, 3, 40],
[ 1, 4, 50],
[ 2, 5, 60]])
>>> ds.a = np.arange(6)
>>> ds
# a b c
- - - --
0 0 0 10
1 1 1 20
2 2 2 30
3 3 3 40
4 4 4 50
5 5 5 60
>>> ds.imatrix # even after changing the dataset, the matrix remains the same.
FastArray([[-3, 0, 10],
[-2, 1, 20],
[-1, 2, 30],
[ 0, 3, 40],
[ 1, 4, 50],
[ 2, 5, 60]])
"""
try:
return self._imatrix.imatrix
except:
return None
@property
def imatrix_ds(self):
"""
Returns the dataset of the 2d array created from `imatrix_make`.
Examples
--------
>>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':np.arange(6), 'c':np.arange(10,70,10)})
>>> ds
# a b c
- -- - --
0 -3 0 10
1 -2 1 20
2 -1 2 30
3 0 3 40
4 1 4 50
5 2 5 60
<BLANKLINE>
[6 rows x 3 columns] total bytes: 144.0 B
>>> ds.imatrix_make(colnames = ['a', 'c'])
FastArray([[-3, 10],
[-2, 20],
[-1, 30],
[ 0, 40],
[ 1, 50],
[ 2, 60]])
>>> ds.imatrix_ds
# a c
- -- --
0 -3 10
1 -2 20
2 -1 30
3 0 40
4 1 50
5 2 60
"""
try:
return self._imatrix.dataset
except:
return None
@property
def imatrix_cls(self):
"""
Returns the `IMatrix` class created by `imatrix_make`.
"""
try:
return self._imatrix
except:
return None
# -------------------------------------------------------
    def imatrix_make(
        self,
        dtype: Optional[Union[str, np.dtype]] = None,
        order: str = 'F',
        colnames: Optional[List[str]] = None,
        cats: bool = False,
        gb: bool = False,
        inplace: bool = True,
        retnames: bool = False
    ) -> Union[np.ndarray, Tuple[np.ndarray, List[str]]]:
        """
        Build and cache a 2D matrix (IMatrix) from the dataset's columns.

        Parameters
        ----------
        dtype : str or np.dtype, optional, default None
            Defaults to None, can force a final dtype such as ``np.float32``.
        order : {'F', 'C'}
            Defaults to 'F', can be 'C' also;
            when 'C' is used, `inplace` cannot be True since the shape will not match.
        colnames : list of str, optional
            Column names to turn into a 2d matrix.
            If None is passed, it will use all computable columns in the Dataset.
        cats : bool, default False
            If set to True will include categoricals.
        gb : bool, default False
            If set to True will include the groupby keys.
        inplace : bool, default True
            If set to True (default) will rearrange and stack the columns in the dataset to be part of the matrix.
            If set to False, the columns in the existing dataset will not be affected.
        retnames : bool, default False
            Defaults to False. If set to True will return the column names it used.

        Returns
        -------
        imatrix : np.ndarray
            A 2D array (matrix) containing the data from this `Dataset` with the specified `order`.
        colnames : list of str, optional
            If `retnames` is True, a list of the column names included in the returned matrix;
            otherwise, this list is not returned.

        Raises
        ------
        ValueError
            If `order` is invalid, incompatible with `inplace`, or no columns
            were selected.
        TypeError
            If `colnames` is not a list.

        Examples
        --------
        >>> arrsize=3
        >>> ds=rt.Dataset({'time': rt.arange(arrsize * 1.0), 'data': rt.arange(arrsize)})
        >>> ds.imatrix_make(dtype=rt.int32)
        FastArray([[0, 0],
                   [1, 1],
                   [2, 2]])
        """
        if order != 'F' and order != 'C':
            raise ValueError(f"Invalid order '{order}' specified. The order must be either 'F' or 'C'.")
        if order != 'F' and inplace:
            raise ValueError("Only the 'F' order may be specified when `inplace` is True.")
        if inplace:
            ds=self
        else:
            # shallow copy so the caller's dataset columns are not rebound below
            ds=self.copy(deep=False)
        if colnames is None:
            # default selection: all computable columns (plus categoricals if
            # requested); label columns are excluded unless gb is True
            colnames=[]
            labels = self.label_get_names()
            for colname, array in ds.items():
                append = False
                if array.iscomputable():
                    append=True
                else:
                    # todo specific check for date/datetime also
                    if isinstance(array, TypeRegister.Categorical):
                        if cats is True:
                            append=True
                    else:
                        # possibly handle
                        pass
                if append:
                    if gb is True or colname not in labels:
                        colnames.append(colname)
        if not isinstance(colnames,list):
            raise TypeError(f"Pass in a list of column names such as imatrix_make(['Exch1','Exch2', 'Exch3'])")
        if len(colnames) < 1:
            raise ValueError(f"The colnames list must contain at least one item")
        # build and cache the matrix wrapper on the (possibly copied) dataset
        ds._imatrix= IMatrix(ds, dtype=dtype, order=order, colnames=colnames)
        # reassign the columns so they view into the stacked matrix storage
        ids = ds.imatrix_ds
        for c in colnames:
            ds[c]=ids[c]
        if retnames:
            return ds._imatrix.imatrix, colnames
        else:
            return ds._imatrix.imatrix
# -------------------------------------------------------
# 2d arithmetic functions.
def imatrix_y(self, func: Union[callable, str, List[Union[callable, str]]], name: Optional[Union[str, List[str]]] = None) -> 'Dataset':
"""
Parameters
----------
func : callable or str or list of callable
Function or method name of function.
name : str or list of str, optional
Returns
-------
Dataset
Y axis calculations for the functions
Example
-------
>>> ds = rt.Dataset({'a1': rt.arange(3)%2, 'b1': rt.arange(3)})
>>> ds.imatrix_y([np.sum, np.mean])
# a1 b1 Sum Mean
- -- -- --- ----
0 0 0 0 0.00
1 1 1 2 1.00
2 0 2 2 1.00
"""
try:
if self.imatrix is None:
self.imatrix_make()
except:
raise ValueError(f'No imatrix or failed to create one. Use imatrix_make to create one.')
if not isinstance(func, list):
func = [func]
if name is not None:
if not isinstance(name, list):
name = [name]
for f, n in zip(func, name):
self._imatrix_y_internal(f, name=n)
else:
for f in func:
self._imatrix_y_internal(f)
return self
# -------------------------------------------------------
# 2d arithmetic functions.
    def _imatrix_y_internal(self, func, name: Optional[str] = None, showfilter: bool = True) -> Optional[Tuple[Any, str, callable]]:
        """
        Compute a Y-axis (row-wise, axis=1) reduction over the imatrix and
        attach it to the dataset as a summary column.

        Parameters
        ----------
        func : callable or str
            Function, or the name of a method looked up on the imatrix.
            NOTE(review): when a string is passed, ``getattr`` returns a bound
            method which is then still called with the matrix as its first
            positional argument -- confirm the target methods accept that.
        name : str, optional
            Column name for the result; defaults to the capitalized function name.
        showfilter : bool
            When False, the first (filtered) row is dropped from the result.

        Returns
        -------
        tuple or None
            (Y-axis result, column name used, func used), or None when `func`
            was not callable.
        """
        imatrix = self.imatrix
        if not callable(func):
            # string input: resolve to a method on the matrix
            func = getattr(imatrix, func)
        if callable(func):
            if name is None:
                name = func.__name__
                name = str.capitalize(name)
            row_count, col_count = imatrix.shape
            # horizontal func
            resultY = func(imatrix, axis=1)
            # possibly remove filtered top row
            if not showfilter:
                resultY = resultY[1:]
            # add the Total column to the dataset
            # BUG? check for existing colname?
            self[name]=resultY
            # register the new column as a summary column for display
            oldsummary = self.summary_get_names()
            if name not in oldsummary:
                oldsummary.append(name)
            self.summary_set_names(oldsummary)
            return resultY, name, func
        return None
# -------------------------------------------------------
# 2d arithmetic functions.
def imatrix_xy(self, func: Union[callable, str], name: Optional[str] = None, showfilter: bool = True) -> Tuple[Optional['Dataset'], Optional['Dataset'], Optional[str]]:
"""
Parameters
----------
func : str or callable
function or method name of function
name
showfilter : bool
Returns
-------
X and Y axis calculations
"""
resultY, name, func = self._imatrix_y_internal(func, name=name, showfilter=showfilter)
if resultY is not None:
imatrix = self.imatrix
row_count, col_count = imatrix.shape
# reserve an extra for the total of result
resultX = empty(col_count+1, dtype=resultY.dtype)
# based on the size...consider #imatrix.nansum(axis=0, out=resultX)
for i in range(col_count):
arrslice = imatrix[:,i]
# possibly skip over first value
if not showfilter:
arrslice =arrslice[1:]
resultX[i] = func(arrslice)
# calc total of result - cell on far right and bottom
resultX[-1] = func(resultY)
return resultX, resultY, name
return None, None, None
# -------------------------------------------------------
def imatrix_totals(self, colnames=None, name=None):
if self.imatrix is None:
self.imatrix_make(colnames=colnames)
totalsX, totalsY, name = self.imatrix_xy(np.sum, name=name)
if totalsY is not None:
# tell display that this dataset has a footer
footerdict = dict(zip(self.imatrix_ds,totalsX))
footerdict[name] = totalsX[-1]
self.footer_set_values(name, footerdict)
return self
# -------------------------------------------------------
    def fillna(self, value=None, method: Optional[str] = None, inplace: bool = False, limit: Optional[int] = None) -> Optional['Dataset']:
        """
        Returns a copy with all invalid values set to the given value.
        Optionally modify the original, this might fail if locked.

        Parameters
        ----------
        value
            A replacement value (CANNOT be a dict yet). Required when `method`
            is None.
        method : {'backfill', 'bfill', 'pad', 'ffill', None}
            * backfill/bfill: calls :meth:`~rt.rt_fastarray.FastArray.fill_backward`
            * pad/ffill: calls :meth:`~rt.rt_fastarray.FastArray.fill_forward`
            * None: calls :meth:`~rt.rt_fastarray.FastArray.replacena`
        inplace : bool, default False
            If True, modify original column arrays.
        limit : int, optional, default None
            Only valid when `method` is not None.
            The maximium number of consecutive invalid values.
            A gap with more than this will be partially filled.

        Returns
        -------
        Dataset, optional

        Raises
        ------
        KeyError
            If `method` is unrecognized, or `limit` is given without a method.
        ValueError
            If both `value` and `method` are None.

        Examples
        --------
        >>> ds = rt.Dataset({'A': rt.arange(3), 'B': rt.arange(3.0)})
        >>> ds.A[2]=ds.A.inv
        >>> ds.B[1]=np.nan
        >>> ds.fillna(0)
        #   A      B
        -   -   ----
        0   0   0.00
        1   1   0.00
        2   0   2.00

        >>> ds = rt.Dataset({'A':[np.nan, 2, np.nan, 0], 'B': [3, 4, np.nan, 1],
        ...                  'C':[np.nan, np.nan, np.nan, 5], 'D':[np.nan, 3, np.nan, 4]})
        >>> ds.fillna(method='ffill')
        #      A      B      C      D
        -   ----   ----   ----   ----
        0    nan   3.00    nan    nan
        1   2.00   4.00    nan   3.00
        2   2.00   4.00    nan   3.00
        3   0.00   1.00   5.00   4.00
        """
        # directional fills delegate to the FastArray fill routines per column
        if method is not None:
            if method in ['backfill','bfill']:
                return self.apply_cols(FastArray.fill_backward, value, inplace=inplace, limit=limit)
            if method in ['pad','ffill']:
                return self.apply_cols(FastArray.fill_forward, value, inplace=inplace, limit=limit)
            raise KeyError(f"fillna: The method {method!r} must be 'backfill', 'bfill', 'pad', 'ffill'")
        if value is None:
            raise ValueError(f"fillna: Must specify either a 'value' that is not None or a 'method' that is not None.")
        if limit is not None:
            raise KeyError(f"fillna: There is no limit when method is None")
        # plain value replacement of invalids
        return self.apply_cols(FastArray.replacena, value, inplace=inplace)
# -------------------------------------------------------
# Arithmetic functions.
def apply_cols(
    self, func_or_method_name, *args, fill_value=None, unary: bool = False,
    labels: bool = False, **kwargs
) -> Optional['Dataset']:
    """
    Apply function (or named method) on each column.

    If results are all None (*=, +=, for example), None is returned;
    otherwise a Dataset of the return values will be returned (+, *, abs);
    in this case they are expected to be scalars or vectors of same length.

    Constraints on first elem. of args (if unary is False, as for func being an arith op.).
    lhs can be::

        1. a numeric scalar
        2. a list of numeric scalars, length nrows (operating on each column)
        3. an array of numeric scalars, length nrows (operating on each column)
        4. a column vector of numeric scalars, shape (nrows, 1) (reshaped and operating on each column)
        5. a Dataset of numeric scalars, shape (nrows, k) (operating on each matching column by name)
        6. a Struct of (possibly mixed) (1), (2), (3), (4) (operating on each matching column by name)

    Parameters
    ----------
    func_or_method_name : callable or str
        Callable applied to each column, or the name of a method looked up on
        each column and called.
    args
        Arguments passed to the func call; ``args[0]`` is the lhs operand when
        not unary.
    fill_value
        The fill value to use for columns with non-computable types.

        * None: return original column in result
        * alt_func (callable): force computation with alt_func
        * scalar: apply as uniform fill value
        * dict / defaultdict: mapping of colname->fill_value specifying
          per-column behavior; each value may be any of the above.
          A column mapped to None (or absent, when not a defaultdict) keeps
          its original values; a callable forces computation via that callable.
    unary : bool
        If False (default) then enforce shape constraints on first positional arg.
    labels : bool
        If False (default) then do not apply the function to any label columns.
    kwargs
        All other kwargs are passed to func.

    Returns
    -------
    Dataset, optional

    Examples
    --------
    >>> ds = rt.Dataset({'A': rt.arange(3), 'B': rt.arange(3.0)})
    >>> ds.A[2]=ds.A.inv
    >>> ds.B[1]=np.nan
    >>> ds.apply_cols(rt.FastArray.fillna, 0)
    #   A      B
    -   -   ----
    0   0   0.00
    1   1   0.00
    2   0   2.00
    """
    # Shape/type predicates used to validate the lhs operand.
    # NOTE: these close over `nrows`, which is only bound in the non-unary branch below.
    _is_numeric = lambda _x: isinstance(_x, (int, float, np.integer, np.floating))
    _is_ok_list = lambda _x: isinstance(_x, list) and len(_x) == nrows and all(_is_numeric(_e) for _e in _x)
    _is_ok_array = lambda _x: isinstance(_x, np.ndarray) and _x.shape == (nrows,)
    _is_ok_col_vector = lambda _x: isinstance(_x, np.ndarray) and _x.shape == (nrows, 1)
    _is_for_column = lambda _x: _is_numeric(_x) or _is_ok_list(_x) or _is_ok_array(_x) or _is_ok_col_vector(_x)
    # With no positional args there is no lhs to validate: treat as unary.
    if len(args) == 0 and not unary:
        unary = True
    if not unary:
        lhs = args[0]
        nrows = self.get_nrows()
        if _is_numeric(lhs):
            pass
        elif lhs is None:
            pass
        elif _is_ok_list(lhs):
            pass
        elif _is_ok_array(lhs):
            pass
        elif _is_ok_col_vector(lhs):
            # flatten an (nrows, 1) column vector so it lines up with 1-D columns
            args = (lhs.ravel(),) + args[1:] if len(args) > 1 else (lhs.ravel(),)
        elif isinstance(lhs, Dataset) and all(_is_ok_col_vector(_v) for _k, _v in lhs.items() if _k in self):
            return self._operate_iter_input_cols(args, fill_value, func_or_method_name, kwargs, lhs)
        elif isinstance(lhs, Struct) and all(_is_for_column(_v) for _k, _v in lhs.items() if _k in self):
            # NOTE(review): a Dataset with ordinary 1-D columns appears to match
            # this branch rather than the previous one -- confirm intended.
            return self._operate_iter_input_cols(args, fill_value, func_or_method_name, kwargs, lhs)
        else:
            raise ValueError(
                f'{self.__class__.__name__}.apply_cols(): lhs must be scalar or flat list/array or column vector of length nrows (for column-wise); a Struct/Dataset of same for (row/element-wise).')

    # Otherwise unary, so just an operation on one array
    def _operate_on_array(array, func_or_method_name, *args, **kwargs):
        # Apply func (or the named method) to a computable column; for
        # non-computable columns fall back to `fval` (bound in the loop below):
        # callable -> force compute, scalar -> uniform fill, None -> pass through.
        if array.iscomputable():
            if callable(func_or_method_name):
                ret_array = func_or_method_name(array, *args, **kwargs)
            else:
                func = getattr(array, func_or_method_name)
                ret_array = func(*args, **kwargs)
        elif callable(fval):
            ret_array = fval(array, *args, **kwargs)
        elif fval is not None:
            ret_array = fval
        else:
            ret_array = array
        return ret_array

    od = {}
    for colname, array in self.items():
        # not all arrays are computable, such as *= for a string array
        if colname in self.label_get_names() and not labels:
            # label columns pass through untouched unless labels=True
            od[colname] = array
        else:
            if isinstance(fill_value, dict):
                # try/catch instead of get() to support defaultdict usage
                try:
                    fval = fill_value[colname]
                except KeyError:
                    fval = None
            else:
                fval = fill_value
            od[colname] = _operate_on_array(array, func_or_method_name, *args, **kwargs)
    if all(_x is None for _x in od.values()):
        # e.g. in-place ops where every per-column call returned None
        return None
    try:
        ret_obj = type(self)(od)
    except Exception:
        raise ValueError(f"the return {od} could not be made into a dataset.")

    # Handle summary columns: rename each to a positional 'Summary<i>' name.
    summary_colnames = []
    if self.summary_get_names():
        for i, name in enumerate(self.summary_get_names()):
            summary_colnames += ['Summary' + str(i)]
            ret_obj.col_rename(name, summary_colnames[i])

    # Handle footers: apply the same operation to each footer row (labels skipped).
    # NOTE(review): _operate_on_array here reuses `fval` from the final column
    # iteration above -- confirm that is intended.
    footers = {}
    if self.footer_get_values():
        try:
            num_labels = len(self.label_get_names()) if self.label_get_names() else 0
            arrays = []
            for self_footervals in self.footer_get_values().values():
                array = FastArray(self_footervals[num_labels:])
                arrays += [_operate_on_array(array, func_or_method_name, *args, **kwargs)]
            footers = self._construct_new_footers(arrays, num_labels, summary_colnames)
        except:
            # best-effort: any failure simply drops the footers
            footers = None
    ret_obj = self._add_labels_footers_summaries(ret_obj, summary_colnames, footers)
    return ret_obj
def _construct_new_footers(self, arrays, num_labels, summary_colnames):
footers = {}
try:
for arr in arrays:
col_vals = {}
summary_colnum = 0
for i_raw, col_name in enumerate(list(self.keys())):
i = i_raw - num_labels
if i < 0:
continue
if col_name in self.summary_get_names():
col_vals[summary_colnames[summary_colnum]] = arr[i]
summary_colnum += 1
else:
col_vals[col_name] = arr[i]
footers['Footer' + str(len(footers))] = col_vals
return footers
except:
return None
def _add_labels_footers_summaries(self, ret_obj, summary_colnames, footers):
if self.label_get_names():
ret_obj.label_set_names(self.label_get_names())
if summary_colnames:
ret_obj.summary_set_names(summary_colnames)
if footers:
for label, footerdict in footers.items():
ret_obj.footer_set_values(label, footerdict)
return ret_obj
def _operate_iter_input_cols(self, args, fill_value, func_or_method_name, kwargs, lhs):
    """
    Operate iteratively across all columns in the dataset and matching ones
    in lhs.

    In order to operate on summary columns and footer rows, such as those
    generated by accum2, require that self and lhs conform in the sense
    of having the same number of labels, footers, and summary columns,
    with all label columns to the left and all summary columns to the
    right. The operation is then performed on positionally corresponding
    elements in the summary columns and footer rows, skipping the label column(s).

    Parameters
    ----------
    args : tuple
        Positional args for the operation; ``args[0]`` is replaced per-column
        by the matching lhs column.
    fill_value
        Forwarded to apply_cols for non-computable columns.
    func_or_method_name : callable or str
        Operation to apply (see apply_cols).
    kwargs : dict
        Keyword args forwarded to apply_cols.
    lhs : Struct or Dataset
        Holds the per-column right-hand operands, matched by name.

    Returns
    -------
    Dataset, optional
    """
    od = {}
    conform = self._labels_footers_summaries_conform(lhs)
    summary_colnames = []
    for colname in self.keys():
        lhs_colname = colname
        od_colname = colname
        if conform and self.summary_get_names() and colname in self.summary_get_names():
            # summary columns are matched positionally and renamed 'Summary<i>'
            od_colname = 'Summary' + str(len(summary_colnames))
            lhs_colname = lhs.summary_get_names()[len(summary_colnames)]
            summary_colnames += [od_colname]
        if lhs_colname in lhs and colname not in self.label_get_names():
            # Wrap the single column in a throwaway Dataset so apply_cols
            # handles fill_value / non-computable logic uniformly.
            self1 = Dataset({'a': self[colname]})
            _v = getattr(lhs, lhs_colname)
            args1 = (_v,) + args[1:] if len(args) > 1 else (_v,)
            self1 = self1.apply_cols(func_or_method_name, *args1, fill_value=fill_value, **kwargs)
            od[od_colname] = getattr(self1, 'a')
        else:
            # label columns and columns with no lhs match pass through unchanged
            od[od_colname] = getattr(self, colname)
    if all(_x is None for _x in od.values()):
        return None

    # Handle footers: operate element-wise on positionally corresponding
    # footer rows (labels skipped), only when both sides conform.
    footers = {}
    if conform and self.footer_get_values():
        num_labels = len(self.label_get_names()) if self.label_get_names() else 0
        arrays = []
        for self_footervals, lhs_footervals in zip(
                self.footer_get_values(fill_value=np.nan).values(),
                lhs.footer_get_values(fill_value=np.nan).values()):
            self1 = Dataset({'v1': self_footervals[num_labels:]})
            _v = FastArray(lhs_footervals[num_labels:])
            args1 = (_v,) + args[1:] if len(args) > 1 else (_v,)
            self1 = self1.apply_cols(func_or_method_name, *args1, fill_value=fill_value, **kwargs)
            arrays += [self1['v1']]
        footers = self._construct_new_footers(arrays, num_labels, summary_colnames)
    ret_obj = self._add_labels_footers_summaries(type(self)(od), summary_colnames, footers)
    return ret_obj
def _labels_footers_summaries_conform(self, other):
    """
    Return True if `other` is a Dataset whose labels, footers and summary
    columns line up with this dataset's: same footer row count and lengths,
    label columns occupying the same leftmost positions, and summary
    columns occupying the same rightmost positions.
    """
    def _footers_conform():
        # Both sides must have footers or neither; when present, the row
        # counts and each corresponding row's length must match.
        self_footers = self.footer_get_values()
        other_footers = other.footer_get_values()
        if bool(self_footers) != bool(other_footers):
            return False
        if self_footers:
            if len(self_footers) != len(other_footers):
                return False
            for v1, v2 in zip(self_footers.values(), other_footers.values()):
                if len(v1) != len(v2):
                    return False
        return True

    def _columns_conform(func, left_or_right='left'):
        # The named columns (labels or summaries) must sit at identical
        # positions in both datasets, anchored to the given edge.
        def _get_indexes(ds, names):
            return [ds.keys().index(names[i]) for i in range(len(names))]
        self_names = func(self)
        other_names = func(other)
        if bool(self_names) != bool(other_names):
            return False
        if self_names:
            self_indexes = _get_indexes(self, self_names)
            other_indexes = _get_indexes(other, other_names)
            if self_indexes != other_indexes:
                return False
            if left_or_right == 'left':
                # labels must be the leftmost columns
                if self_indexes != list(range(len(self_names))):
                    return False
            if left_or_right == 'right':
                # summaries must be the rightmost columns
                if self_indexes != list(range(len(self.keys())))[-len(self_names):]:
                    return False
        return True

    if isinstance(other, Dataset) and _footers_conform() and\
            _columns_conform(Dataset.label_get_names, 'left') and\
            _columns_conform(Dataset.summary_get_names, 'right'):
        return True
    else:
        return False
# In-place arithmetic/bitwise operators: each delegates its dunder name to
# apply_cols, which dispatches it to every (non-label) column.
def __iadd__(self, lhs):
    return self.apply_cols('__iadd__', lhs)

def __isub__(self, lhs):
    return self.apply_cols('__isub__', lhs)

def __imul__(self, lhs):
    return self.apply_cols('__imul__', lhs)

# def __imatmul__(self, lhs): return self.apply_cols('__imatmul__', lhs)
def __itruediv__(self, lhs):
    return self.apply_cols('__itruediv__', lhs)

def __ifloordiv__(self, lhs):
    return self.apply_cols('__ifloordiv__', lhs)

def __imod__(self, lhs):
    return self.apply_cols('__imod__', lhs)

def __ipow__(self, lhs, modulo=None):
    # three-arg pow(a, b, m) support: forward modulo only when supplied
    if modulo is not None:
        return self.apply_cols('__ipow__', lhs, modulo)
    else:
        return self.apply_cols('__ipow__', lhs)

def __ilshift__(self, lhs):
    return self.apply_cols('__ilshift__', lhs)

def __irshift__(self, lhs):
    return self.apply_cols('__irshift__', lhs)

def __iand__(self, lhs):
    return self.apply_cols('__iand__', lhs)

def __ixor__(self, lhs):
    return self.apply_cols('__ixor__', lhs)

def __ior__(self, lhs):
    return self.apply_cols('__ior__', lhs)
# Not all 'reflected' ops are defined (for example 5<<ds), are not reasonable to support;
# divmod(a, b) returns two values, maybe support one day returning pair of datasets?
# Reflected binary operators (scalar op Dataset): delegated column-wise via apply_cols.
def __radd__(self, lhs):
    return self.apply_cols('__radd__', lhs)

def __rsub__(self, lhs):
    return self.apply_cols('__rsub__', lhs)

def __rmul__(self, lhs):
    return self.apply_cols('__rmul__', lhs)

def __rtruediv__(self, lhs):
    return self.apply_cols('__rtruediv__', lhs)

def __rfloordiv__(self, lhs):
    return self.apply_cols('__rfloordiv__', lhs)

def __rmod__(self, lhs):
    return self.apply_cols('__rmod__', lhs)

def __rpow__(self, lhs):
    return self.apply_cols('__rpow__', lhs)

def __rand__(self, lhs):
    return self.apply_cols('__rand__', lhs)

def __rxor__(self, lhs):
    return self.apply_cols('__rxor__', lhs)

def __ror__(self, lhs):
    return self.apply_cols('__ror__', lhs)
# Ordinary binary operators (Dataset op x): delegated column-wise via apply_cols.
def __add__(self, lhs):
    return self.apply_cols('__add__', lhs)

def __sub__(self, lhs):
    return self.apply_cols('__sub__', lhs)

def __mul__(self, lhs):
    return self.apply_cols('__mul__', lhs)

# def __matmul__(self, lhs): return self.apply_cols('__matmul__', lhs)
def __truediv__(self, lhs):
    return self.apply_cols('__truediv__', lhs)

def __floordiv__(self, lhs):
    return self.apply_cols('__floordiv__', lhs)

def __mod__(self, lhs):
    return self.apply_cols('__mod__', lhs)

def __pow__(self, lhs, modulo=None):
    # three-arg pow(a, b, m) support: forward modulo only when supplied
    if modulo is not None:
        return self.apply_cols('__pow__', lhs, modulo)
    else:
        return self.apply_cols('__pow__', lhs)

def __lshift__(self, lhs):
    return self.apply_cols('__lshift__', lhs)

def __rshift__(self, lhs):
    return self.apply_cols('__rshift__', lhs)

def __and__(self, lhs):
    return self.apply_cols('__and__', lhs)

def __xor__(self, lhs):
    return self.apply_cols('__xor__', lhs)

def __or__(self, lhs):
    return self.apply_cols('__or__', lhs)

# Unary operators: applied column-wise with unary=True (no lhs validation).
def __neg__(self):
    return self.apply_cols('__neg__', unary=True)

def __pos__(self):
    return self.apply_cols('__pos__', unary=True)

def __abs__(self):
    return self.apply_cols('__abs__', unary=True)

def __invert__(self):
    return self.apply_cols('__invert__', unary=True)
def abs(self) -> 'Dataset':
    """
    Return a dataset where all elements are replaced, as appropriate, by their absolute value.

    Returns
    -------
    Dataset

    Examples
    --------
    >>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':3*['A', 'B'], 'c':3*[True, False]})
    >>> ds
    #    a   b       c
    -   --   -   -----
    0   -3   A    True
    1   -2   B   False
    2   -1   A    True
    3    0   B   False
    4    1   A    True
    5    2   B   False

    >>> ds.abs()
    #   a   b       c
    -   -   -   -----
    0   3   A    True
    1   2   B   False
    2   1   A    True
    3   0   B   False
    4   1   A    True
    5   2   B   False
    """
    # Delegates to __abs__ above, which applies abs column-wise via apply_cols.
    return abs(self)
@property
def dtypes(self) -> Mapping[str, np.dtype]:
    """
    Dictionary mapping each column name to that column's numpy dtype.

    Returns
    -------
    dict
        Dictionary containing the dtype for each column in the Dataset.
    """
    result = {}
    for colname in self.keys():
        result[colname] = getattr(self, colname).dtype
    return result
def astype(self, new_type, ignore_non_computable: bool = True):
    """
    Return a new Dataset w/ changed types.
    Will ignore string and categorical columns unless forced.
    Do not do this unless you know they will convert nicely.

    Parameters
    ----------
    new_type : a suitable type object for each row
    ignore_non_computable : bool, default True
        If True (the default), non-computable columns (e.g. strings,
        categoricals) are passed through unchanged; if False, they are
        forcibly converted via ``astype`` as well.

    Returns
    -------
    Dataset
        A new Dataset w/ changed types.

    Examples
    --------
    >>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':3*['A', 'B'], 'c':3*[True, False]})
    >>> ds
    #    a   b       c
    -   --   -   -----
    0   -3   A    True
    1   -2   B   False
    2   -1   A    True
    3    0   B   False
    4    1   A    True
    5    2   B   False
    <BLANKLINE>
    [6 rows x 3 columns] total bytes: 36.0 B
    >>> ds.astype(int)
    #    a   b   c
    -   --   -   -
    0   -3   A   1
    1   -2   B   0
    2   -1   A   1
    3    0   B   0
    4    1   A   1
    5    2   B   0
    <BLANKLINE>
    [6 rows x 3 columns] total bytes: 54.0 B
    >>> ds.astype(bool)
    #       a   b       c
    -   -----   -   -----
    0    True   A    True
    1    True   B   False
    2    True   A    True
    3   False   B   False
    4    True   A    True
    5    True   B   False
    <BLANKLINE>
    [6 rows x 3 columns] total bytes: 18.0 B
    """
    # fill_value=None makes apply_cols pass non-computable columns through
    # untouched; the callable forces conversion when that behavior is disabled.
    fval = None if ignore_non_computable else (lambda _v, _t: _v.astype(_t))
    return self.apply_cols('astype', new_type, unary=True, fill_value=fval)
# -------------------------------------------------------------
def one_hot_encode(self, columns: Optional[List[str]] = None, exclude: Optional[Union[str, List[str]]] = None) -> None:
    """
    Replaces categorical columns with one-hot-encoded columns for their categories.
    Original columns will be removed from the dataset.
    Default is to encode all categorical columns. Otherwise, certain columns can be specified.
    Also an optional exclude list for convenience.

    Parameters
    ----------
    columns : list of str, optional
        Specify columns to encode (if set, `exclude` param will be ignored).
    exclude : str or list of str, optional
        Exclude certain columns from being encoded.
    """
    # Default to every column; honor the exclude list only in that case.
    if columns is None:
        columns = self.keys()
        if exclude is not None:
            excluded = exclude if isinstance(exclude, list) else [exclude]
            columns = [c for c in columns if c not in excluded]
    encoded_names = []
    for colname in columns:
        column = getattr(self, colname)
        if isinstance(column, TypeRegister.Categorical):
            encoded_names.append(colname)
            category_names, onehot_arrays = column.one_hot_encode()
            # add one boolean-style column per category, named '<col>__<category>'
            for category, onehot in zip(category_names, onehot_arrays):
                setattr(self, colname + '__' + category, onehot)
    # drop the original categorical columns that were expanded
    self.col_remove(encoded_names)
def head(self, n: int = 20) -> 'Dataset':
    """
    Return view into beginning of Dataset.

    Parameters
    ----------
    n : int
        Number of rows at the head to return.

    Returns
    -------
    Dataset
        A new dataset which is a view into the original.
    """
    # A never-initialized row count is normalized to 0 (side effect preserved).
    if self._nrows is None:
        self._nrows = 0
    return self[:min(self._nrows, n), :]
def tail(self, n: int = 20) -> 'Dataset':
    """
    Return view into end of Dataset.

    Parameters
    ----------
    n : int
        Number of rows at the tail to return.

    Returns
    -------
    Dataset
        A new dataset which is a view into the original.
    """
    nrows = self._nrows
    if nrows is None:
        # normalize the row count and return an empty view
        self._nrows = 0
        return self[:0, :]
    return self[-min(nrows, n):, :]
def dhead(self, n: int = 0) -> None:
    """
    Displays the head of the Dataset. Compare with :meth:`~rt.rt_dataset.Dataset.head` which returns a new Dataset.
    """
    table = DisplayTable()
    # n == 0 means "use the configured default head size"
    row_count = table.options.HEAD_ROWS if n == 0 else n
    print(self.head(n=row_count)._V)
def dtail(self, n: int = 0) -> None:
    """
    Displays the tail of the Dataset. Compare with :meth:`~rt.rt_dataset.Dataset.tail` which returns a new Dataset.
    """
    table = DisplayTable()
    # n == 0 means "use the configured default tail size"
    row_count = table.options.TAIL_ROWS if n == 0 else n
    print(self.tail(n=row_count))
def asrows(self, as_type: Union[str, type] = 'Dataset', dtype: Optional[Union[str, np.dtype]] = None):
    """
    Iterate over rows in any number of ways, set as_type as appropriate.

    When some columns are strings (unicode or byte) and as_type is 'array',
    best to set dtype=object.

    Parameters
    ----------
    as_type : {'Dataset', 'Struct', 'dict', 'OrderedDict', 'namedtuple', 'tuple', 'list', 'array', 'iter'}
        A string selector which determines return type of iteration, defaults to 'Dataset'.
        A type object (e.g. ``dict``) may also be passed; its ``__name__`` is used.
    dtype : str or np.dtype, optional
        For ``as_type='array'``; if set, force the numpy type of the returned array. Defaults to None.

    Returns
    -------
    iterator over selected type.
    """
    # Accept a type object in place of its name.
    if type(as_type) is type:
        as_type = as_type.__name__
    if as_type == 'Dataset':
        # special case treatment results in large speedup
        for _i in range(self.get_nrows()):
            yield self._copy(rows=[_i])
        return
    elif as_type == 'Struct':
        # default-arg binding (_c=...) captures the column names once
        func = lambda _v, _c=list(self): Struct(dict(zip(_c, _v)))
    elif as_type == 'dict':
        func = lambda _v, _c=list(self): dict(zip(_c, _v))
    elif as_type == 'OrderedDict':
        from collections import OrderedDict
        func = lambda _v, _c=list(self): OrderedDict(zip(_c, _v))
    elif as_type == 'namedtuple':
        DatasetRow = namedtuple('DatasetRow', list(self))
        func = lambda _v, _dr=DatasetRow: _dr(*_v)
    elif as_type == 'tuple':
        func = tuple
    elif as_type == 'list':
        func = list
    elif as_type == 'array':
        func = lambda _v, _dt=dtype: np.array(list(_v), dtype=_dt)
    elif as_type in {'iter', 'iterator'}:
        # yield a lazy generator over the row's values, one element per column
        cols = list(self.values())
        for _i in range(self.get_nrows()):
            yield (_c[_i] for _c in cols)
        return
    else:
        raise ValueError(f'Dataset.asrows(as_type={as_type!r}) not valid.')
    # Common path: convert each row's value generator with the chosen factory.
    cols = list(self.values())
    for _i in range(self.get_nrows()):
        yield func(_c[_i] for _c in cols)
def tolist(self):
    """
    Return list of lists of values, by rows.

    Returns
    -------
    list of lists.
    """
    if self.size > 10_000:
        warnings.warn(f"Dataset has {self.size} elements. Performance will suffer when converting values to python lists.")
    # TJD this code is slow and needs review
    col_names = list(self.keys())
    return [[self[row, col] for col in col_names] for row in range(self.get_nrows())]
def to_pandas(self, unicode: bool = True, use_nullable: bool = True) -> 'pd.DataFrame':
    """
    Create a pandas DataFrame from this riptable.Dataset.
    Will attempt to preserve single-key categoricals, otherwise will appear as
    an index array. Any byte strings will be converted to unicode unless unicode=False.

    Parameters
    ----------
    unicode : bool
        Set to False to keep byte strings as byte strings. Defaults to True.
    use_nullable : bool
        Whether to use pandas nullable integer dtype for integer columns (default: True).

    Returns
    -------
    pandas.DataFrame

    Raises
    ------
    NotImplementedError
        If a ``CategoryMode`` is not handled for a given column.

    Notes
    -----
    As of Pandas v1.1.0 ``pandas.Categorical`` does not handle riptable ``CategoryMode``s for ``Dictionary``,
    ``MultiKey``, nor ``IntEnum``. Converting a Categorical of these category modes will result in loss of information
    and emit a warning. Although the column values will be respected, the underlying category codes will be remapped
    as a single key categorical.

    See Also
    --------
    riptable.Dataset.from_pandas
    """
    import pandas as pd

    def _to_unicode_if_string(arr):
        # byte-string columns ('S') widen to unicode ('U') for pandas display
        if arr.dtype.char == 'S':
            arr = arr.astype('U')
        return arr

    data = self.asdict()
    for key, col in self.items():
        dtype = col.dtype
        if isinstance(col, TypeRegister.Categorical):
            if col.category_mode in (CategoryMode.Default, CategoryMode.StringArray, CategoryMode.NumericArray):
                pass  # already compatible with pandas; no special handling needed
            elif col.category_mode in (CategoryMode.Dictionary, CategoryMode.MultiKey, CategoryMode.IntEnum):
                # Pandas does not have a notion of a IntEnum, Dictionary, and Multikey category mode.
                # Encode dictionary codes to a monotonically increasing sequence and construct
                # pandas Categorical as if it was a string or numeric array category mode.
                old_category_mode = col.category_mode
                col = col.as_singlekey()
                warnings.warn(f"Dataset.to_pandas: column '{key}' converted from {repr(CategoryMode(old_category_mode))} to {repr(CategoryMode(col.category_mode))}.")
            else:
                raise NotImplementedError(f'Dataset.to_pandas: Unhandled category mode {repr(CategoryMode(col.category_mode))}')
            # riptable categoricals may be 1-based; pandas codes are 0-based
            base_index = 0 if col.base_index is None else col.base_index
            codes = np.asarray(col) - base_index
            categories = _to_unicode_if_string(col.category_array) if unicode else col.category_array
            data[key]: pd.Categorical = pd.Categorical.from_codes(codes, categories=categories)
        elif isinstance(col, TypeRegister.DateTimeNano):
            # nanosecond timestamps are stored UTC; convert to the column's display tz
            utc_datetime = pd.DatetimeIndex(col, tz='UTC')
            tz = _RIPTABLE_TO_PANDAS_TZ[col._timezone._to_tz]
            tz_datetime = utc_datetime.tz_convert(tz)
            data[key] = tz_datetime
        elif isinstance(col, TypeRegister.TimeSpan):
            data[key] = pd.to_timedelta(col)
        # TODO: riptable.DateSpan doesn't have a counterpart in pandas, what do we want to do?
        elif use_nullable and np.issubdtype(dtype, np.integer):
            # N.B. Has to use the same dtype for `isin` otherwise riptable will convert the dtype
            # and the invalid value.
            is_invalid = col.isin(FastArray([INVALID_DICT[dtype.num]], dtype=dtype))
            # N.B. Have to make a copy of the array to numpy array otherwise pandas seg
            # fault in DataFrame.
            # NOTE: not all versions of pandas have pd.arrays
            if hasattr(pd, 'arrays'):
                data[key] = pd.arrays.IntegerArray(np.array(col), mask=is_invalid)
            else:
                data[key] = np.array(col)
        else:
            data[key] = _to_unicode_if_string(col) if unicode else col
    return pd.DataFrame(data)
def as_pandas_df(self):
    """
    This method is deprecated, please use riptable.Dataset.to_pandas.

    Create a pandas DataFrame from this riptable.Dataset.
    Will attempt to preserve single-key categoricals, otherwise will appear as
    an index array. Any bytestrings will be converted to unicode.

    Returns
    -------
    pandas.DataFrame

    See Also
    --------
    riptable.Dataset.to_pandas
    riptable.Dataset.from_pandas
    """
    # Emit the deprecation notice, then delegate to the replacement API.
    warnings.warn(
        'as_pandas_df is deprecated and will be removed in future release, '
        'please use "to_pandas" method',
        FutureWarning,
        stacklevel=2,
    )
    return self.to_pandas()
@classmethod
def from_pandas(cls, df: 'pd.DataFrame', tz: str = 'UTC') -> 'Dataset':
    """
    Creates a riptable Dataset from a pandas DataFrame. Pandas categoricals
    and datetime arrays are converted to their riptable counterparts.
    Any timezone-unaware datetime arrays (or those using a timezone not
    recognized by riptable) are localized to the timezone specified by the
    tz parameter.

    Recognized pandas timezones:
        UTC, GMT, US/Eastern, and Europe/Dublin

    Parameters
    ----------
    df : pandas.DataFrame
        The pandas DataFrame to be converted
    tz : string
        A riptable-supported timezone ('UTC', 'NYC', 'DUBLIN', 'GMT') as fallback timezone.

    Returns
    -------
    riptable.Dataset

    See Also
    --------
    riptable.Dataset.to_pandas
    """
    import pandas as pd
    data = {}
    for key in df.columns:
        col = df[key]
        dtype = col.dtype
        dtype_kind = dtype.kind
        # Detect a pandas categorical column, with a fallback for pandas
        # versions that predate pd.CategoricalDtype.
        iscat = False
        if hasattr(pd, 'CategoricalDtype'):
            iscat = isinstance(dtype, pd.CategoricalDtype)
        else:
            # NOTE(review): legacy pandas exposed categoricals as dtype.num == 100 -- confirm.
            iscat = dtype.num == 100
        if iscat or isinstance(col, pd.Categorical):
            codes = col.cat.codes
            categories = col.cat.categories
            # check for newer version of pandas
            if hasattr(codes, 'to_numpy'):
                codes = codes.to_numpy()
                categories = categories.to_numpy()
            else:
                codes = np.asarray(codes)
                categories = np.asarray(categories)
            # shift codes by one: riptable categoricals reserve 0 for invalid
            data[key] = TypeRegister.Categorical(codes + 1, categories=categories)
        elif hasattr(pd, 'Int8Dtype') and \
                isinstance(dtype, (pd.Int8Dtype, pd.Int16Dtype, pd.Int32Dtype, pd.Int64Dtype,
                                   pd.UInt8Dtype, pd.UInt16Dtype, pd.UInt32Dtype,
                                   pd.UInt64Dtype)):
            # nullable integers: replace pandas NA with the riptable invalid value
            data[key] = np.asarray(col.fillna(INVALID_DICT[dtype.numpy_dtype.num]),
                                   dtype=dtype.numpy_dtype)
        elif dtype_kind == 'M':
            # datetime64: map the pandas timezone to a riptable one, falling
            # back to `tz` when the column is naive or the zone is unrecognized
            try:
                ptz = str(dtype.tz)
                try:
                    _tz = _PANDAS_TO_RIPTABLE_TZ[ptz]
                except KeyError:
                    raise ValueError(
                        "Unable to convert a datetime array with timezone={}".format(ptz))
            except AttributeError:
                # no .tz attribute => timezone-naive column
                _tz = tz
            data[key] = TypeRegister.DateTimeNano(np.asarray(col, dtype='i8'),
                                                  from_tz='UTC', to_tz=_tz)
        elif dtype_kind == 'm':
            # timedelta64 -> TimeSpan over the raw nanosecond counts
            data[key] = TypeRegister.TimeSpan(np.asarray(col, dtype='i8'))
        elif dtype_kind == 'O':
            # object columns: guess numeric vs string from the first element
            if len(col) > 0:
                first_element = col.iloc[0]
                if isinstance(first_element, (int, float, np.number)):
                    # An object array with number (int or float) in it probably means there is
                    # NaN in it so convert to float64.
                    new_col = np.asarray(col, dtype='f8')
                else:
                    try:
                        new_col = np.asarray(col, dtype='S')
                    except UnicodeEncodeError:
                        # non-ASCII content: fall back to unicode
                        new_col = np.asarray(col, dtype='U')
            else:
                new_col = np.asarray(col, dtype='S')
            data[key] = new_col
        else:
            # plain numeric/bool columns copy over as-is
            data[key] = df[key]
    return cls(data)
@staticmethod
def from_arrow(
    tbl: 'pa.Table', zero_copy_only: bool = True, writable: bool = False, auto_widen: bool = False,
    fill_value: Optional[Mapping[str, Any]] = None
) -> 'Dataset':
    """
    Convert a pyarrow `Table` to a riptable `Dataset`.

    Parameters
    ----------
    tbl : pyarrow.Table
    zero_copy_only : bool, default True
        If True, an exception will be raised if the conversion to a `FastArray` would require copying the
        underlying data (e.g. in presence of nulls, or for non-primitive types).
    writable : bool, default False
        For `FastArray`s created with zero copy (view on the Arrow data), the resulting array is not writable (Arrow data is immutable).
        By setting this to True, a copy of the array is made to ensure it is writable.
    auto_widen : bool, optional, default to False
        When False (the default), if an arrow array contains a value which would be considered
        the 'invalid'/NA value for the equivalent dtype in a `FastArray`, raise an exception.
        When True, such values are handled by `FastArray.from_arrow` instead of raising
        (presumably by widening to a larger dtype -- see `FastArray.from_arrow` for the
        exact semantics; this flag is simply forwarded there).
    fill_value : Mapping[str, int or float or str or bytes or bool], optional, defaults to None
        Optional mapping providing non-default fill values to be used. May specify as many or as few columns
        as the caller likes. When None (or for any columns which don't have a fill value specified in the mapping)
        the riptable invalid value for the column (given it's dtype) will be used.

    Returns
    -------
    Dataset

    Raises
    ------
    RuntimeError
        If a column is not a pyarrow `Array`/`ChunkedArray` and so cannot be converted.

    Notes
    -----
    This function does not currently support pyarrow's nested Tables. A future version of riptable may support
    nested Datasets in the same way (where a Dataset contains a mixture of arrays/columns or nested Datasets having
    the same number of rows), which would make it trivial to support that conversion.
    """
    import pyarrow as pa
    ds_cols = {}
    for col_name, col in zip(tbl.column_names, tbl.columns):
        if isinstance(col, (pa.Array, pa.ChunkedArray)):
            # per-column conversion; flags are forwarded to FastArray.from_arrow
            rt_arr = FastArray.from_arrow(col, zero_copy_only=zero_copy_only, writable=writable, auto_widen=auto_widen)
        else:
            # Unknown/unsupported type being used as a column -- can't convert.
            raise RuntimeError(f"Unable to convert column '{col_name}' from object of type '{type(col)}'.")
        ds_cols[col_name] = rt_arr
    return Dataset(ds_cols)
def to_arrow(self, *, preserve_fixed_bytes: bool = False, empty_strings_to_null: bool = True) -> 'pa.Table':
    """
    Convert a riptable `Dataset` to a pyarrow `Table`.

    Parameters
    ----------
    preserve_fixed_bytes : bool, optional, defaults to False
        For `FastArray` columns which are ASCII string arrays (dtype.kind == 'S'),
        set this parameter to True to produce a fixed-length binary array
        instead of a variable-length string array.
    empty_strings_to_null : bool, optional, defaults To True
        For `FastArray` columns which are ASCII or Unicode string arrays,
        specify True for this parameter to convert empty strings to nulls in the output.
        riptable inconsistently recognizes the empty string as an 'invalid',
        so this parameter allows the caller to specify which interpretation
        they want.

    Returns
    -------
    pyarrow.Table

    Raises
    ------
    RuntimeError
        If any column fails to convert; the failing column's name is included
        and the original exception is chained as the cause.

    Notes
    -----
    TODO: Maybe add a ``destroy`` bool parameter here to indicate the original arrays should be deleted
    immediately after being converted to a pyarrow array? We'd need to handle the case where the
    pyarrow array object was created in "zero-copy" style and wraps our original array (vs. a new
    array having been allocated via pyarrow); in that case, it won't be safe to delete the original
    array. Or, maybe we just call 'del' anyway to decrement the object's refcount so it can be
    cleaned up sooner (if possible) vs. waiting for this whole method to complete and the GC and
    riptable "Recycler" to run?
    """
    import pyarrow as pa

    # Convert each of the columns to a pyarrow array.
    arrow_col_dict = {}
    for col_name in self.keys():
        orig_col = self[col_name]
        try:
            # Convert the column/array using the FastArray.to_arrow() method (or the inherited overload
            # for derived classes). This allows additional options to be passed when converting, to give
            # callers more flexibility.
            arrow_col = orig_col.to_arrow(
                preserve_fixed_bytes=preserve_fixed_bytes,
                empty_strings_to_null=empty_strings_to_null
            )
        except BaseException as exc:
            # Create another exception which wraps the given exception and provides
            # the column name in the error message to make it easier to diagnose issues.
            raise RuntimeError(f"Unable to convert column '{col_name}' to a pyarrow array.") from exc
        arrow_col_dict[col_name] = arrow_col

    # Create the pyarrow.Table from the dictionary of pyarrow arrays.
    return pa.table(arrow_col_dict)
@staticmethod
def _axis_key(axis):
try:
return {0: 0, 'c': 0, 'C': 0, 'col': 0, 'COL': 0, 'column': 0, 'COLUMN': 0,
1: 1, 'r': 1, 'R': 1, 'row': 1, 'ROW': 1,
None: None, 'all': None, 'ALL': None}[axis]
except KeyError:
raise NotImplementedError(f'Not a valid value for axis: {axis!r}.')
# -------------------------------------------------------------
def any(self, axis: Optional[int] = 0, as_dataset: bool = True):
    """
    Returns truth 'any' value along `axis`. Behavior for ``axis=None`` differs from pandas!

    Parameters
    ----------
    axis : int, optional, default axis=0
        * axis=0 (dflt.) -> over columns (returns Struct (or Dataset) of bools)
          string synonyms: c, C, col, COL, column, COLUMN
        * axis=1 -> over rows (returns array of bools)
          string synonyms: r, R, row, ROW
        * axis=None -> over rows and columns (returns bool)
          string synonyms: all, ALL
    as_dataset : bool
        When ``axis=0``, return Dataset instead of Struct. Defaults to True.

    Returns
    -------
    Struct (or Dataset) or list or bool
    """
    def _col_any(_col):
        # fall back to builtin any() for columns whose .any() is unsupported
        try:
            return bool(_col.any())
        except TypeError:
            return any(_col)
    axis = self._axis_key(axis)
    cond_rtn_type = type(self) if as_dataset else Struct
    if axis == 0:
        return cond_rtn_type({_cn: _col_any(_val) for _cn, _val in self.items()})
    if axis is None:
        return any(_col_any(_val) for _cn, _val in self.items())
    if axis == 1:
        # for each col, !=0 to get back bool array. then inplace OR all those results, careful with string arrays
        temparray = zeros(len(self), dtype=bool)
        for arr in self.values():
            # dtype.num <= 13 covers numpy's bool/integer/float type codes
            # (excludes complex, object and string types) -- NOTE(review): confirm.
            if arr.dtype.num <= 13:
                # inplace OR for numerical data
                # for cats we will assume 0 is the invalid and !=0 check works
                # not sure about nan handling
                temparray += arr != 0
            else:
                # care about string array?
                if arr.dtype.char in 'US':
                    # non-empty string counts as True
                    temparray += arr != ''
                else:
                    # skip this datatype
                    pass
        return temparray
    raise NotImplementedError('Dataset.any(axis=<0, 1, None>)')
# -------------------------------------------------------------
    def duplicated(self, subset: Optional[Union[str, List[str]]] = None, keep: Union[bool, str] = 'first'):
        """
        Return a boolean FastArray set to True where duplicate rows exist,
        optionally only considering certain columns.

        Parameters
        ----------
        subset : str or list of str, optional
            A column label or list of column labels to inspect for duplicate values.
            When ``None``, all columns will be examined.
        keep : {'first', 'last', False}, default 'first'
            * ``first`` : keep duplicates except for the first occurrence.
            * ``last`` : keep duplicates except for the last occurrence.
            * False : set to True for all duplicates.

        Returns
        -------
        FastArray
            Boolean array, one element per row; True for rows considered
            duplicates under `keep`.

        Examples
        --------
        >>> ds=rt.Dataset({'somenans': [0., 1., 2., rt.nan, 0., 5.], 's2': [0., 1., rt.nan, rt.nan, 0., 5.]})
        >>> ds
        #   somenans     s2
        -   --------   ----
        0       0.00   0.00
        1       1.00   1.00
        2       2.00    nan
        3        nan    nan
        4       0.00   0.00
        5       5.00   5.00
        >>> ds.duplicated()
        FastArray([False, False, False, False,  True, False])

        Notes
        -----
        Consider using ``rt.Grouping(subset).ifirstkey`` as a fancy index to pull in unique rows.
        """
        if subset is None:
            subset = list(self.keys())
        else:
            if not isinstance(subset, list):
                subset = [subset]
        # group on the subset columns; bin 0 of the grouping is the invalid bin
        g = self.gbu(subset).get_groupings()
        igroup = g['iGroup']
        ifirstgroup= g['iFirstGroup']
        ncountgroup = g['nCountGroup']
        # start with every row marked duplicate, then clear the keepers
        result = ones(igroup.shape, dtype=bool)
        # return row of first occurrence
        if keep == 'first':
            # remove invalid bin
            ifirstgroup = ifirstgroup[1:]
            result[igroup[ifirstgroup]]=False
        # return row of last occurrence (however, keys will be in order of their first occurrence)
        elif keep == 'last':
            # last row of the final group = its first index + its count - 1
            lastindex = ifirstgroup[-1] + ncountgroup[-1] -1
            # skip invalid and shift everything
            # NOTE(review): ifirstgroup[2:] may be a view; the in-place subtraction
            # below then mutates the grouping's iFirstGroup array -- confirm
            # get_groupings() returns a private copy.
            ilast = ifirstgroup[2:]
            ilast -=1
            result[igroup[ilast]]=False
            # set the last one
            result[lastindex]=False
        # only return rows that occur once
        elif keep is False:
            ifirstgroup = ifirstgroup[ncountgroup==1]
            result[igroup[ifirstgroup]]=False
        return result
# -------------------------------------------------------------
    def drop_duplicates(self, subset=None, keep: Union[bool, str] = 'first', inplace: bool = False) -> 'Dataset':
        """
        Return Dataset with duplicate rows removed, optionally only
        considering certain columns.

        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates, by
            default use all of the columns
        keep : {'first', 'last', False}, default 'first'
            - ``first`` : Drop duplicates except for the first occurrence.
            - ``last`` : Drop duplicates except for the last occurrence.
            - False : Drop all duplicates.
        inplace : boolean, default False
            Whether to drop duplicates in place or to return a copy

        Returns
        -------
        deduplicated : Dataset

        Raises
        ------
        ValueError
            If `keep` is not 'first', 'last', or False.

        Notes
        -----
        If `keep` is 'last', the rows in the result will match pandas, but the order will be based
        on first occurrence of the unique key.

        Examples
        --------
        >>> np.random.seed(12345)
        >>> ds = rt.Dataset({
        ...     'strcol' : np.random.choice(['a','b','c','d'], 15),
        ...     'intcol' : np.random.randint(0, 3, 15),
        ...     'rand' : np.random.rand(15)
        ... })
        >>> ds
         #   strcol   intcol   rand
        --   ------   ------   ----
         0   c             2   0.05
         1   b             1   0.81
         2   b             2   0.93
         3   b             0   0.36
         4   a             2   0.69
         5   b             1   0.13
         6   c             1   0.83
         7   c             2   0.32
         8   b             1   0.74
         9   c             2   0.60
        10   b             2   0.36
        11   b             1   0.79
        12   c             0   0.70
        13   b             1   0.82
        14   d             1   0.90
        <BLANKLINE>
        [15 rows x 3 columns] total bytes: 195.0 B

        Keep only the row of the first occurrence:

        >>> ds.drop_duplicates(['strcol','intcol'])
        #   strcol   intcol   rand
        -   ------   ------   ----
        0   c             2   0.05
        1   b             1   0.81
        2   b             2   0.93
        3   b             0   0.36
        4   a             2   0.69
        5   c             1   0.83
        6   c             0   0.70
        7   d             1   0.90
        <BLANKLINE>
        [8 rows x 3 columns] total bytes: 104.0 B

        Keep only the row of the last occurrence:

        >>> ds.drop_duplicates(['strcol','intcol'], keep='last')
        #   strcol   intcol   rand
        -   ------   ------   ----
        0   c             2   0.60
        1   b             1   0.82
        2   b             2   0.36
        3   b             0   0.36
        4   a             2   0.69
        5   c             1   0.83
        6   c             0   0.70
        7   d             1   0.90
        <BLANKLINE>
        [8 rows x 3 columns] total bytes: 104.0 B

        Keep only the rows which only occur once:

        >>> ds.drop_duplicates(['strcol','intcol'], keep=False)
        #   strcol   intcol   rand
        -   ------   ------   ----
        0   b             0   0.36
        1   a             2   0.69
        2   c             1   0.83
        3   c             0   0.70
        4   d             1   0.90
        <BLANKLINE>
        [5 rows x 3 columns] total bytes: 65.0 B
        """
        # empty dataset: nothing to deduplicate
        if self.shape[0] == 0:
            if inplace:
                return self
            else:
                return TypeRegister.Dataset(self)
        if subset is None:
            subset = list(self.keys())
        else:
            if not isinstance(subset, list):
                subset = [subset]
        # group on the subset columns; keys come out in first-occurrence order
        gb = self.gbu(subset)
        # return row of first occurrence
        if keep == 'first':
            deduplicated = gb.first()
            deduplicated.label_remove()
        # return row of last occurrence (however, keys will be in order of their first occurrence)
        elif keep == 'last':
            deduplicated = gb.last()
            deduplicated.label_remove()
        # only return rows that occur once
        elif keep is False:
            non_duplicated = gb.count().Count == 1
            deduplicated = gb.first()
            deduplicated.label_remove()
            deduplicated = deduplicated[non_duplicated,:]
        else:
            raise ValueError(f"Got unexpected value for keep {keep}.")
        # replace all columns in dictionary
        if inplace is True:
            if deduplicated._nrows != self._nrows:
                # swap out all column data
                self._nrows = deduplicated._nrows
                # any pending display sort is invalid once rows change
                self._col_sortlist = None
                self.col_replace_all(deduplicated, check_exists=False)
            return self
        return deduplicated
# -------------------------------------------------------------
def col_replace_all(self, newdict, check_exists: bool = True) -> None:
"""
Replace the data for each item in the item dict. Original attributes
will be retained. Useful for internal routines that need to swap out all columns quickly.
Parameters
----------
newdict : dictionary of item names -> new item data (can also be a Dataset)
check_exists : bool
if True, all newdict keys and old item keys will be compared to ensure a match
"""
self._all_items.item_replace_all(newdict, check_exists=check_exists)
# -------------------------------------------------------------
def all(self, axis=0, as_dataset: bool = True):
"""
Returns truth value 'all' along axis. Behavior for ``axis=None`` differs from pandas!
Parameters
----------
axis : int, optional
* axis=0 (dflt.) -> over columns (returns Struct (or Dataset) of bools)
string synonyms: c, C, col, COL, column, COLUMN
* axis=1 -> over rows (returns array of bools)
string synonyms: r, R, row, ROW
* axis=None -> over rows and columns (returns bool)
string synonyms: all, ALL
as_dataset : bool
When ``axis=0``, return Dataset instead of Struct. Defaults to False.
Returns
-------
Struct (or Dataset) or list or bool
"""
def _col_all(_col):
try:
return bool(_col.all())
except TypeError:
return all(_col)
axis = self._axis_key(axis)
cond_rtn_type = type(self) if as_dataset else Struct
if axis == 0:
return cond_rtn_type({_cn: _col_all(_val) for _cn, _val in self.items()})
if axis is None:
return all(_col_all(_val) for _cn, _val in self.items())
if axis == 1:
# for each col, !=0 to get back bool array. then inplace AND all those results, careful with string arrays
temparray=ones(len(self), dtype=bool)
for arr in self.values():
if arr.dtype.num <= 13:
# inplace AND for numerical data
# for cats we will assume 0 is the invalid and !=0 check works
temparray *= arr != 0
else:
# care about string array?
if arr.dtype.char in 'US':
temparray *= arr != ''
else:
# skip this datatype
pass
return temparray
raise NotImplementedError('Dataset.all(axis=<0, 1, None>)')
def sorts_on(self) -> None:
"""
Turns on all row/column sorts for display. False by default.
sorts_view must have been called before
:return: None
"""
if self._col_sortlist is None:
warnings.warn(f"sort_view was not called first. Display sorting will remain off.")
return
self._sort_display = True
def sorts_off(self) -> None:
"""
Turns off all row/column sorts for display (happens when sort_view is called)
If sort is cached, it will remain in cache in case sorts are toggled back on.
:return: None
"""
self._col_sortlist = None
self._sort_display = False
# -------------------------------------------------------
    def get_row_sort_info(self):
        """
        Return the information needed by the sort cache for this dataset:
        a tuple of (unique id, row count, sort dict or None).

        The sort dict maps each sort-column name to its data; it is None
        when no display sort is active or when the stored sort list turned
        out to contain an invalid column name (in which case the stale sort
        list is also cleared).
        """
        sortdict = None
        # general row sort will take precedence
        if self._col_sortlist is not None:
            for col in self._col_sortlist:
                if col not in self:
                    # a column from a previous sort no longer exists
                    print(str(col), "is not a valid key to sort by.")
                    # clear invalid sort from dataset
                    self._col_sortlist = None
                    break
            else:
                # for/else: runs only when the loop completed without break,
                # i.e. every sort column is still present
                #sortdict = {col: self.__getattribute__(col) for col in self._col_sortlist}
                sortdict = {col: self.col_get_value(col) for col in self._col_sortlist}
        return self._uniqueid, self._nrows, sortdict
# -------------------------------------------------------
def _sort_lexsort(self, by, ascending=True):
bylist = by
if not isinstance(by, list):
bylist = [bylist]
sortkeys = []
for col in bylist:
sortkeys.append(self.col_get_value(col))
# larger sort
sort_rows = lexsort([sortkeys[i] for i in range(len(sortkeys) - 1, -1, -1)])
# need to truly reverse it inplace
if ascending is False:
sort_rows = sort_rows[::-1].copy()
#print("**lexsort", sort_rows)
return sort_rows
# -------------------------------------------------------
    def _sort_values(self, by, axis=0, ascending=True, inplace=False, kind='mergesort',
                     na_position='last', copy=False, sort_rows=None):
        """
        Accepts a single column name or list of column names and adds them to the dataset's column sort list.

        The actual sort is performed during display; the dataset itself is not affected
        unless ``inplace=True`` (sort in place) or ``copy=True`` (return a sorted copy).

        When the dataset is being fed into display, the sort cache gets checked to see if a sorted
        index is being held for the keys with the dataset's matching unique ID. If a sorted
        index is found, it gets passed to display. If no index is found, a lexsort is performed,
        and the sort is stored in the cache.

        Parameters
        ----------
        by : string or list of strings
            The column name or list of column names by which to sort
        axis : int
            not used
        ascending : bool
            Determines if the order of sorting is ascending or not.
        inplace : bool
            Sort the dataset itself.
        kind : str
            not used
        na_position : str
            not used
        copy : bool
            Return a sorted copy instead of sorting in place or registering a display sort.
        sort_rows : fancy index array
            used to pass in your own sort

        Returns
        -------
        Dataset
            self (display sort or inplace), or a new sorted Dataset when ``copy=True``.

        Raises
        ------
        ValueError
            If any name in `by` is not a column of the dataset.
        """
        # TODO: build a better routine to check both regular columns and groupby keys for requested sort
        # this has too many repeat conditionals
        # test sort keys
        bylist = by
        if not isinstance(by, list):
            bylist = [bylist]
        for col in bylist:
            if col not in self:
                raise ValueError(f'{col} is not a valid key to sort by.')
        if inplace or copy:
            if self._sort_display is True and copy is False:
                # turn it off because user just specified a new sort
                self.sorts_off()
                #raise ValueError("sorts are turned off for display. Use ds.sort_display() to reactivate.")
            # larger sort
            self._natural_sort = tuple(bylist)
            if sort_rows is None:
                sort_rows = self._sort_lexsort(bylist, ascending)
            if inplace:
                #for k, v in npdict.items():
                #    #self.__setattr__(k, reindex_fast(sort_rows, v))
                #    self._superadditem(k, reindex_fast(sort_rows, v))
                values = list(self.values())
                keys = list(self.keys())
                # TJD optimization
                # Get all the same dtypes so that we can use on column as a temporary and write it into
                for i,k in enumerate(keys):
                    self[k] = values[i][sort_rows]
                    # allow recycler to kick in
                    values[i]=None
                return self
            elif copy:
                npdict = self._as_dictionary()
                newdict = {}
                for k, v in npdict.items():
                    newdict[k] = v[sort_rows]
                # TODO: add routine to copy other ds properties/attributes (regular copy only does the dict and sortlist)
                # making a copy of the dataset first and then doing a sort is twice as expensive
                newds = type(self)(newdict)
                newds.label_set_names(self.label_get_names())
                # deep-copy footers so the new dataset does not share them
                if hasattr( self, '_footers' ):
                    footers = {}
                    for f, item in self._footers.items():
                        footers[f] = item.copy()
                    newds._footers = footers
                return newds
        # if drops into here, sort_view was called
        if ascending is False:
            self._sort_ascending = False
        self._col_sortlist = bylist
        self.sorts_on()
        # TJD New code.. once display, turn sorts_off
        return self
# -------------------------------------------------------
def sort_view(self, by, ascending=True, kind='mergesort', na_position='last'):
"""
Sorts all columns by the labels only when displayed. This routine is fast and does not change data underneath.
Parameters
----------
by : string or list of strings
The column name or list of column names by which to sort
ascending : bool
Determines if the order of sorting is ascending or not.
Examples
----------
>>> ds = rt.Dataset({'a': np.arange(10), 'b':5*['A', 'B'], 'c':3*[10,20,30]+[10]})
>>> ds
# a b c
- - - --
0 0 A 10
1 1 B 20
2 2 A 30
3 3 B 10
4 4 A 20
5 5 B 30
6 6 A 10
7 7 B 20
8 8 A 30
9 9 B 10
>>> ds.sort_view(['b','c'])
# a b c
- - - --
0 0 A 10
1 6 A 10
2 4 A 20
3 2 A 30
4 8 A 30
5 3 B 10
6 9 B 10
7 1 B 20
8 7 B 20
9 5 B 30
>>> ds.sort_view('a', ascending = False)
# a b c
- - - --
0 9 B 10
1 8 A 30
2 7 B 20
3 6 A 10
4 5 B 30
5 4 A 20
6 3 B 10
7 2 A 30
8 1 B 20
9 0 A 10
"""
self._sort_values(by, ascending=ascending, inplace=False, kind=kind, na_position=na_position, copy=False)
return self
# -------------------------------------------------------
def sort_inplace(self, by: Union[str, List[str]], ascending: bool = True, kind: str = 'mergesort', na_position: str = 'last') -> 'Dataset':
"""
Sorts all columns by the labels inplace. This routine will modify the order of all columns.
Parameters
----------
by : str or list of str
The column name or list of column names by which to sort
ascending : bool
Determines if the order of sorting is ascending or not.
Returns
-------
Dataset
The reference to the input Dataset is returned to allow for method chaining.
Examples
----------
>>> ds = rt.Dataset({'a': np.arange(10), 'b':5*['A', 'B'], 'c':3*[10,20,30]+[10]})
>>> ds
# a b c
- - - --
0 0 A 10
1 1 B 20
2 2 A 30
3 3 B 10
4 4 A 20
5 5 B 30
6 6 A 10
7 7 B 20
8 8 A 30
9 9 B 10
>>> ds.sort_inplace(['b','c'])
# a b c
- - - --
0 0 A 10
1 6 A 10
2 4 A 20
3 2 A 30
4 8 A 30
5 3 B 10
6 9 B 10
7 1 B 20
8 7 B 20
9 5 B 30
>>> ds.sort_inplace('a', ascending = False)
# a b c
- - - --
0 9 B 10
1 8 A 30
2 7 B 20
3 6 A 10
4 5 B 30
5 4 A 20
6 3 B 10
7 2 A 30
8 1 B 20
9 0 A 10
"""
return self._sort_values(by, ascending=ascending, inplace=True, kind=kind, na_position=na_position, copy=False)
def sort_copy(self, by: Union[str, List[str]], ascending: bool = True, kind: str = 'mergesort', na_position: str ='last') -> 'Dataset':
"""
Sorts all columns by the labels and returns a copy. The original dataset is not modified.
Parameters
----------
by : str or list of str
The column name or list of column names by which to sort
ascending : bool
Determines if the order of sorting is ascending or not.
Returns
-------
Dataset
Examples
----------
>>> ds = rt.Dataset({'a': np.arange(10), 'b':5*['A', 'B'], 'c':3*[10,20,30]+[10]})
>>> ds
# a b c
- - - --
0 0 A 10
1 1 B 20
2 2 A 30
3 3 B 10
4 4 A 20
5 5 B 30
6 6 A 10
7 7 B 20
8 8 A 30
9 9 B 10
>>> ds.sort_copy(['b','c'])
# a b c
- - - --
0 0 A 10
1 6 A 10
2 4 A 20
3 2 A 30
4 8 A 30
5 3 B 10
6 9 B 10
7 1 B 20
8 7 B 20
9 5 B 30
>>> ds.sort_copy('a', ascending = False)
# a b c
- - - --
0 9 B 10
1 8 A 30
2 7 B 20
3 6 A 10
4 5 B 30
5 4 A 20
6 3 B 10
7 2 A 30
8 1 B 20
9 0 A 10
"""
return self._sort_values(by, ascending=ascending, inplace=False, kind=kind, na_position=na_position, copy=True)
# -------------------------------------------------------
def _apply_outlier(self, func, name, col_keep):
pos=func()
row_func=[]
row_namefunc=[]
row_pos=[]
colnames =self.keys()
# for all the columns
for c in colnames:
# categoricals and strings might be eliminated
if c != col_keep:
try:
#get first value
val=pos[c][0]
row_pos.append(val)
row_func.append(self[c][val])
row_namefunc.append(self[col_keep][val])
except:
invalid=INVALID_DICT[self[c].dtype.num]
#print("**invalid", invalid)
row_func.append(np.nan)
row_namefunc.append(get_default_value(self[col_keep]))
row_pos.append(-1)
ds=type(self)({})
ds[name] = FastArray(row_func)
ds[col_keep] = FastArray(row_namefunc)
ds['Pos'] = FastArray(row_pos)
return ds
def outliers(self, col_keep) -> 'Multiset':
"""return a dataset with the min/max outliers for each column"""
maxds=self._apply_outlier(self.nanargmax, 'Values', col_keep)
minds=self._apply_outlier(self.nanargmin, 'Values', col_keep)
rownames=[]
colnames =self.keys()
# for all the columns
for c in colnames:
# categoricals and strings might be eliminated
if c != col_keep:
rownames.append(c)
maxds['Names'] = FastArray(rownames) # needs auto_rewrap
maxds.label_set_names(['Names'])
minds['Names'] = FastArray(rownames) # needs auto_rewrap
minds.label_set_names(['Names'])
ms=TypeRegister.Multiset({})
ms['Min']=minds
ms['Max']=maxds
ms._gbkeys = {'Names' :FastArray(rownames)}
return ms
# -------------------------------------------------------
def computable(self) -> Mapping[str, FastArray]:
"""returns a dict of computable columns. does not include groupby keys"""
return_dict = {}
labels = self.label_get_names()
for name, arr in self.items():
# any current groupby keys we will not count either
if arr.iscomputable() and name not in labels:
return_dict[name]=arr
return return_dict
# -------------------------------------------------------
def noncomputable(self) -> Mapping[str, FastArray]:
"""returns a dict of noncomputable columns. includes groupby keys"""
return_dict = {}
labels = self.label_get_names()
for name, arr in self.items():
if not arr.iscomputable() or name in labels:
return_dict[name]=arr
return return_dict
# -------------------------------------------------------
@property
def crc(self) -> 'Dataset':
"""
Returns a new dataset with the 64 bit CRC value of every column.
Useful for comparing the binary equality of columns in two datasets
Examples
--------
>>> ds1 = rt.Dataset({'test': rt.arange(100), 'test2': rt.arange(100.0)})
>>> ds2 = rt.Dataset({'test': rt.arange(100), 'test2': rt.arange(100)})
>>> ds1.crc == ds2.crc
# test test2
- ---- -----
0 True False
"""
newds={}
for colname,arr in self.items():
newds[colname]=arr.crc
return type(self)(newds)
# -------------------------------------------------------
def _mask_reduce(self, func, is_ormask: bool):
"""helper function for boolean masks: see mask_or_isnan, et al"""
mask = None
funcmask=TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
if is_ormask:
funcNum=MATH_OPERATION.BITWISE_OR
else:
funcNum=MATH_OPERATION.BITWISE_AND
# loop through all computable columns
cols = self.computable()
for col in cols.values():
bool_mask = func(col)
if mask is None:
mask=bool_mask
else:
#inplace is faster
funcmask((mask, bool_mask, mask), funcNum, 0)
return mask
def mask_or_isnan(self) -> FastArray:
"""
Returns a boolean mask of all columns ORed with :meth:`~rt.rt_numpy.isnan`.
Useful to see if any elements in the dataset contain a NaN.
Returns
-------
FastArray
Examples
--------
>>> ds = rt.Dataset({'a' : [1,2,np.nan], 'b':[0, np.nan, np.nan]})
>>> ds
# a b
- ---- ---
0 1.00 0.00
1 2.00 inf
2 inf inf
[3 rows x 2 columns] total bytes: 48.0 B
>>> ds.mask_or_isnan()
FastArray([False, True, True])
"""
return self._mask_reduce(np.isnan, True)
def mask_and_isnan(self) -> FastArray:
"""
Returns a boolean mask of all columns ANDed with :meth:`~rt.rt_numpy.isnan`.
Returns
-------
FastArray
Examples
--------
>>> ds = rt.Dataset({'a' : [1,2,np.nan], 'b':[0, np.nan, np.nan]})
>>> ds
# a b
- ---- ---
0 1.00 0.00
1 2.00 inf
2 inf inf
[3 rows x 2 columns] total bytes: 48.0 B
>>> ds.mask_and_isnan()
FastArray([False, False, True])
"""
return self._mask_reduce(np.isnan, False)
def mask_or_isfinite(self) -> FastArray:
"""
Returns a boolean mask of all columns ORed with :meth:`~rt.rt_numpy.isfinite`.
Returns
-------
FastArray
Examples
--------
>>> ds = rt.Dataset({'a' : [1,2,np.inf], 'b':[0, np.inf, np.inf]})
>>> ds
# a b
- ---- ---
0 1.00 0.00
1 2.00 inf
2 inf inf
[3 rows x 2 columns] total bytes: 48.0 B
>>> ds.mask_or_isfinite()
FastArray([True, True, False])
"""
return self._mask_reduce(np.isfinite, True)
def mask_and_isfinite(self) -> FastArray:
"""
Returns a boolean mask of all columns ANDed with :meth:`~rt.rt_numpy.isfinite`.
Returns
-------
FastArray
Examples
--------
>>> ds = rt.Dataset({'a' : [1,2,np.inf], 'b':[0, np.inf, np.inf]})
>>> ds
# a b
- ---- ---
0 1.00 0.00
1 2.00 inf
2 inf inf
[3 rows x 2 columns] total bytes: 48.0 B
>>> ds.mask_and_isfinite()
FastArray([True, False, False])
"""
return self._mask_reduce(np.isfinite, False)
def mask_or_isinf(self) -> FastArray:
"""
returns a boolean mask of all columns ORed with isinf
Returns
-------
FastArray
Examples
--------
>>> ds = rt.Dataset({'a' : [1,2,np.inf], 'b':[0, np.inf, np.inf]})
>>> ds
# a b
- ---- ---
0 1.00 0.00
1 2.00 inf
2 inf inf
[3 rows x 2 columns] total bytes: 48.0 B
>>> ds.mask_or_isinf()
FastArray([False, True, True])
"""
return self._mask_reduce(np.isinf, True)
def mask_and_isinf(self) -> FastArray:
"""
returns a boolean mask of all columns ANDed with isinf
Returns
-------
FastArray
Examples
--------
>>> ds = rt.Dataset({'a' : [1,2,np.inf], 'b':[0, np.inf, np.inf]})
>>> ds
# a b
- ---- ---
0 1.00 0.00
1 2.00 inf
2 inf inf
[3 rows x 2 columns] total bytes: 48.0 B
>>> ds.mask_and_isinf()
FastArray([False, False, True])
"""
return self._mask_reduce(np.isinf, False)
    def merge(
        self,
        right: 'Dataset',
        on: Optional[Union[str, List[str]]] = None,
        left_on: Optional[Union[str, List[str]]] = None,
        right_on: Optional[Union[str, List[str]]] = None,
        how: str = 'left',
        suffixes: Tuple[str, str] = ('_x', '_y'),
        indicator: Union[bool, str] = False,
        columns_left: Optional[Union[str, List[str]]] = None,
        columns_right: Optional[Union[str, List[str]]] = None,
        verbose: bool = False,
        hint_size: int = 0
    ) -> 'Dataset':
        # Thin wrapper: all join logic lives in rt_merge.merge. No docstring is
        # written here because it is copied from rt_merge.merge below.
        return rt_merge.merge(self, right, on=on, left_on=left_on, right_on=right_on, how=how,
                              suffixes=suffixes, indicator=indicator, columns_left=columns_left,
                              columns_right=columns_right, verbose=verbose, hint_size=hint_size)
    merge.__doc__ = rt_merge.merge.__doc__
    def merge2(
        self,
        right: 'Dataset',
        on: Optional[Union[str, Tuple[str, str], List[Union[str, Tuple[str, str]]]]] = None,
        left_on: Optional[Union[str, List[str]]] = None,
        right_on: Optional[Union[str, List[str]]] = None,
        how: str = 'left',
        suffixes: Optional[Tuple[str, str]] = None,
        copy: bool = True,
        indicator: Union[bool, str] = False,
        columns_left: Optional[Union[str, List[str]]] = None,
        columns_right: Optional[Union[str, List[str]]] = None,
        validate: Optional[str] = None,
        keep: Optional[Union[str, Tuple[Optional[str], Optional[str]]]] = None,
        high_card: Optional[Union[bool, Tuple[Optional[bool], Optional[bool]]]] = None,
        hint_size: Optional[Union[int, Tuple[Optional[int], Optional[int]]]] = None
    ) -> 'Dataset':
        # Thin wrapper: all join logic lives in rt_merge.merge2. No docstring is
        # written here because it is copied from rt_merge.merge2 below.
        return rt_merge.merge2(
            self, right, on=on, left_on=left_on, right_on=right_on, how=how,
            suffixes=suffixes, copy=copy, indicator=indicator, columns_left=columns_left, columns_right=columns_right,
            validate=validate, keep=keep, high_card=high_card, hint_size=hint_size)
    merge2.__doc__ = rt_merge.merge2.__doc__
    def merge_asof(
        self,
        right: 'Dataset',
        on: Optional[Union[str, Tuple[str, str]]] = None,
        left_on: Optional[str] = None,
        right_on: Optional[str] = None,
        by: Optional[Union[str, Tuple[str, str], List[Union[str, Tuple[str, str]]]]] = None,
        left_by: Optional[Union[str, List[str]]] = None,
        right_by: Optional[Union[str, List[str]]] = None,
        suffixes: Optional[Tuple[str, str]] = None,
        copy: bool = True,
        columns_left: Optional[Union[str, List[str]]] = None,
        columns_right: Optional[Union[str, List[str]]] = None,
        tolerance: Optional[Union[int, 'timedelta']] = None,
        allow_exact_matches: bool = True,
        direction: str = "backward",
        check_sorted: bool = True,
        matched_on: Union[bool, str] = False,
        **kwargs
    ) -> 'Dataset':
        # Thin wrapper: all as-of join logic lives in rt_merge.merge_asof. No
        # docstring is written here because it is copied from rt_merge.merge_asof below.
        # TODO: Adapt the logic from merge_lookup() to allow this method to support an in-place merge mode.
        return rt_merge.merge_asof(
            self, right,
            on=on, left_on=left_on, right_on=right_on,
            by=by, left_by=left_by, right_by=right_by,
            suffixes=suffixes, copy=copy, columns_left=columns_left, columns_right=columns_right,
            tolerance=tolerance,
            allow_exact_matches=allow_exact_matches,
            direction=direction, check_sorted=check_sorted,
            matched_on=matched_on,
            **kwargs
        )
    merge_asof.__doc__ = rt_merge.merge_asof.__doc__
    def merge_lookup(
        self,
        right: 'Dataset',
        on: Optional[Union[str, Tuple[str, str], List[Union[str, Tuple[str, str]]]]] = None,
        left_on: Optional[Union[str, List[str]]] = None,
        right_on: Optional[Union[str, List[str]]] = None,
        require_match: bool = False,
        suffix: Optional[str] = None,
        copy: bool = True,
        columns_left: Optional[Union[str, List[str]]] = None,
        columns_right: Optional[Union[str, List[str]]] = None,
        keep: Optional[str] = None,
        inplace: bool = False,
        high_card: Optional[Union[bool, Tuple[Optional[bool], Optional[bool]]]] = None,
        hint_size: Optional[Union[int, Tuple[Optional[int], Optional[int]]]] = None
    ) -> 'Dataset':
        # Docstring is copied from rt_merge.merge_lookup below; only comments here.
        # This method supports an in-place mode; unless the user specifies that one,
        # call the normal module-based implementation.
        # Left columns keep their names; only right-side conflicts get `suffix`.
        suffixes = ('', suffix)
        if not inplace:
            return rt_merge.merge_lookup(
                self, right, on=on, left_on=left_on, right_on=right_on, require_match=require_match, suffixes=suffixes,
                copy=copy, columns_left=columns_left, columns_right=columns_right, keep=keep,
                high_card=high_card, hint_size=hint_size)
        # Specifying 'columns_left' is meaningless for an in-place merge, so don't allow it.
        # If the caller wants to also drop columns from this Dataset, they should do that separately.
        if columns_left:
            raise ValueError("'columns_left' cannot be specified when performing an in-place merge_lookup.")
        # The caller selected the in-place merge; columns from the other Dataset are merged and added into this Dataset.
        # Do this by calling the module version of merge_lookup but don't select any columns from the
        # left Dataset (this instance). Add the resulting columns -- all taken from the right side --
        # to this instance.
        lookup_result = rt_merge.merge_lookup(
            self, right, on=on, left_on=left_on, right_on=right_on, require_match=require_match, suffixes=suffixes,
            copy=copy, columns_left=[], columns_right=columns_right, keep=keep,
            high_card=high_card, hint_size=hint_size)
        # Before adding the lookup result columns to this Dataset,
        # we need to perform the column name conflict resolution step that's
        # normally done while performing the merge. That won't have happened in
        # in our call above since we only selected columns from the 'right' Dataset.
        # NOTE: This must be done prior to adding the resulting columns to this Dataset,
        # so that if there are any unresolvable naming conflicts (in which case we raise
        # an exception), this Dataset won't have been changed at all.
        left_on = rt_merge._extract_on_columns(on, left_on, True, 'on', is_optional=False)
        right_on = rt_merge._extract_on_columns(on, right_on, False, 'on', is_optional=False)
        columns_left = rt_merge._normalize_selected_columns(self, None)
        columns_right = rt_merge._normalize_selected_columns(right, columns_right)
        _, right_colname_mapping, _ = rt_merge._construct_colname_mapping(
            left_on, right_on, suffixes=suffixes, columns_left=columns_left, columns_right=columns_right)
        # map: original right column name -> (possibly suffixed) output name
        right_colname_map = dict(zip(*right_colname_mapping))
        # Add the resulting columns to this Dataset.
        for right_col_name in lookup_result.keys():
            # The columns in the merge result won't have gone through the name-conflict resolution
            # process during the merge (since we passed an empty list for the left columns), so we
            # need to apply any name-mappings here when adding the result columns to this instance.
            new_col_name = right_colname_map.get(right_col_name, right_col_name)
            self[new_col_name] = lookup_result[right_col_name]
        return self
    merge_lookup.__doc__ = rt_merge.merge_lookup.__doc__
@property
def total_size(self) -> int:
"""
Returns total size of all (columnar) data in bytes.
Returns
-------
int
The total size, in bytes, of all columnar data in this instance.
"""
npdict = self._as_dictionary()
totalSize = 0
for k, v in npdict.items():
try:
totalSize += v._total_size
except:
totalSize += v.size * v.itemsize
return totalSize
    def _last_row_stats(self):
        # One-line summary shown at the bottom of the dataset display,
        # e.g. "[15 rows x 3 columns] total bytes: 195.0 B".
        return f"[{self._nrows} rows x {self._ncols} columns] total bytes: {self._sizeof_fmt(self.total_size)}"
    @property
    def memory_stats(self) -> None:
        # NOTE(review): property with a side effect -- prints the
        # rows/columns/bytes summary line to stdout and returns None.
        print(self._last_row_stats())
# ------------------------------------------------------
    def get_sorted_col_data(self, col_name):
        """
        Private method. Return a copy of the named column's data in the
        current display-sort order (or in natural order when no sorted
        index is cached).

        Parameters
        ----------
        col_name : str
            Name of the column to fetch.

        Returns
        -------
        numpy array, or None (implicitly) when `col_name` is not in the
        dataset -- a message is printed in that case.
        """
        if col_name in self:
            #col = self.__getattribute__(col_name)
            col = self.col_get_value(col_name)
            sort_id = self.get_row_sort_info()
            # SortCache returns None when no sorted row index is cached
            sorted_row_idx = SortCache.get_sorted_row_index(*sort_id)
            if sorted_row_idx is not None:
                return col[sorted_row_idx]
            else:
                return np.copy(col)
        else:
            print(str(col_name), "not found in dataset.")
# -------------------------------------------------------
@property
def _sort_columns(self):
if self._col_sortlist is not None:
return self._sort_column_styles
# -------------------------------------------------------
def _footers_exist(self, labels):
"""Return a list of occurring footers from user-specified labels.
If labels is None, return list of all footer labels.
If none occur, returns None.
See Also
--------
footer_remove(), footer_get_values()
"""
if labels is None:
# remove all labels
final_labels = list(self.footers)
else:
# remove specific labels
if not isinstance(labels, list):
labels = [labels]
final_labels = [fname for fname in labels if fname in self.footers]
if len(final_labels)==0:
warnings.warn(f"No footers found for names {labels}.")
return
return final_labels
# -------------------------------------------------------
    def footer_remove(self, labels=None, columns=None):
        """Remove all or specific footers from all or specific columns.

        Parameters
        ----------
        labels : string or list of strings, default None
            If provided, remove only footers under these names.
        columns : string or list of strings, default None
            If provided, only remove (possibly specified) footers from these columns.

        Examples
        --------
        >>> ds = rt.Dataset({'colA': rt.arange(3),'colB': rt.arange(3)*2})
        >>> ds.footer_set_values('sum', {'colA':3, 'colB':6})
        >>> ds.footer_set_values('mean', {'colA':1.0, 'colB':2.0})
        >>> ds
           #   colA   colB
        ----   ----   ----
           0      0      0
           1      1      2
           2      2      4
        ----   ----   ----
         sum      3      6
        mean   1.00   2.00

        Remove single footer from single column

        >>> ds.footer_remove('sum','colA')
        >>> ds
           #   colA   colB
        ----   ----   ----
           0      0      0
           1      1      2
           2      2      4
        ----   ----   ----
         sum             6
        mean   1.00   2.00

        Remove single footer from all columns

        >>> ds.footer_remove('mean')
        >>> ds
          #   colA   colB
        ---   ----   ----
          0      0      0
          1      1      2
          2      2      4
        ---   ----   ----
        sum             6

        Remove all footers from all columns

        >>> ds.footer_remove()
        >>> ds
        #   colA   colB
        -   ----   ----
        0      0      0
        1      1      2
        2      2      4

        Notes
        -----
        Calling this method with no keywords will clear all footers from all columns.

        See Also
        --------
        Dataset.footer_set_values()
        """
        if self.footers is None:
            return
        # get list of existing, or use all footer labels if not specified
        labels = self._footers_exist(labels)
        if labels is None:
            return
        remove_all = False
        # remove from all columns
        if columns is None:
            remove_all = True
            columns = self.keys()
        else:
            # remove from specific columns
            if not isinstance(columns, list):
                columns = [columns]
            # prevent partial footers from being removed
            # (raises before mutating anything if a column name is invalid)
            self._ensure_atomic(columns, self.footer_remove)
        # pop value from each column's footer dict
        for colname in columns:
            coldict = self.col_get_attribute(colname, 'Footer')
            if coldict is None:
                continue
            for label in labels:
                coldict.pop(label,None)
        # if removed from all columns, remove name from master footer row
        if remove_all:
            for label in labels:
                del self.footers[label]
            # None left, remove for future display
            if len(self.footers)==0:
                del self.__dict__['_footers']
# -------------------------------------------------------
def footer_get_values(self, labels=None, columns=None, fill_value=None):
"""
Dictionary of footer rows. Missing footer values will be returned as None.
Parameters
----------
labels : list, optional
Footer rows to return values for. If not provided, all footer rows will be returned.
columns : list, optional
Columns to return footer values for. If not provided, all column footers will be returned.
fill_value : optional, default None
Value to use when no footer is found.
Examples
--------
>>> ds = rt.Dataset({'colA': rt.arange(5), 'colB': rt.arange(5), 'colC': rt.arange(5)})
>>> ds.footer_set_values('row1', {'colA':1, 'colC':2})
>>> ds.footer_get_values()
{'row1': [1, None, 2]}
>>> ds.footer_get_values(columns=['colC','colA'])
{'row1': [2, 1]}
>>> ds.footer_remove()
>>> ds.footer_get_values()
{}
Returns
-------
footers : dictionary
Keys are footer row names.
Values are lists of footer values or None, if missing.
"""
if self.footers is None:
return {}
labels = self._footers_exist(labels)
if labels is None:
return {}
if columns is None:
columns = self.keys()
if not isinstance(columns, list):
columns = [columns]
footerdict = { fname:[] for fname in labels }
for colname in columns:
coldict = self.col_get_attribute(colname, 'Footer')
# column had no footers, fill with None
if coldict is None:
for v in footerdict.values():
v.append(fill_value)
else:
for k, v in footerdict.items():
v.append(coldict.get(k, fill_value))
return footerdict
# -------------------------------------------------------
def footer_get_dict(self, labels=None, columns=None):
"""
Dictionary of footer rows, the latter in dictionary form.
Parameters
----------
labels : list, optional
Footer rows to return values for. If not provided, all footer rows will be returned.
columns : list of str, optional
Columns to return footer values for. If not provided, all column footers will be returned.
Examples
--------
>>> ds = rt.Dataset({'colA': rt.arange(5), 'colB': rt.arange(5), 'colC': rt.arange(5)})
>>> ds.footer_set_values('row1', {'colA':1, 'colC':2})
>>> ds.footer_get_dict()
{'row1': {'colA': 1, 'colC': 2}}
>>> ds.footer_get_dict(columns=['colC','colA'])
{'row1': [2, 1]}
>>> ds.footer_remove()
>>> ds.footer_get_dict()
{}
Returns
-------
footers : dictionary
Keys are footer row names.
Values are dictionaries of column name and value pairs.
"""
if self.footers is None:
return {}
labels = self._footers_exist(labels)
if labels is None:
return {}
if columns is None:
columns = self.keys()
if not isinstance(columns, list):
columns = [columns]
footerdict = { fname:{} for fname in labels }
for colname in columns:
coldict = self.col_get_attribute(colname, 'Footer')
# column had no footers, fill with None
if coldict is not None:
for k, d in footerdict.items():
v = coldict.get(k, None)
if v:
d[colname] = v
return footerdict
# -------------------------------------------------------
def footer_set_values(self, label:str, footerdict) -> None:
"""Assign footer values to specific columns.
Parameters
----------
label : string
Name of existing or new footer row.
This string will appear as a label on the left, below the right-most label key or row numbers.
footerdict : dictionary
Keys are valid column names (otherwise raises ValueError).
Values are scalars. They will appear as a string with their default type formatting.
Returns
-------
None
Examples
--------
>>> ds = rt.Dataset({'colA': rt.arange(3), 'colB': rt.arange(3)*2})
>>> ds.footer_set_values('sum', {'colA':3, 'colB':6})
>>> ds
# colA colB
--- ---- ----
0 0 0
1 1 2
2 2 4
--- ---- ----
sum 3 6
>>> ds.colC = rt.ones(3)
>>> ds.footer_set_values('mean', {'colC': 1.0})
>>> ds
# colA colB colC
---- ---- ---- ----
0 0 0 1.00
1 1 2 1.00
2 2 4 1.00
---- ---- ---- ----
sum 3 6
mean 1.00
Notes
-----
- Not all footers need to be set. Missing footers will appear as blank in final display.
- Footers will appear in dataset slices as they do in the original dataset.
- If the footer is a column total, it may need to be recalculated.
- This routine can also be used to replace existing footers.
See Also
--------
Dataset.footer_remove()
"""
if not isinstance(label, str):
raise TypeError(f"Footer labels must be string values, got {type(label)}")
if not isinstance(footerdict, dict):
raise TypeError(f"Footer mapping must be a dictionary of column names -> footer values for specified label {label}. Got {type(footerdict)}.")
# prevent partial footers from being set
self._ensure_atomic(footerdict,self.footer_set_values)
if self.footers is None:
# use a dict so footer row order is preserved
self._footers = dict()
self._footers[label]=None
for colname, value in footerdict.items():
coldict = self.col_get_attribute(colname, 'Footer')
# create a new footer dict
if coldict is None:
coldict = {label:value}
self.col_set_attribute(colname, 'Footer', coldict)
# modify existing footer dict
else:
coldict[label]=value
# -------------------------------------------------------
def _prepare_display_data(self):
"""Prepare column headers, arrays, and column footers for display.
Arrays will be aranged in order: Labels, sort columns, regular columns, right columns.
"""
header_tups = None
footer_tups = None
array_data = None
leftkeys = self.label_get_names()
# no labels
if len(leftkeys) == 0:
leftcols = []
# no row numbers callback
if self._row_numbers is None:
# use default row number header
leftkeys = ['#']
else:
leftcols = [self[k] for k in leftkeys]
sortkeys = []
# col_sortlist might still be set even though sorts are off
# only pull it if sorts are on
if self._sort_display:
if self._col_sortlist is not None:
sortkeys = self._col_sortlist
sortcols = [self[k] for k in sortkeys]
rightkeys = self.summary_get_names()
rightcols = [self[k] for k in rightkeys]
mainkeys = [c for c in self if c not in leftkeys and c not in rightkeys and c not in sortkeys]
maincols = [self[k] for k in mainkeys]
footers = self.footers
cols_with_footer = sortkeys + mainkeys + rightkeys
if footers is not None:
# create row for each footer label
footerkeys = [*footers]
# align footer label with right-most label column or row number column
# assume not displaying label footers for now
numleft = len(leftcols)
if numleft < 2:
padding = []
else:
# pad each row
padding = [''] * (numleft-1)
cols_with_footer = sortkeys + mainkeys + rightkeys
footerdict = self.footer_get_values(columns=cols_with_footer, fill_value='')
# lists for each footer row, empty string for blanks
footerrows = [padding + [rowname] + footervals for rowname, footervals in footerdict.items()]
# column footer tuples with string repr of each value
footer_tups = [[ ColHeader(format_scalar(fval),1,0) for fval in frow] for frow in footerrows]
# build all column header tuples
allkeys = leftkeys + cols_with_footer
header_tups = [[ ColHeader(k,1,0) for k in allkeys ]]
# all arrays in one list
array_data = leftcols + sortcols + maincols + rightcols
return header_tups, array_data, footer_tups
# -------------------------------------------------------
    def __str__(self):
        """Return the plain-text table representation of the Dataset."""
        return self.make_table(DS_DISPLAY_TYPES.STR)
# -------------------------------------------------------
def __repr__(self):
#if Struct._lastreprhtml != 0 and Struct._lastrepr > Struct._lastreprhtml and TypeRegister.DisplayOptions.HTML_DISPLAY:
# # this is an ODD condition
# print("HMTL is on, but repr called back to back. consider rt.Display.display_html(False)")
Struct._lastrepr =GetTSC()
# this will be called before _repr_html_ in jupyter
if TypeRegister.DisplayOptions.HTML_DISPLAY is False:
result= self.make_table(DS_DISPLAY_TYPES.STR)
# always turn off sorting once displayed
self.sorts_off()
else:
result =self.make_table(DS_DISPLAY_TYPES.REPR)
return result
# -------------------------------------------------------
def _repr_html_(self):
Struct._lastreprhtml =GetTSC()
if TypeRegister.DisplayOptions.HTML_DISPLAY is False:
plainstring = self.make_table(DS_DISPLAY_TYPES.STR)
# TJD this is a hack that needs to be reviewed
# Believe it exists to display ds in a list
print(DisplayString(plainstring))
# jupyter lab will turn plain string into non-monospace font
result = ""
else:
result =self.make_table(DS_DISPLAY_TYPES.HTML)
# always turn off sorting once displayed
self.sorts_off()
return result
# -------------------------------------------------------
def add_matrix(self, arr, names: Optional[List[str]] = None) -> None:
"""
Add a 2-dimensional matrix as columns in a dataset.
Parameters
----------
arr : 2-d ndarray
names : list of str, optional
optionally provide column names
"""
if names is not None:
if arr.shape[1] != len(names):
raise ValueError(f'Provided names must match number of columns.')
else:
names = ['col_'+str(i) for i in range(arr.shape[1])]
arr = arr.T
for idx, name in enumerate(names):
if name in self:
warnings.warn(f"Overwriting column named {name}.")
setattr(self, name, arr[idx])
# -------------------------------------------------------
def transpose(self, colnames: Optional[List[str]] = None, cats: bool = False, gb: bool = False, headername: str = 'Col') -> 'Dataset':
"""
Return a transposed version of the Dataset.
Parameters
----------
colnames : list of str, optional
Set to list of colnames you want transposed; defaults to None, which means all columns are included.
cats : bool
Set to True to include Categoricals in transposition. Defaults to False.
gb : bool
Set to True to include groupby keys (labels) in transposition. Defaults to False.
headername : str
The name of the column which was once all the column names. Defaults to 'Col'.
Returns
-------
Dataset
A transposed version of this Dataset instance.
"""
def col_as_string(colname):
c = self[colname]
if isinstance(c, TypeRegister.Categorical):
# todo should use expand_dict or categoricals should have a new routine
return c.expand_array
else:
return c.astype('U')
oldlabels = self.label_get_names()
# first homogenize all the data to same dtype, and make 2d matrix
t_array, colnames = self.imatrix_make(colnames =colnames, cats=cats, gb=gb, inplace=False, retnames=True)
# rotate the matrix 90
t_array = t_array.transpose()
# the column names are now the rownames
tds = Dataset({headername:colnames})
numcols = t_array.shape[1]
if len(oldlabels) == 0:
# Just label all the column C0, C1, C2, etc.
colnames = 'C' + arange(numcols).astype('U')
else:
# handle multikey with _ separator
colnames = col_as_string(oldlabels[0])
for i in range(1,len(oldlabels)):
colnames = colnames + '_' + col_as_string(oldlabels[i])
# extract each column in the 2d matrix
for i in range(numcols):
tds[colnames[i]] = t_array[:,i]
# takes the column names running horiz, and makes them vertical
tds.label_set_names([headername])
return tds
# -------------------------------------------------------
def show_all(self, max_cols: int = 8) -> None:
"""
Display all rows and up to the specified number of columns.
Parameters
----------
max_cols : int
The maximum number of columns to display.
Notes
-----
TODO: This method currently displays the data using 'print'; it should be deprecated or adapted
to use our normal display code so it works e.g. in a Jupyter notebook.
"""
i = 0
num_cols = self.get_ncols()
while i < num_cols:
print(self[:, i:i + max_cols])
i += max_cols
# -------------------------------------------------------
    def sample(
        self, N: int = 10, filter: Optional[np.ndarray] = None,
        seed: Optional[Union[int, Sequence[int], np.random.SeedSequence, np.random.Generator]] = None
    ) -> 'Dataset':
        """
        Select N random samples from `Dataset` or `FastArray`.

        Parameters
        ----------
        N : int, optional, defaults to 10
            Number of rows to sample.
        filter : array-like (bool or rownums), optional, defaults to None
            Filter for rows to sample.
        seed : {None, int, array_like[ints], SeedSequence, Generator}, optional, defaults to None
            A seed to initialize the `Generator`. If None, the generator is initialized using
            fresh, random entropy data gathered from the OS.
            See the docstring for `np.random.default_rng` for additional details.

        Returns
        -------
        Dataset

        Notes
        -----
        Delegates to the module-level ``sample`` function.
        """
        # delegate so Dataset and FastArray share a single implementation
        return sample(self, N=N, filter=filter, seed=seed)
# -------------------------------------------------------
def _get_columns(self, cols: Union[str, Iterable[str]]) -> List[FastArray]:
"""internal routine used to create a list of one or more columns"""
if not isinstance(cols, list):
if isinstance(cols, str):
cols=[cols]
else:
raise TypeError(f'The argument for accum2 or cat must be a list of column name(s) or a single column name.')
cols = [self[colname] for colname in cols]
return cols
# -------------------------------------------------------
def _makecat(self, cols):
if not isinstance(cols, np.ndarray):
cols = self._get_columns(cols)
# if just one item in the list, extract it
if len(cols)==1:
cols = cols[0]
return cols
# -------------------------------------------------------
def cat(self, cols: Union[str, Iterable[str]], **kwargs) -> 'Categorical':
"""
Parameters
----------
cols : str or list of str
A single column name or list of names to indicate which columns to build the categorical from
or a numpy array to build the categoricals from
kwargs : any valid keywords in the categorical constructor
Returns
-------
Categorical
A categorical with dataset set to self for groupby operations.
Examples
--------
>>> np.random.seed(12345)
>>> ds = rt.Dataset({'strcol': np.random.choice(['a','b','c'],4), 'numcol': rt.arange(4)})
>>> ds
# strcol numcol
- ------ ------
0 c 0
1 b 1
2 b 2
3 a 3
>>> ds.cat('strcol').sum()
*strcol numcol
------- ------
a 3
b 3
c 0
"""
cols = self._makecat(cols)
if not isinstance(cols, TypeRegister.Categorical):
cols = TypeRegister.Categorical(cols, **kwargs)
cols._dataset = self
return cols
# -------------------------------------------------------
def cat2keys(
self,
cat_rows: Union[str, List[str]],
cat_cols: Union[str, List[str]],
filter: Optional[np.ndarray] = None,
ordered: bool = True,
sort_gb: bool = False,
invalid: bool = False,
fuse: bool = False
) -> 'Categorical':
"""
Creates a :class:`~rt.rt_categorical.Categorical` with two sets of keys which have all possible unique combinations.
Parameters
----------
cat_rows : str or list of str
A single column name or list of names to indicate which columns to build the categorical from
or a numpy array to build the categoricals from.
cat_cols : str or list of str
A single column name or list of names to indicate which columns to build the categorical from
or a numpy array to build the categoricals from.
filter : ndarray of bools, optional
only valid when invalid is set to True
ordered : bool, default True
only applies when `key1` or `key2` is not a categorical
sort_gb : bool, default False
only applies when `key1` or `key2` is not a categorical
invalid : bool, default False
Specifies whether or not to insert the invalid when creating the n x m unique matrix.
fuse : bool, default False
When True, forces the resulting categorical to have 2 keys, one for rows, and one for columns.
Returns
-------
Categorical
A categorical with at least 2 keys dataset set to self for groupby operations.
Examples
--------
>>> ds = rt.Dataset({_k: list(range(_i * 2, (_i + 1) * 2)) for _i, _k in enumerate(["alpha", "beta", "gamma"])}); ds
# alpha beta gamma
- ----- ---- -----
0 0 2 4
1 1 3 5
[2 rows x 3 columns] total bytes: 24.0 B
>>> ds.cat2keys(['alpha', 'beta'], 'gamma').sum(rt.arange(len(ds)))
*alpha *beta *gamma col_0
------ ----- ------ -----
0 2 4 0
1 3 4 0
0 2 5 0
1 3 5 1
[4 rows x 4 columns] total bytes: 80.0 B
See Also
--------
rt_numpy.cat2keys
rt_dataset.accum2
"""
cat_rows = self._makecat(cat_rows)
cat_cols = self._makecat(cat_cols)
result = cat2keys(cat_rows, cat_cols, filter = filter, ordered=ordered, sort_gb=sort_gb, invalid=invalid, fuse=fuse)
result._dataset = self
return result
# -------------------------------------------------------
def accum1(self, cat_rows: List[str], filter=None, showfilter:bool=False, ordered:bool=True, **kwargs) -> GroupBy:
"""
Returns the :class:`~rt.rt_groupby.GroupBy` object constructed from the Dataset
with a 'Totals' column and footer.
Parameters
----------
cat_rows : list of str
The list of column names to group by on the row axis. These columns will be
made into a :class:`~rt.rt_categorical.Categorical`.
filter : ndarray of bools, optional
This parameter is unused.
showfilter : bool, default False
This parameter is unused.
ordered : bool, default True
This parameter is unused.
sort_gb : bool, default True
Set to False to change the display order.
kwargs
May be any of the arguments allowed by the Categorical constructor
Returns
-------
GroupBy
Examples
--------
>>> ds.accum1('symbol').sum(ds.TradeSize)
"""
cat_rows = self.cat(cat_rows)
return GroupBy(self, cat_rows, totals=True, **kwargs)
# -------------------------------------------------------
def accum2(
self, cat_rows, cat_cols, filter=None, showfilter: bool = False,
ordered: Optional[bool] = None, lex: Optional[bool] = None, totals: bool = True
) -> 'Accum2':
"""
Returns the Accum2 object constructed from the dataset.
Parameters
----------
cat_rows : list
The list of column names to group by on the row axis. This will be made into a categorical.
cat_cols : list
The list of column names to group by on the column axis. This will be made into a categorical.
filter
TODO
showfilter : bool
Used in Accum2 to show filtered out data.
ordered : bool, optional
Defaults to None. Set to True or False to change the display order.
lex : bool
Defaults to None. Set to True for high unique counts. It will override `ordered` when set to True.
totals : bool, default True
Set to False to not show Total column.
Returns
-------
Accum2
Examples
--------
>>> ds.accum2('symbol', 'exchange').sum(ds.TradeSize)
>>> ds.accum2(['symbol','exchange'], 'date', ordered=True).sum(ds.TradeSize)
"""
cat_rows = self.cat(cat_rows, ordered=ordered, lex=lex)
cat_cols = self.cat(cat_cols, ordered=ordered, lex=lex)
# calling with rows, cols to match unstack() more closely
result = TypeRegister.Accum2(cat_rows, cat_cols, filter= filter, showfilter = showfilter, ordered=ordered, totals=totals)
# attach dataset to accum2 object so argument can be ommitted during calculation
result._dataset = self
return result
# -------------------------------------------------------
    def groupby(self, by: Union[str, List[str]], **kwargs) -> GroupBy:
        """
        Returns an :class:`~rt.rt_groupby.GroupBy` object constructed from the dataset.

        This function can accept any keyword arguments (in `kwargs`) allowed by the :class:`~rt.rt_groupby.GroupBy` constructor.

        Parameters
        ----------
        by : str or list of str
            The list of column names to group by.

        Other Parameters
        ----------------
        filter : ndarray of bool
            Pass in a boolean array to filter data. If a key no longer exists after filtering
            it will not be displayed.
        sort_display : bool
            Defaults to True. Set to False if you want to display data in the order of appearance.
        lex : bool
            When True, use a lexsort to sort the data.

        Returns
        -------
        GroupBy

        Examples
        --------
        All calculations from GroupBy objects will return a Dataset. Operations can be called in the following ways:

        Initialize dataset and groupby a single key:

        >>> #TODO: Need to call np.random.seed(12345) here to deterministically init the RNG used below
        >>> d = {'strings':np.random.choice(['a','b','c','d','e'], 30)}
        >>> for i in range(5): d['col'+str(i)] = np.random.rand(30)
        >>> ds = rt.Dataset(d)
        >>> gb = ds.groupby('strings')

        Perform operation on all columns:

        >>> gb.sum()
        *strings   col0   col1   col2   col3   col4
        --------   ----   ----   ----   ----   ----
        a          2.67   3.35   3.74   3.46   4.20
        b          1.36   1.53   2.59   1.24   0.73
        c          3.91   2.00   2.76   2.62   2.10
        d          4.76   5.13   4.30   3.46   2.21
        e          4.18   2.86   2.95   3.22   3.14

        Perform operation on a single column:

        >>> gb['col1'].mean()
        *strings   col1
        --------   ----
        a          0.48
        e          0.38
        d          0.40
        d          0.64
        c          0.48

        Perform operation on multiple columns:

        >>> gb[['col1','col2','col4']].min()
        *strings   col1   col2   col4
        --------   ----   ----   ----
        a          0.05   0.03   0.02
        e          0.02   0.24   0.02
        d          0.03   0.15   0.16
        d          0.17   0.19   0.05
        c          0.00   0.03   0.28

        Perform specific operations on specific columns:

        >>> gb.agg({'col1':['min','max'], 'col2':['sum','mean']})
                   col1          col2
        *strings    Min    Max    Sum   Mean
        --------   ----   ----   ----   ----
        a          0.05   0.92   3.74   0.53
        b          0.02   0.72   2.59   0.65
        c          0.03   0.73   2.76   0.55
        d          0.17   0.96   4.30   0.54
        e          0.00   0.82   2.95   0.49

        GroupBy objects can also be grouped by multiple keys:

        >>> gbmk = ds.groupby(['strings', 'col1'])
        >>> gbmk
        *strings   *col1   Count
        --------   -----   -----
        a           0.05       1
        .           0.11       1
        .           0.16       1
        .           0.55       1
        .           0.69       1
             ...     ...
        e           0.33       1
        .           0.36       1
        .           0.68       1
        .           0.68       1
        .           0.82       1
        """
        return GroupBy(self, by, **kwargs)
# -------------------------------------------------------
    def gb(self, by, **kwargs):
        """Alias of :meth:`~rt.rt_dataset.Dataset.groupby`; accepts the same arguments."""
        return self.groupby(by, **kwargs)
# -------------------------------------------------------
    def gbu(self, by, **kwargs):
        """Equivalent to :meth:`~rt.rt_dataset.Dataset.groupby` with ``sort_display=False`` (groups shown in order of appearance)."""
        # force unsorted display; all other kwargs pass through to groupby()
        kwargs['sort_display'] = False
        return self.groupby(by, **kwargs)
#--------------------------------------------------------------------------
def gbrows(self, strings:bool=False, dtype=None, **kwargs) -> GroupBy:
"""
Create a GroupBy object based on "computable" rows or string rows.
Parameters
----------
strings : bool
Defaults to False. Set to True to process strings.
dtype : str or numpy.dtype, optional
Defaults to None. When set, all columns will be cast to this dtype.
kwargs
Any other kwargs will be passed to ``groupby()``.
Returns
-------
GroupBy
Examples
--------
>>> ds = rt.Dataset({'a': rt.arange(3), 'b': rt.arange(3.0), 'c':['Jim','Jason','John']})
>>> ds.gbrows()
GroupBy Keys ['RowNum'] @ [2 x 3]
ikey:True iFirstKey:False iNextKey:False nCountGroup:False _filter:False _return_all:False
<BLANKLINE>
*RowNum Count
------- -----
0 2
1 2
2 2
>>> ds.gbrows().sum()
*RowNum Row
------- ----
0 0.00
1 2.00
2 4.00
<BLANKLINE>
[3 rows x 2 columns] total bytes: 36.0 B
Example usage of the string-processing mode of ``gbrows()``:
>>> ds.gbrows(strings=True)
GroupBy Keys ['RowNum'] @ [2 x 3]
ikey:True iFirstKey:False iNextKey:False nCountGroup:False _filter:False _return_all:False
<BLANKLINE>
*RowNum Count
------- -----
0 1
1 1
2 1
"""
if strings:
rowlist = list(self.noncomputable().values())
else:
rowlist = list(self.computable().values())
# use our hstack
hs = hstack(rowlist, dtype=dtype)
#create a categorical of integers so we can group by
arng = arange(self._nrows)
cat = TypeRegister.Categorical(tile(arng, len(rowlist)), arng, base_index=0)
#create a dataset with two columns
ds=Dataset({'Row':hs,'RowNum':cat})
return ds.groupby('RowNum', **kwargs)
# -------------------------------------------------------
# Reduction functions.
    def reduce(self, func, axis: Optional[int] = 0, as_dataset: bool = True, fill_value=None, **kwargs) -> Union['Dataset', Struct, FastArray, np.generic]:
        """
        Returns calculated reduction along axis.

        .. note::
            Behavior for ``axis=None`` differs from pandas!

        The default `fill_value` is ``None`` (drop) to ensure the most sensible default
        behavior for ``axis=None`` and ``axis=1``. As a thought problem, consider all
        three axis behaviors for func=sum or product.

        Parameters
        ----------
        func : reduction function (e.g. numpy.sum, numpy.std, ...)
        axis : int, optional
            * 0: reduce over columns, returning a Struct (or Dataset) of scalars.
              Reasonably cheap. String synonyms: ``c``, ``C``, ``col``, ``COL``, ``column``, ``COLUMN``.
            * 1: reduce over rows, returning an array of scalars.
              Could well be expensive/slow. String synonyms: ``r``, ``R``, ``row``, ``ROW``.
            * ``None``: reduce over rows and columns, returning a scalar.
              Could well be very expensive/slow. String synonyms: ``all``, ``ALL``.
        as_dataset : bool
            When `axis` is 0, this flag specifies a Dataset should be returned instead of a Struct. Defaults to True.
        fill_value
            * fill_value=None (default) -> drop all non-computable type columns from result
            * fill_value=alt_func -> force computation with alt_func
              (for axis=1 must work on indiv. elements)
            * fill_value=scalar -> apply as uniform fill value
            * fill_value=dict (defaultdict) of colname->fill_value, where
              None (or absent if not a defaultdict) still means drop column
              and an alt_func still means force compute via alt_func.
        kwargs
            all other kwargs are passed to `func`

        Returns
        -------
        Struct or Dataset or array or scalar
        """
        def _reduce_fill_values( fill_value):
            """
            return two lists:
            fvals: set to None if computable, set to fill value if noncomputable
            noncomp: set to True if not computable, otherwise False
            """
            noncomp = [False] * self.get_ncols()
            fvals = [None] * self.get_ncols()
            for colnum, colname in enumerate(self.keys()):
                _v = self.col_get_value(colname)
                if not _v.iscomputable():
                    noncomp[colnum] = True
                    if isinstance(fill_value, dict):
                        # try/catch instead of get() to support defaultdict usage
                        try:
                            fvals[colnum] = fill_value[colname]
                        except KeyError:
                            pass
                    else:
                        fvals[colnum] = fill_value
            return fvals, noncomp
        # normalize axis synonyms ('c', 'row', 'all', ...) to 0, 1 or None
        axis = self._axis_key(axis)
        cond_rtn_type = type(self) if as_dataset else Struct
        fvals, noncomp = _reduce_fill_values(fill_value)
        if axis == 0:
            od = {}
            # remove axis from kwargs
            kwargs.pop('axis', None)
            for _i, _k in enumerate(self.keys()):
                _v = self.col_get_value(_k)
                #print("func", func, 'colname', _k, 'dtype', _v.dtype, "v", _v, "kwargs:", kwargs)
                # not all arrays are computable, such as the std of a string array
                fval = fvals[_i]
                if not noncomp[_i]:
                    od[_k] = func(_v, **kwargs)
                elif callable(fval):
                    # a callable fill_value forces reduction of the non-computable column
                    od[_k] = fval(_v, **kwargs)
                elif fval is not None:
                    # a scalar fill_value stands in for the column's reduction
                    od[_k] = fval
                # fval is None -> non-computable column is silently dropped
            return cond_rtn_type(od)
        if axis == 1:
            if fill_value is None:
                # new fast path
                return func(self.imatrix_make(), axis=1, **kwargs)
            if not any(noncomp):
                # does not respect noncomputable cols.
                # 2.74 ms ± 6.18 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
                # return np.array([func(np.array(self[_r, :].tolist()), **kwargs) for _r in range(self.get_nrows())])
                # 267 µs ± 2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
                return FastArray([func(_r, **kwargs) for _r in self.asrows(as_type='array')])
            # respects noncomputable cols.
            # 448 µs ± 1.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
            def _row(_i):
                # build one row across all columns, applying/dropping fill values.
                # NOTE(review): the enumerate loop below shadows the row index _i;
                # harmless because row values were gathered first, but confusing.
                _r = [arr[_i] for arr in self.values()]
                _keep = np.ones(len(_r), dtype=bool)
                for _i, _nc in enumerate(noncomp):
                    if _nc:
                        fval = fvals[_i]
                        if callable(fval):
                            _r[_i] = fval(_r[_i], **kwargs)
                        elif fval is not None:
                            _r[_i] = fval
                        else:
                            _keep[_i] = False
                if _keep.all():
                    return _r
                return [_x for _i, _x in enumerate(_r) if _keep[_i]] # cannot use np.take!!!
            # TJD this code is slow and needs review
            return np.array([func(_row(_i), **kwargs) for _i in range(self.get_nrows())])
        if axis is None:
            if not any(noncomp):
                # does not respect noncomputable cols.
                # np.ravel doc suggests this to be the most likely to be efficient
                # 34.9 µs ± 57.9 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
                return func(np.reshape([self.col_get_value(_k) for _k in self.keys()], -1), **kwargs)
            # respects noncomputable cols.
            # 290 µs ± 1.86 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
            # reduce each column first (axis=0), then reduce the per-column results
            bycols = self.reduce(func, axis=0, as_dataset=True, fill_value=fill_value, **kwargs)
            return func(np.array(list(bycols.values())))
        raise NotImplementedError('Dataset.reduce(axis=<0, 1, None>)')
    def argmax(self, axis=0, as_dataset=True, fill_value=None):
        """Index of the maximum; see :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(argmax, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def argmin(self, axis=0, as_dataset=True, fill_value=None):
        """Index of the minimum; see :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(argmin, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def normalize_zscore(self, axis=0, as_dataset=True, fill_value=None):
        """Z-score normalization; see :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(normalize_zscore, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def normalize_minmax(self, axis=0, as_dataset=True, fill_value=None):
        """Min-max normalization; see :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(normalize_minmax, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def sum(self, axis=0, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(sum, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def mean(self, axis=0, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(mean, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def var(self, axis=0, ddof=1, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(var, axis=axis, as_dataset=as_dataset, fill_value=fill_value, ddof=ddof)
    def std(self, axis=0, ddof=1, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(std, axis=axis, as_dataset=as_dataset, fill_value=fill_value, ddof=ddof)
    def median(self, axis=0, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(median, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    # default fill_value=min is callable: reduce() then applies min to
    # non-computable (e.g. string) columns instead of dropping them
    def min(self, axis=0, as_dataset=True, fill_value=min):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(min, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    # default fill_value=max: same callable-fill behavior as min above
    def max(self, axis=0, as_dataset=True, fill_value=max):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(max, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    # default fill_value=len: non-computable columns are "counted" by length
    def count(self, axis=0, as_dataset=True, fill_value=len):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        # We should have another counting the non-no-data elements, but need to wait on safe-arrays.
        return self.reduce(len, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    #---NAN FUNCS--------------------------------------------------------------
    # NaN-variant reductions: same delegation pattern as the wrappers above,
    # but dispatching to the module-level nan* reduction functions.
    def nanargmax(self, axis=0, as_dataset=True, fill_value=None):
        """NaN-variant of argmax; see :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanargmax, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def nanargmin(self, axis=0, as_dataset=True, fill_value=None):
        """NaN-variant of argmin; see :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanargmin, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def nansum(self, axis=0, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nansum, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def nanmean(self, axis=0, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanmean, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    def nanvar(self, axis=0, ddof=1, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanvar, axis=axis, as_dataset=as_dataset, fill_value=fill_value, ddof=ddof)
    def nanstd(self, axis=0, ddof=1, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanstd, axis=axis, as_dataset=as_dataset, fill_value=fill_value, ddof=ddof)
    def nanmedian(self, axis=0, as_dataset=True, fill_value=None):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanmedian, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    # default fill_value=min is callable: reduce() applies min to
    # non-computable (e.g. string) columns instead of dropping them
    def nanmin(self, axis=0, as_dataset=True, fill_value=min):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanmin, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
    # default fill_value=max: same callable-fill behavior as nanmin above
    def nanmax(self, axis=0, as_dataset=True, fill_value=max):
        """See documentation of :meth:`~rt.rt_dataset.Dataset.reduce`"""
        return self.reduce(nanmax, axis=axis, as_dataset=as_dataset, fill_value=fill_value)
#--------------------------------------------------------------------------
def quantile(self, q: Optional[List[float]] = None, fill_value=None):
"""
Parameters
----------
q: defaults to [0.50], list of quantiles
fill_value: optional place-holder value for non-computable columns
Returns
-------
Dataset.
"""
if q is None:
q = [0.50]
# TODO NW Should be a String
labels = np.asanyarray(q)
if not isinstance(fill_value, (list, np.ndarray, dict, type(None))):
fill_value = [fill_value] * len(labels)
retval = self.reduce(quantile, q=q, as_dataset=True, fill_value=fill_value)
retval.Stats = labels
retval.col_move_to_front(['Stats'])
retval.label_set_names(['Stats'])
return retval
#--------------------------------------------------------------------------
    def describe(self, q: Optional[List[float]] = None, fill_value = None) -> 'Dataset':
        """
        Similar to pandas describe; columns remain stable, with extra column (Stats) added for names.

        .. Caution:: This routine can be expensive if the dataset is large.

        Parameters
        ----------
        q : list of float, optional
            List of quantiles to calculate.
            If not specified, defaults to ``[0.10, 0.25, 0.50, 0.75, 0.90]``.
        fill_value: optional
            Optional place-holder value for non-computable columns.

        Returns
        -------
        Dataset
            A Dataset containing the calculated, per-column quantile values.

        See Also
        --------
        FastArray.describe()
        """
        # delegate to the module-level describe so Dataset and FastArray share one implementation
        return describe(self, q=q, fill_value=fill_value)
#--------------------------------------------------------------------------
    def melt(self, id_vars=None, value_vars=None, var_name:Optional[str]=None, value_name:str='value', trim:bool=False) -> 'Dataset':
        """
        "Unpivots" a Dataset from wide format to long format, optionally leaving identifier
        variables set.

        This function is useful to massage a Dataset into a format where one or more columns
        are identifier variables (id_vars), while all other columns, considered measured variables
        (value_vars), are "unpivoted" to the row axis, leaving just two non-identifier columns,
        'variable' and 'value'.

        Parameters
        ----------
        id_vars : tuple, list, or ndarray, optional
            Column(s) to use as identifier variables.
        value_vars : tuple, list, or ndarray, optional
            Column(s) to unpivot. If not specified, uses all columns that are not set as id_vars.
        var_name : str, optional
            Name to use for the 'variable' column. If None it uses 'variable'.
        value_name : str
            Name to use for the 'value' column. Defaults to 'value'.
        trim : bool
            defaults to False. Set to True to drop zeros or nan (trims a dataset)

        Returns
        -------
        Dataset
            The unpivoted Dataset: the id columns, one 'variable' column and one 'value' column.

        Notes
        -----
        BUG: the current version does not handle categoricals correctly.
        """
        # normalize id_vars / value_vars to lists of column names
        if id_vars is not None:
            if not is_list_like(id_vars):
                id_vars = [id_vars]
            else:
                id_vars = list(id_vars)
        else:
            id_vars = []
        if value_vars is not None:
            if not is_list_like(value_vars):
                value_vars = [value_vars]
            else:
                value_vars = list(value_vars)
            tempdict = self[id_vars + value_vars].asdict()
        else:
            tempdict = self.asdict()
        if var_name is None:
            var_name = 'variable'
        N = self._nrows
        # K = number of value columns being unpivoted
        K = len(tempdict) - len(id_vars)
        #create an empty dataset
        mdata = type(self)({})
        # reexpand any categoricals
        for col in id_vars:
            id_data = tempdict.pop(col)
            if TypeRegister.is_binned_array(id_data):
                # note: multikey categorical expands to a tuple of arrays
                # previously raised an error on expand array
                id_data = id_data.expand_array
            # id columns repeat once per unpivoted value column
            mdata[col] = np.tile(id_data._np,K)
        # each source column name repeats N times in the 'variable' column
        mdata[var_name] = FastArray(list(tempdict.keys())).repeat(N)
        # values are the source columns stacked end-to-end, matching the repeat order above
        mdata[value_name] = hstack(tempdict.values())
        if trim:
            # drop rows whose value is nan or zero
            goodmask = ~mdata[value_name].isnanorzero()
            mdata=mdata[goodmask,:]
        return mdata
#--------------------------------------------------------------------------
    @classmethod
    def hstack(cls, ds_list, destroy: bool = False) -> 'Dataset':
        """
        Stacks rows from multiple datasets (alias for :meth:`concat_rows`).

        Parameters
        ----------
        ds_list : iterable of Dataset
            The Datasets to be concatenated.
        destroy : bool
            Set to True to destroy the input datasets to save memory. Defaults to False.

        Returns
        -------
        Dataset

        See Also
        --------
        Dataset.concat_rows
        """
        return cls.concat_rows(ds_list, destroy=destroy)
#--------------------------------------------------------------------------
    @classmethod
    def concat_rows(cls, ds_list: Iterable['Dataset'], destroy: bool = False) -> 'Dataset':
        """
        Stacks columns from multiple datasets.

        If a dataset is missing a column that appears in others, it will fill the gap with the default invalid for that column's type.
        Categoricals will be merged and stacked.
        Column types will be checked to make sure they can be safely stacked - no general type mismatch allowed.
        Columns of the same name must have the same number of dimension in each dataset (1 or 2 dimensions allowed)

        Parameters
        ----------
        ds_list : iterable of Dataset
            The Datasets to be concatenated
        destroy : bool
            Set to True to destroy any dataset in the list to save memory. Defaults to False.

        Returns
        -------
        Dataset
            A new Dataset created from the concatenated rows of the input Datasets.

        Examples
        --------
        Basic:

        >>> ds1 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
        >>> ds2 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
        >>> ds1
        #   col_0   col_1   col_2
        -   -----   -----   -----
        0    0.39    0.80    0.64
        1    0.54    0.80    0.36
        2    0.14    0.75    0.86
        3    0.05    0.61    0.95
        4    0.37    0.39    0.03
        >>> ds2
        #   col_0   col_1   col_2
        -   -----   -----   -----
        0    0.09    0.75    0.37
        1    0.90    0.34    0.17
        2    0.52    0.32    0.78
        3    0.37    0.20    0.34
        4    0.73    0.69    0.41
        >>> rt.Dataset.concat_rows([ds1, ds2])
        #   col_0   col_1   col_2
        -   -----   -----   -----
        0    0.39    0.80    0.64
        1    0.54    0.80    0.36
        2    0.14    0.75    0.86
        3    0.05    0.61    0.95
        4    0.37    0.39    0.03
        5    0.09    0.75    0.37
        6    0.90    0.34    0.17
        7    0.52    0.32    0.78
        8    0.37    0.20    0.34
        9    0.73    0.69    0.41

        With columns missing in one from some datasets:

        >>> ds1 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
        >>> ds2 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(2)})
        >>> rt.Dataset.concat_rows([ds1, ds2])
        #   col_0   col_1   col_2
        -   -----   -----   -----
        0    0.78    0.64    0.98
        1    0.61    0.87    0.85
        2    0.57    0.42    0.90
        3    0.82    0.50    0.60
        4    0.19    0.16    0.23
        5    0.69    0.83     nan
        6    0.07    0.82     nan
        7    0.58    0.34     nan
        8    0.69    0.38     nan
        9    0.89    0.07     nan

        With categorical column:

        >>> ds1 = rt.Dataset({'cat_col': rt.Categorical(['a','a','b','c','a']),
        ...                   'num_col': np.random.rand(5)})
        >>> ds2 = rt.Dataset({'cat_col': rt.Categorical(['b','b','a','c','d']),
        ...                   'num_col': np.random.rand(5)})
        >>> rt.Dataset.concat_rows([ds1, ds2])
        #   cat_col   num_col
        -   -------   -------
        0   a            0.38
        1   a            0.71
        2   b            0.84
        3   c            0.47
        4   a            0.18
        5   b            0.18
        6   b            0.47
        7   a            0.16
        8   c            0.96
        9   d            0.88

        Multiple dimensions (note: numpy v-stack will be used to concatenate 2-dimensional columns):

        >>> ds1 = rt.Dataset({'nums': rt.ones((4,4))})
        >>> ds1
        #   nums
        -   ------------------------
        0   [1.00, 1.00, 1.00, 1.00]
        1   [1.00, 1.00, 1.00, 1.00]
        2   [1.00, 1.00, 1.00, 1.00]
        3   [1.00, 1.00, 1.00, 1.00]
        >>> ds2 = rt.Dataset({'nums': rt.zeros((4,4))})
        >>> ds2
        #   nums
        -   ------------------------
        0   [0.00, 0.00, 0.00, 0.00]
        1   [0.00, 0.00, 0.00, 0.00]
        2   [0.00, 0.00, 0.00, 0.00]
        3   [0.00, 0.00, 0.00, 0.00]
        >>> rt.Dataset.concat_rows([ds1, ds2])
        #   nums
        -   ------------------------
        0   [1.00, 1.00, 1.00, 1.00]
        1   [1.00, 1.00, 1.00, 1.00]
        2   [1.00, 1.00, 1.00, 1.00]
        3   [1.00, 1.00, 1.00, 1.00]
        4   [0.00, 0.00, 0.00, 0.00]
        5   [0.00, 0.00, 0.00, 0.00]
        6   [0.00, 0.00, 0.00, 0.00]
        7   [0.00, 0.00, 0.00, 0.00]

        Multiple dimensions with missing columns (sentinels/invalids will be flipped to final vstack dtype)

        >>> ds1 = rt.Dataset({'nums': rt.ones((5,5)), 'nums2': rt.zeros((5,5), dtype=np.float64)})
        >>> ds2 = rt.Dataset({'nums': rt.ones((5,5))})
        >>> ds3 = rt.Dataset({'nums': rt.ones((5,5)), 'nums2': rt.zeros((5,5), dtype=np.int8)})
        >>> rt.Dataset.concat_rows([ds1, ds2, ds3])
         #   nums                             nums2
        --   ------------------------------   ------------------------------
         0   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
         1   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
         2   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
         3   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
         4   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
         5   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
         6   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
         7   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
         8   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
         9   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
        10   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
        11   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
        12   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
        13   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
        14   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
        """
        # all the stacking/merging work is delegated to the shared hstack_any helper;
        # `cls` lets subclasses produce their own type, `Dataset` is the base fallback
        return hstack_any(ds_list, cls, Dataset, destroy=destroy)
#--------------------------------------------------------------------------
@classmethod
def concat_columns(cls, dsets, do_copy:bool, on_duplicate:str='raise', on_mismatch:str='warn'):
r"""
Concatenates a list of Datasets or Structs horizontally.
Parameters
----------
cls : class
The class (Dataset)
dsets : iterable
An iterable of Datasets
do_copy : bool
Makes deep copies of arrays if set to True
on_duplicate : {'raise', 'first', 'last'}
Governs behavior in case of duplicate columns.
on_mismatch : {'warn', 'raise', 'ignore'}
Optional, governs behavior for allowed duplicate column names, how to
address mismatched column values; can be 'warn' (default), 'raise' or 'ignore'.
Returns
-------
Dataset
The resulting dataset after concatenation.
Examples
--------
With the ``'last'`` `on_duplicate` option:
>>> N = 5
>>> dset1 = rt.Dataset(dict(A=rt.arange(N), B=rt.ones(N), C=N*['c']))
>>> dset2 = rt.Dataset(dict(A=rt.arange(N, 2*N, 1), B=rt.zeros(N), D=N*['d']))
>>> dsets = [dset1, dset2]
>>> rt.Dataset.concat_columns(dsets, do_copy=True, on_duplicate='last')
# A B C D
- - ---- - -
0 5 0.00 c d
1 6 0.00 c d
2 7 0.00 c d
3 8 0.00 c d
4 9 0.00 c d
<BLANKLINE>
[5 rows x 4 columns] total bytes: 70.0 B
With the default (``'raise'``) for the `on_duplicate` option:
>>> rt.Dataset.concat_columns(dsets, do_copy=True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\ProgramData\Anaconda3\envs\riptable-dev37\lib\site-packages\riptable-0.0.0-py3.7-win-amd64.egg\riptable\rt_dataset.py", line 4308, in concat_columns
raise KeyError(f"Duplicate column '{column}'")
KeyError: "Duplicate column 'A'"
"""
# check that all Datasets have the same number of rows
if on_duplicate not in ('raise', 'first', 'last'):
raise ValueError(f"Invalid on_duplicate '{on_duplicate}'")
if on_mismatch not in ('raise', 'warn', 'ignore'):
raise ValueError(f"Invalid on_mismatch '{on_mismatch}'")
# if there are no Datasets ...
if len(dsets) == 0:
raise ValueError("No Datasets to concatenate")
if len(dsets) == 1 and not do_copy:
return dsets[0]
#try to convert any structs to dsets
newdset=[]
for d in dsets:
# check if even a dataset, if not try to convert it
try:
# test to see if a dataset
rownum = d._nrows
except:
#try to convert to a dataset (probably from struct)
try:
d = Dataset(d)
except:
#for c in d:
# print("col", c, type(d[c]), len(d[c]), d[c])
raise ValueError(f"Unable to convert {d!r} to a Dataset")
newdset.append(d)
dsets = newdset
# check for same length
rownum_set = set([d.shape[0] for d in dsets])
if len(rownum_set) != 1:
raise ValueError(f'Inconsistent Dataset lengths {rownum_set}')
# create dictionary
dict_retval = {}
columns = set()
dups = set()
for column, a in [(c, v) for d in dsets for c, v in d.items()]:
if column in columns:
if on_mismatch != 'ignore':
# print(f'on_mismatch={on_mismatch} column={column}')
dups.add(column)
if on_duplicate == 'raise':
raise KeyError(f"Duplicate column '{column}'")
elif on_duplicate == 'first':
pass
else:
dict_retval[column] = a.copy() if do_copy else a
else:
columns.add(column)
dict_retval[column] = a.copy() if do_copy else a
if on_mismatch != 'ignore':
if len(dups) > 0:
if on_mismatch == 'warn':
warnings.warn(f'concat_columns() duplicate column mismatch: {dups!r}')
if on_mismatch == 'raise':
raise RuntimeError(f'concat_columns() duplicate column mismatch: {dups!r}')
return cls(dict_retval)
# TODO: get .char and check list
#--------------------------------------------------------------------------
def _is_float_encodable(self, xtype):
return xtype in (int, float, np.integer, np.floating,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64)
#--------------------------------------------------------------------------
    def _ipython_key_completions_(self):
        # IPython tab-completion hook: complete ds['<TAB>'] with the column names.
        return self.keys()
#--------------------------------------------------------------------------
def _normalize_column(self, x, field_key):
original_type = x.dtype
category_values = None
is_categorical = False
if self._is_float_encodable(original_type):
if isinstance(x, TypeRegister.Categorical):
category_values = x._categories
is_categorical = True
vals = x.astype(np.float64)
else:
if field_key is None:
category_values, vals = unique(x, return_inverse=True)
vals = vals.astype(np.float64)
else:
category_values = field_key
isValid, vals = ismember(x, category_values, 1)
vals = vals.astype(np.float64)
vals[~isValid] = np.nan
return vals, original_type, is_categorical, category_values
#--------------------------------------------------------------------------
def as_matrix(self, save_metadata=True, column_data={}):
columns = list(self.keys())
nrows = self.shape[0]
ncols = self.shape[1] # TODO: may expand this for 64-bit columns
out_array = empty((nrows, ncols), dtype=np.float64)
column_info = {}
for col in range(ncols):
field_key = column_data.get(columns[col])
out_array[:, col], original_type, is_categorical, category_values = self._normalize_column(
self[columns[col]], field_key)
column_info[columns[col]] = {'dtype': original_type, 'category_values': category_values,
'is_categorical': is_categorical}
if save_metadata:
return out_array, column_info
else:
return out_array
# -------------------------------------------------------------------
def as_recordarray(self):
"""
Convert Dataset to one array (record array).
Wrapped class arrays such as Categorical and DateTime will lose their type
TODO: Expand categoricals
Examples
--------
>>> ds = rt.Dataset({'a': rt.arange(3), 'b': rt.arange(3.0), 'c':['Jim','Jason','John']})
>>> ds.as_recordarray()
rec.array([(0, 0., b'Jim'), (1, 1., b'Jason'), (2, 2., b'John')],
dtype=[('a', '<i4'), ('b', '<f8'), ('c', 'S5')])
>>> ds.as_recordarray().c
array([b'Jim', b'Jason', b'John'], dtype='|S5')
See Also
--------
numpy.core.records.array
"""
# TODO: optionally? expand categoricals
vals = self.values()
names = self.keys()
ra=np.core.records.fromarrays(list(vals), names=names)
return ra
# -------------------------------------------------------------------
def as_struct(self):
# TJD: NOTE need test for this
"""
Convert a dataset to a struct.
If the dataset is only one row, the struct will be of scalars.
Returns
-------
Struct
"""
mydict = self.asdict()
if self._nrows == 1:
olddict=mydict
mydict={}
# copy over just first and only element
for colname, array in olddict.items():
mydict[colname]=array[0]
return TypeRegister.Struct(mydict)
# -------------------------------------------------------------------
    def apply_rows(self, pyfunc, *args, otypes=None, doc=None, excluded =None, cache=False, signature=None):
        """
        Convert the dataset to a recordarray and apply `pyfunc` row by row via
        :func:`numpy.vectorize`.

        The vectorized function evaluates `pyfunc` over successive tuples
        of the input arrays like the python map function, except it uses the
        broadcasting rules of numpy.

        The data type of the output of `vectorized` is determined by calling
        the function with the first element of the input. This can be avoided
        by specifying the `otypes` argument.

        Parameters
        ----------
        pyfunc : callable
            A python function or method, called with one record per row.
        *args
            Extra positional arguments forwarded to the vectorized call.
        otypes, doc, excluded, cache, signature
            Passed straight through to :func:`numpy.vectorize`.

        Returns
        -------
        ndarray
            The result of applying `pyfunc` to every row.

        Example
        -------
        >>> ds = rt.Dataset({'a':arange(3), 'b':arange(3.0), 'c':['Jim','Jason','John']}, unicode=True)
        >>> ds.apply_rows(lambda x: x[2] + str(x[1]))
        rec.array(['Jim0.0', 'Jason1.0', 'John2.0'], dtype=<U8)
        """
        # build the vectorized wrapper, then feed it the dataset as a record array
        vfunc = np.vectorize(pyfunc, otypes=otypes, doc=doc, excluded=excluded, cache=cache, signature=signature)
        ra = self.as_recordarray()
        result=vfunc(ra, *args)
        return result
# -------------------------------------------------------------------
def apply_rows_numba(self, *args, otype=None, myfunc="myfunc"):
"""
Usage:
-----
Prints to screen an example numba signature for the apply function.
You can then copy this example to build your own numba function.
Inputs:
------
Can pass in multiple test arguments.
Examples
--------
>>> ds = rt.Dataset({'a':rt.arange(10), 'b': rt.arange(10)*2, 'c': rt.arange(10)*3})
>>> ds.apply_rows_numba()
Copy the code snippet below and rename myfunc
---------------------------------------------
import numba
@numba.jit
def myfunc(data_out, a, b, c):
for i in range(len(a)):
data_out[i]=a[i] #<-- put your code here
<BLANKLINE>
---------------------------------------------
Then call
data_out = rt.empty_like(ds.a)
myfunc(data_out, ds.a, ds.b, ds.c)
>>> import numba
>>> @numba.jit
... def myfunc(data_out, a, b, c):
... for i in range(len(a)):
... data_out[i]=a[i]+b[i]+c[i]
>>> data_out = rt.empty_like(ds.a)
>>> myfunc(data_out, ds.a, ds.b, ds.c)
>>> ds.data_out=data_out
>>> ds
# a b c data_out
- - -- -- --------
0 0 0 0 0
1 1 2 3 6
2 2 4 6 12
"""
preamble = "import numba\n@numba.jit\n"
list_inputs = ""
list_inputs_tostring = ""
firstinput = None
for c in self.keys():
if len(list_inputs) > 0:
list_inputs = list_inputs + ', '
list_inputs_tostring = list_inputs_tostring + ', '
else:
firstinput = c
list_inputs=list_inputs + c
if self[c].dtype.char in ['U','S']:
list_inputs_tostring=list_inputs_tostring + "ds." + c + ".numbastring"
else:
list_inputs_tostring=list_inputs_tostring + "ds." + c
code=f"def {myfunc}(data_out, {list_inputs}):\n for i in range(len({firstinput})):\n data_out[i]={firstinput}[i] #<-- put your code here\n"
exec = preamble+code
print("Copy the code snippet below and rename myfunc")
print("---------------------------------------------")
print(exec)
print("---------------------------------------------")
print(f"Then call ")
print(f"data_out = rt.empty_like(ds.{firstinput})")
print(f"{myfunc}(data_out, {list_inputs_tostring})")
#return exec
# -------------------------------------------------------------------
def apply(self, funcs, *args, check_op: bool = True, **kwargs):
"""
The apply method returns a Dataset the same size
as the current dataset. The transform function is applied
column-by-column. The transform function must:
* Return an array that is the same size as the input array.
* Not perform in-place operations on the input array. Arrays
should be treated as immutable, and changes to an array may
produce unexpected results.
Parameters
----------
funcs : callable or list of callable
the function or list of functions applied to each column.
check_op : bool
Defaults to True. Whether or not to check if dataset has its own version, like ``sum``.
Returns
-------
Dataset or Multiset
Examples
--------
>>> ds = rt.Dataset({'a': rt.arange(3), 'b': rt.arange(3.0).tile(7), 'c':['Jim','Jason','John']})
>>> ds.apply(lambda x: x+1)
# a b c
- - ----- ------
0 1 1.00 Jim1
1 2 8.00 Jason1
2 3 15.00 John1
In the example below sum is not possible for a string so it is removed.
>>> ds.apply([rt.sum, rt.min, rt.max])
a b c
# Sum Min Max Sum Min Max Min Max
- --- --- --- ----- ---- ----- ----- ----
0 3 0 2 21.00 0.00 14.00 Jason John
"""
if not isinstance(funcs, list):
funcs = [funcs]
if len(funcs)==0:
raise ValueError("The second argument funcs must not be empty")
for f in funcs:
if not callable(f):
raise TypeError(f"{f} is not callable. Could not be applied to dataset.")
results = {}
# loop over all the functions supplied
# if more than one function supplied, we will return a multiset
for f in funcs:
ds = type(self)()
dsname =f.__name__.capitalize()
call_user_func = True
if check_op:
# check to see if dataset has its own version of the operation)
try:
ds= getattr(self, f.__name__)()
call_user_func = False
except:
pass
if call_user_func:
# the dataset does not have its own version
# call the user supplied function
for colname, array in self.items():
ds[colname] = f(array, *args, **kwargs)
results[dsname]=ds
if len(funcs)==1:
return ds
else:
return TypeRegister.Multiset(results)
# -------------------------------------------------------------------
    @classmethod
    def from_tagged_rows(cls, rows_iter):
        """
        Create a Dataset from an iterable of 'rows', each to be a dict, Struct, or named_tuple of
        scalar values.

        Parameters
        ----------
        rows_iter : iterable of dict, Struct or named_tuple of scalars

        Returns
        -------
        Dataset
            A new Dataset.

        Notes
        -----
        Still TODO: Handle case w/ not all rows having same keys. This is waiting on SafeArray
        and there are stop-gaps to use until that point.

        Examples
        --------
        >>> ds1 = rt.Dataset.from_tagged_rows([{'a': 1, 'b': 11}, {'a': 2, 'b': 12}])
        >>> ds2 = rt.Dataset({'a': [1, 2], 'b': [11, 12]})
        >>> (ds1 == ds2).all(axis=None)
        True
        """
        # count key occurrences so we can verify every row carries the same keys
        keys = Counter()
        rows = []
        # number of rows supporting __getitem__ (dict-style element access)
        n_have_getitem = 0
        for row in rows_iter:
            if isinstance(row, tuple) and hasattr(row, '_fields'): # proxy for a namedtuple
                keys.update(row._fields)
                # convert namedtuple to a dict so it is accessed uniformly below
                row = row._asdict()
            elif isinstance(row, (Struct, dict)):
                keys.update(row.keys())
            else:
                raise TypeError(f'{cls.__name__}.from_tagged_rows: input must be iterable of dict or Struct.')
            n_have_getitem += hasattr(row, '__getitem__')
            rows.append(row)
        if len(rows) == 0 or len(keys) == 0:
            return cls({})
        # all keys must appear the same number of times (i.e. in every row)
        if len(set(keys.values())) != 1:
            raise NotImplementedError(f'{cls.__name__}.from_tagged_rows(): All rows must have same keys.')
        retval = {_k: [] for _k in sorted(keys)} # no reason to priv. the key order of any one row
        # dispatch on the fastest uniform access pattern; mixed input falls back
        # to a per-row hasattr check
        if n_have_getitem == 0:
            for row in rows:
                for _k in row:
                    retval[_k].append(getattr(row, _k))
        elif n_have_getitem == len(rows):
            for row in rows:
                for _k in row:
                    retval[_k].append(row[_k])
        else:
            for row in rows:
                for _k in row:
                    retval[_k].append(row[_k] if hasattr(row, '__getitem__') else getattr(row, _k))
        return cls(retval)
@classmethod
def from_rows(cls, rows_iter, column_names):
"""
Create a Dataset from an iterable of 'rows', each to be an iterable of scalar values,
all having the same length, that being the length of column_names.
Parameters
----------
rows_iter : iterable of iterable of scalars
column_names : list of str
list of column names matching length of each row
Returns
-------
Dataset
A new Dataset
Examples
--------
>>> ds1 = rt.Dataset.from_rows([[1, 11], [2, 12]], ['a', 'b'])
>>> ds2 = rt.Dataset({'a': [1, 2], 'b': [11, 12]})
>>> (ds1 == ds2).all(axis=None)
True
"""
ncols = len(column_names)
if ncols == 0:
return cls({})
cols = [[] for _k in column_names]
for row in rows_iter:
if isinstance(row, (dict, Struct, Dataset)): # other dict types?
raise TypeError(f'{cls.__name__}.from_rows: rows can not be "dictionaries".')
if len(row) != ncols:
raise ValueError(f'{cls.__name__}.from_rows: all rows must have same length as column_names.')
for _i, _e in enumerate(row):
cols[_i].append(_e)
return cls(dict(zip(column_names, cols)))
@classmethod
def from_jagged_rows(cls, rows, column_name_base='C', fill_value=None):
"""
Returns a Dataset from rows of different lengths. All columns in Dataset will be bytes or unicode. Bytes will be used if possible.
Parameters
----------
rows
list of numpy arrays, lists, scalars, or anything that can be turned into a numpy array.
column_name_base : str
columns will by default be numbered. this is an optional prefix which defaults to 'C'.
fill_value : str, optional
custom fill value for missing cells. will default to the invalid string
Notes
-----
*performance warning*: this routine iterates over rows in non-contiguous memory to fill in final column values.
TODO: maybe build all final columns in the same array and fill in a snake-like manner like Accum2.
"""
# get final dataset dims, flip all input to array
nrows = len(rows)
# always favor bytestrings
dt = 'S'
for i, r in enumerate(rows):
# re-expand categoricals
# note: multikey categorical expands to a tuple of arrays
# previously raised an error on expand array
if TypeRegister.is_binned_array(r):
r = r.expand_array
# possibly flip all arrays/lists/scalars to string arrays
flip_to_fa = False
if not isinstance(r, np.ndarray):
flip_to_fa = True
elif r.dtype.char not in 'US':
flip_to_fa = True
if flip_to_fa:
r = TypeRegister.FastArray(r, dtype='S')
rows[i] = r
# final dtype will be unicode
if rows[i].dtype.char == 'U':
dt = 'U'
ncols = len(max(rows, key=len))
# get the string itemsize so the max string fits
width = max(rows, key= lambda x: x.itemsize).itemsize
# set fill value
if fill_value is not None:
# match to dtype
if isinstance(fill_value, str):
if dt == 'S':
inv = fill_value.encode()
elif isinstance(fill_value, bytes):
if dt == 'U':
inv = fill_value.decode()
else:
inv = str(fill_value)
else:
# use default
inv = INVALID_DICT[np.dtype(dt).num]
# make sure final array itemsize can fit all strings
if dt == 'U':
width /= 4
final_dt = dt+str(width)
# build final dict, column by column
# this is slow for larger data because it has to loop over rows
final = {}
for i in range(ncols):
col = empty(nrows, dtype=final_dt)
for j, r in enumerate(rows):
# if there are no more items in the column, fill with invalid
if i >= len(r):
fill = inv
else:
fill = rows[j][i]
col[j] = fill
# column name will be a number
final[column_name_base+str(i)]=col
return cls(final)
@classmethod
def from_jagged_dict(cls, dct, fill_value=None, stacked=False):
"""
Creates a Dataset from a dict where each key represents a column name base and each value
an iterable of 'rows'. Each row in the values iterable is, in turn, a scalar or an
iterable of scalar values having variable length.
Parameters
----------
dct
a dictionary of columns that are to be formed into rows
fill_value
value to fill missing values with, or if None, with the NODATA value
of the type of the first value from the first row with values for the given key
stacked : bool
Whether to create stacked rows in the output when an input row
in one of the input values objects contains an iterable.
Returns
-------
Dataset
A new Dataset.
Notes
-----
For a given key, if each row in the corresponding values iterable is a scalar, a
single column will be created with a column name equal to the key name.
If for a given key, a row in the corresponding values iterable is an iterable, the
behavior is determined by the stacked parameter.
If stacked is False (the default), as many columns will be created as necessary to
contain the maximum number of scalar values in the value rows. The column names will
be the key name plus a zero based index. Any empty elements in a row will be filled with
the specified fill_value, or if None, with a NODATA value of the type corresponding to the
first value from the first row with values for the given key.
If stacked is True, one column will be created for each input key, and for each row
of input values, a row will be created in the output for every combination of
value elements from each column in the input row.
Examples
--------
>>> d = {'name': ['bob', 'mary', 'sue', 'john'],
... 'letters': [['A', 'B', 'C'], ['D'], ['E', 'F', 'G'], 'H']}
>>> ds1 = rt.Dataset.from_jagged_dict(d)
>>> nd = rt.INVALID_DICT[np.dtype(str).num]
>>> ds2 = rt.Dataset({'name': ['bob', 'mary', 'sue', 'john'],
... 'letters0': ['A','D','E','H'], 'letters1': ['B',nd,'F',nd],
... 'letters2': ['C',nd,'G',nd]})
>>> (ds1 == ds2).all(axis=None)
True
>>> ds3 = rt.Dataset.from_jagged_dict(d, stacked=True)
>>> ds4 = rt.Dataset({'name': ['bob', 'bob', 'bob', 'mary', 'sue', 'sue', 'sue', 'john'],
... 'letters': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']})
>>> (ds3 == ds4).all(axis=None)
True
"""
# Determine how many input rows and assure all columns conform
num_in_rows = 0
for k, v in dct.items():
if num_in_rows == 0:
num_in_rows = len(v)
else:
if len(v) != num_in_rows:
raise ValueError(f'{cls.__name__}.from_jagged_ rows: all values must ' +
'have same length.')
# If not stacked, concatenate columns constructed from each key/value
if not stacked:
ds = cls()
for k, v in dct.items():
ids = Dataset.from_jagged_rows(v, column_name_base=k, fill_value=fill_value)
for ik in ids.keys():
ds[ik] = ids[ik]
return ds
# If stacked
else:
# Determine total number of output rows
num_rows_ar = np.ones(num_in_rows, dtype=np.int64)
for vals in dct.values():
for i, r in enumerate(vals):
num_rows_ar[i] *= len(r) if is_list_like(r) else 1
num_rows = num_rows_ar.sum()
# Determine the type of each output column by creating arrays
# (necessary to run through full, flattened list to get max string size)
type_cols = []
for vals in dct.values():
type_cols.append(np.array([item for sublist in vals for item in
(sublist if is_list_like(sublist) else [sublist])]))
# Allocate the output columns, as necessary
cols = [0] * len(type_cols)
col_done = [0] * len(type_cols)
for j, type_col in enumerate(type_cols):
(cols[j], col_done[j]) = (type_col, True) if len(type_col) == num_rows\
else (np.zeros(num_rows, type_col.dtype), False)
# Fill the output columns, as necessary
column_names = list(dct.keys())
out_row_num = 0
for in_row_num in range(num_in_rows):
num_repeats = 1
num_out_rows = num_rows_ar[in_row_num]
for j, vals in enumerate(dct.values()):
if col_done[j]:
continue
val = vals[in_row_num]
if not is_list_like(val):
val = [val]
num_tiles = int(num_out_rows/(num_repeats*len(val)))
col_row_num = out_row_num
for tile_num in range(num_tiles):
for v in val:
for repeat_num in range(num_repeats):
cols[j][col_row_num] = v
col_row_num += 1
num_repeats *= len(val)
out_row_num += num_out_rows
return cls(dict(zip(column_names, cols)))
# -------------------------------------------------------
    def trim(
        self,
        func: Optional[Callable[[np.ndarray], np.ndarray]] = None,
        zeros: bool = True,
        nans: bool = True,
        rows: bool = True,
        keep: bool = False,
        ret_filters: bool = False
    ) -> Union['Dataset', Tuple['Dataset', np.ndarray, np.ndarray]]:
        """
        Returns a Dataset with columns removed that contain all zeros or all nans (or either).
        If `rows` is True (the default), any rows which are all zeros or all nans will also be removed.

        If `func` is set, it will bypass the zeros and nan check and instead call `func`.

        - any column that contains all True after calling `func` will be removed.
        - any row that contains all True after calling `func` will be removed if `rows` is True.

        Parameters
        ----------
        func
            A function which inputs an array and returns a boolean mask.
        zeros : bool
            Defaults to True. Values must be non-zero.
        nans : bool
            Defaults to True. Values cannot be nan.
        rows : bool
            Defaults to True. Reduce rows also if entire row filtered.
        keep : bool
            Defaults to False. When set to True, does the opposite.
        ret_filters : bool
            If True, return row and column filters based on the comparisons

        Returns
        -------
        Dataset or (Dataset, row_filter, col_filter)

        Example
        -------
        >>> ds = rt.Dataset({'a': rt.arange(3), 'b': rt.arange(3.0)})
        >>> ds.trim()
        #   a      b
        -   -   ----
        0   1   1.00
        1   2   2.00
        >>> ds.trim(lambda x: x > 1)
        #   a      b
        -   -   ----
        0   0   0.00
        1   1   1.00
        >>> ds.trim(isfinite)
        Dataset is empty (has no rows).
        """
        def iszero(arr):
            # default zero test used when nans=False
            return arr == 0
        # Remove columns that don't pass
        col_filter = []
        col_filter_mask = []
        # pick the default mask function from the zeros/nans flags
        if func is None:
            if zeros and nans:
                func = isnanorzero
            elif zeros:
                func = iszero
            elif nans:
                func = isnan
            else:
                raise ValueError("func must be set, or zeros or nans must be true")
        labels = self.label_get_names()
        colboolmask = np.zeros(self._ncols, dtype='?')
        # loop through all computable columns
        for i, (col, arr) in enumerate(self.items()):
            if col not in labels and arr.iscomputable():
                result=func(arr)
                # dtype.num == 0 means func returned a boolean mask
                if result.dtype.num ==0:
                    if keep:
                        # check if all FALSE
                        addcol = sum(result) != 0
                    else:
                        # check if all TRUE
                        #print('**col ', col, sum(result), len(arr))
                        addcol = sum(result) != len(arr)
                    if addcol:
                        col_filter_mask.append(result)
                        col_filter.append(col)
                        colboolmask[i]=True
                else:
                    #add because did not return bool
                    col_filter.append(col)
                    colboolmask[i]=True
            else:
                #add non-computable
                col_filter.append(col)
                colboolmask[i]=True
        # check for empty dataset?
        rowmask = None
        if rows:
            # combine the per-column boolean masks into one row mask
            for arr in col_filter_mask:
                if rowmask is None:
                    # first one, just set the value
                    rowmask = arr
                else:
                    # timed, didn't seem to make much difference
                    #if keep: rowmask = mask_ori(col_filter_mask)
                    #else: rowmask = mask_andi(col_filter_mask)
                    # inplace OR on boolean mask
                    if keep:
                        rowmask += arr
                    else:
                        # inplace AND on boolean mask
                        # print('**and', col, sum(arr), sum(rowmask))
                        rowmask *= arr
        # remove rows that are all true
        applyrowmask = None
        if rowmask is not None:
            if keep:
                # check if anything to filter on
                if sum(rowmask) != len(rowmask):
                    #reduce all the rows
                    applyrowmask = rowmask
            else:
                # check if anything to negatively filter on
                #print('**col', col, sum(rowmask))
                if sum(rowmask) != 0:
                    #reduce all the rows
                    applyrowmask = ~rowmask
        # remove cols that are not in list
        # remove rows that are all False
        if applyrowmask is not None:
            newds=self[applyrowmask, col_filter]
        else:
            newds = self[col_filter]
        # If we had summary, we need to apply the col_filter
        # and recalculate the totals
        if ret_filters:
            return (newds, applyrowmask, col_filter)
        else:
            return newds
# -------------------------------------------------------
    def keep(self, func, rows:bool= True):
        """
        Keep only the columns/rows where `func` returns True somewhere
        (the inverse of :meth:`trim`).

        `func` must be set. Examples of `func` include ``isfinite``, ``isnan``, ``lambda x: x==0``

        - any column that contains all False after calling `func` will be removed.
        - any row that contains all False after calling `func` will be removed if `rows` is True.

        Parameters
        ----------
        func : callable
            A function which accepts an array and returns a boolean mask of the same shape as the input.
        rows : bool
            If `rows` is True (the default), any rows which are all zeros or all nans will also be removed.

        Returns
        -------
        Dataset

        Example
        -------
        >>> ds = rt.Dataset({'a': rt.arange(3), 'b': rt.arange(3.0)})
        >>> ds.keep(lambda x: x > 1)
        #   a      b
        -   -   ----
        2   2   2.00
        >>> ds.keep(rt.isfinite)
        #   a      b
        -   -   ----
        0   0   0.00
        1   1   1.00
        2   2   2.00
        """
        # delegate to trim() with keep=True, which inverts the filtering logic
        return self.trim(func=func, rows=rows, keep=True)
# -------------------------------------------------------
    def pivot(
        self, labels=None, columns=None, values=None, ordered: bool = True, lex: Optional[bool] = None, filter=None
    ) -> Union['Dataset', 'Multiset']:
        """
        Return reshaped Dataset or Multiset organized by labels / column values.

        Uses unique values from specified `labels` / `columns` to form axes of the
        resulting Dataset. This function does not support data aggregation;
        multiple `values` columns will result in a Multiset in the columns.

        Parameters
        ----------
        labels : str or list of str, optional
            Column to use to make new labels. If None, uses existing labels.
        columns : str
            Column to use to make new columns.
        values : str or list of str, optional
            Column(s) to use for populating new values. If not
            specified, all remaining columns will be used and the result will
            have a Multiset.
        ordered: bool, defaults to True
        lex: bool, defaults to None
        filter: ndarray of bool, optional

        Returns
        -------
        Dataset or Multiset

        Raises
        ------
        ValueError:
            When there are any `labels`, `columns` combinations with multiple values.

        Examples
        --------
        >>> ds = rt.Dataset({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
        ...                  'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
        ...                  'baz': [1, 2, 3, 4, 5, 6],
        ...                  'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
        >>> ds
        #   foo   bar   baz   zoo
        -   ---   ---   ---   ---
        0   one   A       1   x
        1   one   B       2   y
        2   one   C       3   z
        3   two   A       4   q
        4   two   B       5   w
        5   two   C       6   t

        >>> ds.pivot(labels='foo', columns='bar', values='baz')
        foo    A   B   C
        ---   --  --  --
        one    1   2   3
        two    4   5   6
        """
        # --- normalize and validate the three selector arguments ---
        if labels is None:
            # see if existing labels exist
            labels = self.labels_get_names()
        elif np.isscalar(labels):
            labels=[labels]
        if not isinstance(labels, list) or len(labels) ==0:
            raise ValueError('The parameter "labels" must exist and be passed as a string or list of strings.')
        if columns is None or not isinstance(columns, (str, list)):
            raise ValueError('The parameter "columns" must exist and be passed as a string or list of strings.')
        if np.isscalar(columns):
            columns = [columns]
        if not isinstance(columns, list) or len(columns) ==0:
            raise ValueError('The parameter "columns" must exist and be passed as a list of one or more strings.')
        if values is None:
            # default: every column that is neither a label nor a pivot column
            values = []
            allkeys=labels+columns
            for colname in self.keys():
                if colname not in allkeys:
                    values.append(colname)
        elif np.isscalar(values):
            values=[values]
        if not isinstance(values, list) or len(values) ==0:
            raise ValueError(f'The parameter "values" could not be used {values!r}.')
        # build similar to Accum2: one grouping per axis, then combined
        grows = self.cat(labels, ordered=ordered, lex=lex).grouping
        gcols = self.cat(columns, ordered=ordered, lex=lex).grouping
        g = combine2groups(grows, gcols, filter=filter)
        # need ifirstkey to pull from original into matrix
        ifirstkey = g.ifirstkey
        # make labels
        crd=grows.uniquedict
        ccd=gcols.uniquedict
        # make a dataset with the cat_rows as labels
        ds_crd = Dataset(crd)
        ds_crd.label_set_names(labels)
        # +1 to include the filter (0 bin) since used combine2groups
        row_len=len(ds_crd)+1
        # check for duplicates -- pivot cannot aggregate, so every
        # (labels, columns) cell must map to at most one source row
        ncountgroup = g.ncountgroup
        pos = ncountgroup.argmax()
        if ncountgroup[pos] > 1:
            # find out where a duplicate is
            raise ValueError(f'Duplicates exist, cannot reshape. Duplicate count is {ncountgroup[pos]}. Pos is {pos!r}.')

        #=========================================
        # sub function to slice up original arrays
        def make_dataset(coldict, val, newds):
            # colnames must be unicode
            colnames = [colstr.astype('U') for colstr in coldict.values()]
            innerloop = len(colnames)
            outerloop= len(colnames[0])
            # if this is multikey columns (if len(coldict) > 1) we may need to create a tuple of value pairings
            # pull into one long array
            arr_long = val[ifirstkey]
            start=row_len
            # this loops adds the colname + the value
            for i in range(0, outerloop):
                for j in range(0, innerloop):
                    if j==0:
                        c=colnames[j][i]
                    else:
                        # multikey name, insert underscore
                        c=c+'_'+colnames[j][i]
                # slice up the one long array
                newds[c] = arr_long[start:start + row_len -1]
                start = start + row_len
            return newds

        # if just 1, make a dataset, otherwise multiset
        ms= {}
        for colname in values:
            ds_ms=ds_crd.copy(False)
            val = self[colname]
            # make a dataset per values key passed in
            ms[colname] = make_dataset(ccd, val, ds_ms)
        if len(ms) == 1:
            # return the one dataset
            return ms.popitem()[1]
        ms = TypeRegister.Multiset(ms)
        # make sure labels on left are lifted up for multiset
        ms.label_set_names(labels)
        return ms
# -------------------------------------------------------
    def equals(self, other, axis: Optional[int] = None, labels: bool = False, exact: bool = False):
        """
        Test whether two Datasets contain the same elements in each column.
        NaNs in the same location are considered equal.

        Parameters
        ----------
        other : Dataset or dict
            another dataset or dict to compare to
        axis : int, optional
            * None: returns a True or False for all columns
            * 0 : to return a boolean result per column
            * 1 : to return an array of booleans per column
        labels : bool
            Indicates whether or not to include column labels in the comparison.
        exact : bool
            When True, the exact order of all columns (including labels) must match
            (column names are ignored; comparison is positional).

        Returns
        -------
        bool or Dataset
            Based on the value of `axis`, a boolean or Dataset containing the equality comparison results.

        See Also
        --------
        Dataset.crc, ==, >=, <=, >, <

        Examples
        --------
        >>> ds = rt.Dataset({'somenans': [0., 1., 2., nan, 4., 5.]})
        >>> ds2 = rt.Dataset({'somenans': [0., 1., nan, 3., 4., 5.]})
        >>> ds.equals(ds)
        True

        >>> ds.equals(ds2, axis=0)
        #   somenans
        -   --------
        0      False

        >>> ds.equals(ds, axis=0)
        #   somenans
        -   --------
        0       True

        >>> ds.equals(ds2, axis=1)
        #   somenans
        -   --------
        0       True
        1       True
        2      False
        3      False
        4       True
        5       True

        >>> ds.equals(ds2, axis=0, exact=True)
        FastArray([False])

        >>> ds.equals(ds, axis=0, exact=True)
        FastArray([True])

        >>> ds.equals(ds2, axis=1, exact=True)
        FastArray([[ True],
                   [ True],
                   [False],
                   [False],
                   [ True],
                   [ True]])
        """
        if not isinstance(other, Dataset):
            try:
                # try to make it a dataset
                other = Dataset(other)
            except:
                other = False

        # check if all the nans are in the same place
        def ds_isnan(ds):
            # call isnan in the order
            result = []
            for v in ds.values():
                try:
                    if v.dtype.char not in 'SU':
                        result.append(v.isnan())
                    else:
                        # string columns have no nan concept, so no nans
                        result.append(np.zeros(v.shape, '?'))
                except Exception:
                    # if it has no nan, then no nans
                    result.append(np.zeros(v.shape, '?'))
            # Fortran order keeps each column contiguous in the stacked mask
            return vstack(result, order='F')

        if exact:
            try:
                # create a nan mask -- where both are nans
                # this does an inplace and
                result = ds_isnan(self)
                result *= ds_isnan(other)
                # now make the comparisons, the column order must be the same (names are ignored)
                result2=[v1 == v2 for v1,v2 in zip(self.values(), other.values())]
                # equal when values match OR both sides are nan
                result |= vstack(result2, order='F')
            except Exception:
                # anything went wrong, assume nothing matches
                result = False
                if axis != 1:
                    result=np.zeros(1, dtype='?')
            if axis != 1:
                # axis=None collapses to a scalar; axis=0 reduces per column
                result = np.all(result, axis=axis)
        else:
            try:
                result = self.apply_cols(isnan, labels=labels) & other.apply_cols(isnan, labels=labels)
                result |= (self == other)
            except:
                result = False
                if axis != 1:
                    result=np.zeros(1, dtype='?')
            if axis != 1:
                result = result.all(axis=axis)
        return result
_RIPTABLE_TO_PANDAS_TZ = {
'UTC': 'UTC',
'NYC': 'US/Eastern',
'DUBLIN': 'Europe/Dublin',
'GMT': 'GMT'
}
_PANDAS_TO_RIPTABLE_TZ = dict([(v, k) for (k, v) in _RIPTABLE_TO_PANDAS_TZ.items()])
# keep this as the last line
from .rt_enum import TypeRegister
TypeRegister.Dataset = Dataset
| StarcoderdataPython |
395057 | <filename>tests/utils/test_slugify.py
# -*- coding: utf-8 -*-
import unittest
from unittest import skipUnless
from pulsar.utils.slugify import slugify, unidecode
@skipUnless(unidecode, 'Requires unidecode package')
class TestSlugify(unittest.TestCase):
    """Data-driven tests for ``pulsar.utils.slugify``.

    Covers separator collapsing, unicode transliteration (French accents,
    pinyin, Cyrillic), ``max_length`` truncation, ``word_boundary`` handling
    and custom separators. The original version repeated the same three-line
    pattern fourteen times and stopped at the first failure; the loop with
    ``subTest`` reports every failing case with its input and arguments.
    """

    # (input text, slugify keyword arguments, expected slug)
    CASES = [
        ("This is a test ---", {}, "this-is-a-test"),
        ("This -- is a ## test ---", {}, "this-is-a-test"),
        ("C'est déjà l'été.", {}, "cest-deja-lete"),
        ("Nín hǎo. Wǒ shì zhōng guó rén", {}, "nin-hao-wo-shi-zhong-guo-ren"),
        ("Компьютер", {}, "kompiuter"),
        ("jaja---lol-méméméoo--a", {}, "jaja-lol-mememeoo-a"),
        ("jaja---lol-méméméoo--a", {"max_length": 9}, "jaja-lol"),
        ("jaja---lol-méméméoo--a", {"max_length": 15}, "jaja-lol-mememe"),
        ("jaja---lol-méméméoo--a", {"max_length": 50}, "jaja-lol-mememeoo-a"),
        ("jaja---lol-méméméoo--a",
         {"max_length": 15, "word_boundary": True}, "jaja-lol-a"),
        ("jaja---lol-méméméoo--a",
         {"max_length": 19, "word_boundary": True}, "jaja-lol-mememeoo"),
        ("jaja---lol-méméméoo--a",
         {"max_length": 20, "word_boundary": True}, "jaja-lol-mememeoo-a"),
        ("jaja---lol-méméméoo--a",
         {"max_length": 20, "word_boundary": True, "separator": "."},
         "jaja.lol.mememeoo.a"),
        ("jaja---lol-méméméoo--a",
         {"max_length": 20, "word_boundary": True, "separator": "ZZZZZZ"},
         "jajaZZZZZZlolZZZZZZmememeooZZZZZZa"),
    ]

    def test_manager(self):
        for text, kwargs, expected in self.CASES:
            # subTest keeps running the remaining cases after a failure
            # and identifies the offending input/arguments in the report.
            with self.subTest(text=text, **kwargs):
                self.assertEqual(slugify(text, **kwargs), expected)
| StarcoderdataPython |
5181034 | """This module contains the general information for BiosVfCPUPerformance ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfCPUPerformanceConsts:
    """Allowed values for the ``vpCPUPerformance`` property of
    :class:`BiosVfCPUPerformance` (auto-generated constants)."""
    VP_CPUPERFORMANCE_CUSTOM = "custom"
    VP_CPUPERFORMANCE_ENTERPRISE = "enterprise"
    VP_CPUPERFORMANCE_HIGH_THROUGHPUT = "high-throughput"
    VP_CPUPERFORMANCE_HPC = "hpc"
    VP_CPUPERFORMANCE_PLATFORM_DEFAULT = "platform-default"
class BiosVfCPUPerformance(ManagedObject):
    """This is BiosVfCPUPerformance class.

    Auto-generated managed object for the 'CPU Performance' BIOS token on
    Cisco IMC. The metadata tables below mirror the IMC XML API schema for
    the "classic" (rack) and "modular" platform flavours -- avoid editing
    them by hand; regenerate instead.
    """

    consts = BiosVfCPUPerformanceConsts()
    naming_props = set([])

    # Managed-object metadata per platform flavour.
    mo_meta = {
        "classic": MoMeta("BiosVfCPUPerformance", "biosVfCPUPerformance", "CPU-Performance", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
        "modular": MoMeta("BiosVfCPUPerformance", "biosVfCPUPerformance", "CPU-Performance", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
    }

    # Per-property metadata (access level, version introduced, allowed values).
    prop_meta = {

        "classic": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "vp_cpu_performance": MoPropertyMeta("vp_cpu_performance", "vpCPUPerformance", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["custom", "enterprise", "high-throughput", "hpc", "platform-default"], []),
        },

        "modular": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "vp_cpu_performance": MoPropertyMeta("vp_cpu_performance", "vpCPUPerformance", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["custom", "enterprise", "high-throughput", "hpc", "platform-default"], []),
        },

    }

    # Maps XML attribute names to the python attribute names above.
    prop_map = {

        "classic": {
            "childAction": "child_action", 
            "dn": "dn", 
            "rn": "rn", 
            "status": "status", 
            "vpCPUPerformance": "vp_cpu_performance", 
        },

        "modular": {
            "childAction": "child_action", 
            "dn": "dn", 
            "rn": "rn", 
            "status": "status", 
            "vpCPUPerformance": "vp_cpu_performance", 
        },

    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under `parent_mo_or_dn`; extra kwargs set properties."""
        self._dirty_mask = 0
        self.child_action = None
        self.status = None
        self.vp_cpu_performance = None

        ManagedObject.__init__(self, "BiosVfCPUPerformance", parent_mo_or_dn, **kwargs)
| StarcoderdataPython |
3494309 | <gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 06:37:50 2016
@author: i026e
"""
import re
import sys
from sys import argv
if sys.version_info >= (3, 0):
import tkinter as tk
from tkinter import font
from tkinter import filedialog
else:
import Tkinter as tk
import tkFont as font
import tkFileDialog as filedialog
import locale
# UI strings keyed by locale. Lookups fall back to the English key itself
# (see translate()), so the "en_US" catalog needs no entries.
TRANSLATIONS = {
    "en_US" :{},
    "ru_RU":{
        "Unknown Record":"Неизвестная запись",
        "Global Positioning System Fix Data" : "Данные о последнем определении местоположения",
        "Geographic Position - Latitude/Longitude" : "Координаты, широта/долгота",
        "GNSS DOP and Active Satellites" : "DOP (GPS) и активные спутники",
        "GNSS Satellites in View" : "Наблюдаемые спутники",
        "Recommended Minimum Specific GNSS Data" : "Рекомендуемый минимум навигационных данных",
        "Course Over Ground and Ground Speed" : "Курс и скорость движения" ,
        "Time and Date" : "Время и дата",
        "Accuracy" : "Точность",
        "Mode" : "Режим",
        "UTC Time" : "Время UTC",
        "UTC Date" : "Дата UTC",
        "Latitude" : "Широта",
        "Longitude" : "Долгота",
        "Altitude" : "Высота" ,
        "Azimuth" : "Азимут",
        "Quality indicator" : "Индикатор качества",
        "Satellites Used" : "Использовано спутников",
        "Type":"Тип",
        "Number of messages":"Число сообщений",
        "Sequence number" : "Номер сообщения" ,
        "Satellites in view" : "Видимых спутников"
        }
    }
# Pick the catalog matching the system locale, defaulting to English.
# NOTE(review): locale.getdefaultlocale() is deprecated since Python 3.11;
# consider locale.getlocale() once the minimum supported version allows.
lang = locale.getdefaultlocale()[0]
LANGUAGE = lang if lang in TRANSLATIONS else "en_US"
def translate(str_):
    """Return the localized form of *str_* for the active LANGUAGE.

    Unknown strings are returned unchanged, so English (the key language)
    needs no catalog entries.
    """
    catalog = TRANSLATIONS[LANGUAGE]
    return catalog.get(str_, str_)
class Record(object):
    """Base class for parsed NMEA sentences.

    Subclasses override ``info`` and ``_init_data_`` to fill ``self.fields``
    with ``(label, value)`` pairs taken from ``self.list_record``.
    """
    new_line = "\n"
    info = "Unknown Record"

    def __init__(self, list_record):
        """Normalize the comma-split sentence and let the subclass parse it.

        The last comma-separated field still contains the '*'-separated
        checksum (e.g. "47*6B"); it is split and re-appended as separate
        tokens before parsing.
        """
        self.fields = []
        self.list_record = list_record
        if self.list_record:
            self.list_record.extend(self.ast_split(self.list_record.pop(-1)))
        try:
            self._init_data_()
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). A malformed sentence is recorded
            # instead of raised so one bad line cannot break the viewer.
            self.fields.append(("Unexpected Error", "Invalid Record"))

    def _init_data_(self):
        # Hook for subclasses; the base record has nothing to parse.
        pass

    def __str__(self):
        return translate(self.info) + self.new_line*2 + \
               self.new_line.join(translate(key) + " : " + translate(val) \
                                  for (key, val) in self.fields)

    def ast_split(self, entry):
        """Split a checksum field like "47*6B" into ["47", "6B"]."""
        return entry.split('*')
class GGA(Record):
    """Parser for GGA sentences.

    Field layout (after the checksum split done by Record.__init__):
    1=UTC time, 2/3=latitude + hemisphere, 4/5=longitude + hemisphere,
    6=fix quality, 7=satellites used, 8=HDOP, 9/10=altitude + unit,
    11/12=geoidal separation + unit, last=checksum.
    """
    info = "Global Positioning System Fix Data"

    def _init_data_(self):
        self.fields.append(("UTC Time", self.list_record[1]))
        # latitude/longitude values are concatenated with their N/S / E/W letter
        self.fields.append(("Latitude", self.list_record[2] + self.list_record[3]))
        self.fields.append(("Longitude", self.list_record[4] + self.list_record[5]))
        indicators = {"0": "position fix unavailable",
                      "1": "valid position fix, SPS mode",
                      "2": "valid position fix, differential GPS mode"}
        self.fields.append(("Quality indicator", \
                            indicators.get(self.list_record[6], '')))
        self.fields.append(("Satellites Used", self.list_record[7]))
        self.fields.append(("HDOP", self.list_record[8]))
        self.fields.append(("Altitude", self.list_record[9] + self.list_record[10]))
        self.fields.append(("Geoidal Separation", self.list_record[11] + self.list_record[12]))
        self.fields.append(("Checksum", self.list_record[-1]))
class GLL(Record):
    """Parser for GLL sentences: geographic position plus fix status."""
    info = "Geographic Position - Latitude/Longitude"

    def _init_data_(self):
        rec = self.list_record
        status_names = {"V": "Data not valid",
                        "A": "Data Valid"}
        entries = [
            ("Latitude", rec[1] + rec[2]),
            ("Longitude", rec[3] + rec[4]),
            ("UTC Time", rec[5]),
            ("Status", status_names.get(rec[6], "")),
            ("Checksum", rec[-1]),
        ]
        self.fields.extend(entries)
class GSA(Record):
    """Parser for GSA sentences: active satellites and dilution of precision."""
    info = "GNSS DOP and Active Satellites"

    def _init_data_(self):
        # Third character of the talker id distinguishes the constellation
        # ($GPGSA -> 'P' GPS, $GLGSA -> 'L' GLONASS).
        types = {"P":"GPS", "L":"GLONASS"}
        self.fields.append(("Type", types.get(self.list_record[0][2], "")))
        modes = {"A": "Automatic", "M" : "Manual"}
        self.fields.append(("Mode", modes.get(self.list_record[1], "")))
        fixes = {"1":"Fix not available", "2":"2D", "3":"3D"}
        self.fields.append(("Fix mode", fixes.get(self.list_record[2], "")))
        # Fields 3..14 hold up to 12 satellite ids; skip the empty slots.
        ids = ", ".join(id_ for id_ in self.list_record[3:15] if id_)
        self.fields.append(("Satellite IDs", ids))
        self.fields.append(("PDOP", self.list_record[15]))
        self.fields.append(("HDOP", self.list_record[16]))
        self.fields.append(("VDOP", self.list_record[17]))
        self.fields.append(("Checksum", self.list_record[-1]))
class GSV(Record):
    """Parser for GSV sentences: satellites currently in view.

    After the four header fields, satellites come in repeating groups of
    four values (id, elevation, azimuth, SNR); incomplete trailing groups
    are ignored.
    """
    info = "GNSS Satellites in View"

    def _init_data_(self):
        types = {"P":"GPS", "L":"GLONASS"}
        self.fields.append(("Type", types.get(self.list_record[0][2], "")))
        self.fields.append(("Number of messages", self.list_record[1]))
        self.fields.append(("Sequence number", self.list_record[2]))
        self.fields.append(("Satellites in view", self.list_record[3]))
        self.fields.append(("Checksum", self.list_record[-1]))
        satellites = list(self.list_record[4:-1])
        if len(satellites) >= 4:
            #group by 4
            satellites = [satellites[i:i+4] for i in range(0, len(satellites), 4)]
            for sat in satellites:
                if len(sat) == 4:
                    # blank pair acts as a visual separator between satellites
                    self.fields.append(("", ""))
                    self.fields.append(("Satellite ID", sat[0]))
                    self.fields.append(("Elevation", sat[1]))
                    self.fields.append(("Azimuth", sat[2]))
                    self.fields.append(("SNR", sat[3]))
class RMC(Record):
    """Parser for RMC sentences: the recommended minimum navigation data."""
    info = "Recommended Minimum Specific GNSS Data"

    def _init_data_(self):
        self.fields.append(("UTC Time", self.list_record[1]))
        statuses = {"V":"Navigation receiver warning",
                    "A":"Data Valid"}
        self.fields.append(("Status", statuses.get(self.list_record[2], "")))
        self.fields.append(("Latitude", self.list_record[3] + self.list_record[4]))
        self.fields.append(("Longitude", self.list_record[5] + self.list_record[6]))
        self.fields.append(("Speed, knots", self.list_record[7]))
        self.fields.append(("Course, deg", self.list_record[8]))
        self.fields.append(("UTC Date", self.list_record[9]))
        modes = {"N":"Data not valid",
                 "A":"Autonomous",
                 "D":"Differential",
                 "E":"Estimated (dead reckoning)"}
        # mode is the second-to-last token (the field count varies by revision)
        self.fields.append(("Mode", modes.get(self.list_record[-2], "")))
        self.fields.append(("Checksum", self.list_record[-1]))
class VTG(Record):
    """Parser for VTG sentences: course and speed over ground.

    Each value field is followed by its unit/reference letter (T, M, N, K),
    which is concatenated onto the displayed value.
    """
    info = "Course Over Ground and Ground Speed"

    def _init_data_(self):
        self.fields.append(("Course, deg True", self.list_record[1] + \
                            self.list_record[2]))
        self.fields.append(("Course, deg Magnetic", self.list_record[3] + \
                            self.list_record[4]))
        self.fields.append(("Speed, knots", self.list_record[5] + \
                            self.list_record[6]))
        self.fields.append(("Speed, km/hr", self.list_record[7] + \
                            self.list_record[8]))
        modes = {"N":"Data not valid",
                 "A":"Autonomous",
                 "D":"Differential",
                 "E":"Estimated (dead reckoning)"}
        self.fields.append(("Mode", modes.get(self.list_record[9], "")))
        self.fields.append(("Checksum", self.list_record[-1]))
class ZDA(Record):
    """Parser for ZDA sentences: UTC time/date plus local zone offset."""
    info = "Time and Date"

    def _init_data_(self):
        rec = self.list_record
        # Fields 1..6 map one-to-one onto these labels, in order.
        labels = ["UTC Time", "UTC Day", "UTC Month", "UTC Year",
                  "Local zone hours", "Local zone minutes"]
        for position, label in enumerate(labels, start=1):
            self.fields.append((label, rec[position]))
        self.fields.append(("Checksum", rec[-1]))
class ACCURACY(Record):
    """Parser for the vendor-specific ACCURACY sentence."""
    info = "Accuracy"

    def _init_data_(self):
        rec = self.list_record
        self.fields.extend([("Accuracy", rec[1]),
                            ("Checksum", rec[-1])])
class NMEA:
    """Holds the lines of an NMEA log file and formats single sentences
    into human-readable reports."""

    # Maps the sentence type (characters after the two-letter talker id,
    # e.g. "GGA" in "$GPGGA") to its parser class.
    parsers = {"GGA": GGA, "GLL" : GLL, "GSA" : GSA,
               "GSV" : GSV, "RMC" : RMC, "VTG" : VTG,
               "ZDA" : ZDA, "ACCURACY" : ACCURACY}
    new_line = "\n"

    def __init__(self, filepath):
        """Read `filepath`, keeping one stripped line per record.

        Best-effort: an unreadable or invalid path leaves `records` empty
        so the GUI keeps working with an empty list.
        """
        self.records = []
        try:
            with open(filepath, 'r') as input_:
                for line in input_:
                    self.records.append(line.strip())
        except (OSError, TypeError):
            # Narrowed from a bare `except:`; TypeError covers filepath=None,
            # OSError covers missing/unreadable files. Real bugs now surface.
            pass

    def get_info(self, record_ind):
        """Return the formatted report for record `record_ind`,
        or None when the index is out of range."""
        if 0 <= record_ind < len(self.records):
            return NMEA.get_str_info(self.records[record_ind])

    @staticmethod
    def get_str_info(record_str):
        """Return the raw sentence followed by its decoded field report."""
        return record_str + NMEA.new_line*2 + NMEA._record_info(record_str)

    @staticmethod
    def _record_info(record_str):
        """Pick the parser matching the sentence type and render its fields."""
        record = record_str.split(",")
        parser = Record
        # e.g. "$GPGGA" -> key "GGA"; unknown/short types fall back to Record.
        if record and len(record[0]) >= 5:
            parser = NMEA.parsers.get(record[0][3:], Record)
        return str(parser(record))
class GUI:
    """Tk application: record list on the right, decoded details on the left,
    plus a regex filter box below the list."""

    def __init__(self, filepath = None, filter_=""):
        """Build the widget tree and optionally load `filepath` immediately.

        filter_ pre-populates the regex filter entry.
        """
        self.root = tk.Tk()
        self.root.wm_title("NMEA Viewer")
        custom_font = font.Font(family="Helvetica", size=10)
        #Menu
        menu = tk.Menu(self.root, font=custom_font, tearoff=0)
        self.root.config(menu=menu)
        file_menu = tk.Menu(menu, font=custom_font, tearoff=0)
        menu.add_cascade(label="File", menu=file_menu)
        file_menu.add_command(label="Open", command = self.on_open_cmd,
                              font=custom_font)
        #Frames
        main_frame = tk.Frame(self.root)
        main_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.YES)
        info_frame = tk.Frame(main_frame)
        info_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        records_frame = tk.Frame(main_frame)
        records_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        list_frame = tk.Frame(records_frame)
        list_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.YES)
        filter_frame = tk.Frame(records_frame)
        filter_frame.pack(side=tk.TOP, fill=tk.BOTH)
        #Left Textbox: shows the decoded report of the selected record
        self.txtbox = tk.Text(info_frame,
                              font=custom_font,
                              wrap=tk.WORD, width = 80)
        self.txtbox.pack(side=tk.LEFT, expand=tk.YES, fill=tk.BOTH)
        txt_scrollbar = tk.Scrollbar(info_frame)
        txt_scrollbar.pack(side=tk.LEFT, fill=tk.Y)
        self.txtbox.config(yscrollcommand=txt_scrollbar.set)
        txt_scrollbar.config(command=self.txtbox.yview)
        #Right List: one entry per (filtered) NMEA record
        self.listbox = tk.Listbox(list_frame, font=custom_font, width=50)
        self.listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        self.listbox.bind("<<ListboxSelect>>", self.on_record_select)
        list_scrollbar = tk.Scrollbar(list_frame)
        list_scrollbar.pack(side=tk.LEFT, fill=tk.Y)
        self.listbox.config(yscrollcommand=list_scrollbar.set)
        list_scrollbar.config(command=self.listbox.yview)
        #Filter: every keystroke re-filters the list via the variable trace
        self.filter_var = tk.StringVar(value=filter_)
        self.filter_var.trace("w", self.reload_nmea)
        filterbox = tk.Entry(filter_frame, font=custom_font, width=1,
                             textvariable = self.filter_var)
        filterbox.pack(side=tk.BOTTOM, expand=tk.YES, fill=tk.X)
        #load file
        # NOTE(review): when no file is given, self.nmea stays unset until
        # load_nmea() runs; typing in the filter box before opening a file
        # would hit filtered_nmea() with no self.nmea -- confirm/guard.
        if filepath is not None:
            self.load_nmea(filepath)

    def load_nmea(self, filepath):
        """Parse `filepath` into an NMEA object and refresh the list."""
        self.nmea = NMEA(filepath)
        self.reload_nmea()

    def reload_nmea(self, *args):
        """Repopulate the listbox with the currently filtered records.

        *args absorbs the (name, index, mode) arguments Tk passes from the
        StringVar trace callback.
        """
        self.listbox.delete(0, tk.END)
        for record in self.filtered_nmea():
            #print(record)
            self.listbox.insert(tk.END, record)

    def filtered_nmea(self):
        """Yield records matching the filter expression (case-insensitive
        regex); yield everything when the filter is empty."""
        expr = self.filter_var.get()
        print("filter", expr, len(expr))
        if len(expr) > 0:
            regex = re.compile(expr, re.IGNORECASE)
            for line in self.nmea.records:
                if regex.search(line):
                    yield line
        else:
            for line in self.nmea.records:
                yield line

    def on_record_select(self, evt):
        """Show the decoded report for the record clicked in the listbox."""
        # Note here that Tkinter passes an event object
        w = evt.widget
        if len(w.curselection()) > 0 and self.nmea is not None:
            index = int(w.curselection()[0])
            record = w.get(index)
            text = NMEA.get_str_info(record)
            self.txtbox.delete(1.0, tk.END)
            self.txtbox.insert(1.0, text)

    def on_open_cmd(self):
        """File->Open handler: pick a log file and load it."""
        filetypes = [('text files', '.txt'), ('all files', '.*')]
        filepath = filedialog.askopenfilename(filetypes = filetypes )
        print("open", filepath)
        if filepath:
            self.load_nmea(filepath)

    def show(self):
        """Enter the Tk main loop (blocks until the window closes)."""
        self.root.mainloop()
def main(*args):
    """Launch the NMEA viewer.

    `args` mirrors sys.argv: args[1] is an optional log file path and
    args[2] an optional initial filter expression.
    """
    filepath = None
    filter_ = ""
    if len(args) >= 2:
        filepath = args[1]
    if len(args) >= 3:
        filter_ = args[2]
    viewer = GUI(filepath, filter_)
    viewer.show()


if __name__ == "__main__":
    # Execute only when run as a script.
    main(argv)
| StarcoderdataPython |
3206008 | <reponame>hpatel1567/pymatgen<filename>pymatgen/entries/__init__.py
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains entry related tools. Essentially, entries are containers
for calculated information, which is used in many analyses.
"""
| StarcoderdataPython |
# NOTE(review): `first` and `second` are never used below -- presumably left
# over from an earlier iterative version; confirm before removing.
first=0
second=1
# Number of Fibonacci terms to print, read interactively.
n=int(input("How many steps did you want to execute?"))
def fibonacci(num):
    """Return the num-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Iterative implementation: the original recursive version recomputed the
    same subproblems and ran in exponential time, making even modest inputs
    (e.g. num > 35) impractically slow.
    """
    a, b = 0, 1
    for _ in range(num):
        a, b = b, a + b
    return a
# Print the first n terms of the series, one per line.
print("Fibonacci Series are")
for i in range(0,n):
    print(fibonacci(i))
| StarcoderdataPython |
from setuptools import setup, find_packages

# Package metadata for AWSLeR. setuptools' setup() replaces the deprecated
# distutils.core.setup that was previously mixed with setuptools.find_packages
# (distutils is removed in Python 3.12).
setup(
    name='AWSLeR',
    author='<NAME>',
    author_email='<EMAIL>',
    install_requires=['boto3', 'docopt'],
    long_description=open('README.md').read(),
    packages=find_packages(exclude=['docs', 'tests']),
    url='https://github.com/forestmonster/AWSLeR',
    license='Apache 2.0',
    version='0.3',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Security',
    ]
)
| StarcoderdataPython |
11215181 |
import math
i = 2
N = 1500450271
# Assume prime until a divisor is found. The original initialised the flag to
# False under the misspelled name `iPrime` and assigned `isPrime` inside the
# loop, so the flag could never become True and the result was meaningless.
isPrime = True
# Trial division only needs divisors up to floor(sqrt(N)); hoist the bound out
# of the loop (the original recomputed math.sqrt(N) every iteration) and make
# it inclusive (`<=`, not `<`), otherwise perfect squares are misclassified.
limit = int(math.sqrt(N))
while i <= limit:
    if N % i == 0:
        isPrime = False
        break  # one divisor is enough to prove compositeness
    i += 1
# Time taken by B's program = 1ms * number of divisions
#     = 1ms * square root of 1500450271
#     = approximately 40000ms = 40 seconds.
6519134 | from keras.models import Model
from keras.layers import Input, Reshape, merge, dot, Activation
from keras.layers.embeddings import Embedding
import keras.initializers
from keras.utils import Sequence
from keras.preprocessing import sequence
import wordvectors.physicaldata.tools as tools
import wordvectors.physicaldata.creation as creation
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from enum import Enum
import numpy as np
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import random
import os
class Neural_Mode(Enum):
    """Selects what kind of corpus feeds the embedding network."""
    physical2D = 1           # 2D grid-world data
    text = 2                 # plain text corpus
    physical2Dperiodic = 3   # 2D grid with periodic boundary conditions
    physical2DIsing = 4      # 2D Ising-model configurations
class Dimension_Reduction_Method(Enum):
    """Method used to project word vectors to 2D for visualization."""
    tsne = 1   # sklearn.manifold.TSNE
    PCA = 2    # sklearn.decomposition.PCA
class PlotMode(Enum):
    """How much plotting output to produce during evaluation."""
    noPlotting =1   # no plots at all
    answerOnly =2   # plot only the resulting answer
    complete=3      # plot everything
#TODO: Introduce option to load data into RAM to get faster calculations if the file is not to big
"""
class Skipgram_Generator(Sequence):
def __init__(self, file, batch_size):
stream = open(file,mode='r')
self.batch_size = batch_size
def __getitem__(self, idx):
"""
class base_Thing2vec():
"""
Base class for thing2vec child classes. Introduces general functions that will be used or be
overwritten by the child classes.
"""
#TODO: Introduce standard place for saving models and loading data
    def __init__(self, vocab_size, window_size, vector_dim, negative_samples, neural_mode, file=None, properties=None):
        """
        Parameters
        ----------
        vocab_size :
            size of vocabulary which defines the dimensionality of the one hot encoding;
            it is reduced later if the corpus contains fewer distinct tokens
        window_size :
            maximum distance of a current and context word for creating skipgrams
        vector_dim :
            dimensionality of the embedded layer that will define the word vectors
        negative_samples :
            how many negative samples are created per positive sample
        neural_mode :
            defines the input for the neural net (see Neural_Mode)
        file :
            string which points to the file with the corpus of text.
            This is only needed if neural_mode is set to text; for the
            physical modes it defaults to the data creator's file.
        properties:
            Properties for the data creator that will be loaded.
            This is only needed if neural_mode is set to physical2D or subclass
        """
        self.vocab_size = vocab_size
        self.window_size = window_size
        self.vector_dim = vector_dim
        self.negative_samples = negative_samples
        self.neural_mode = neural_mode
        self.file = file
        # Physical modes: instantiate the matching data creator, which also
        # supplies the corpus file and token separator.
        if (self.neural_mode == Neural_Mode.physical2D or
            self.neural_mode == Neural_Mode.physical2Dperiodic or
            self.neural_mode == Neural_Mode.physical2DIsing):
            if self.neural_mode == Neural_Mode.physical2D:
                self.datacreator = creation.DataCreator2DGrid(file=None)
            elif self.neural_mode == Neural_Mode.physical2Dperiodic:
                self.datacreator = creation.DataCreator2DGridPeriodic(file=None)
            elif self.neural_mode == Neural_Mode.physical2DIsing:
                self.datacreator = creation.DataCreator2DIsingModel(file=None)
            self.datacreator.Load_properties(properties)
            if self.file == None:
                self.file = self.datacreator.file
            self.seperator = self.datacreator._seperator
        elif self.neural_mode == Neural_Mode.text:
            self.seperator = ' '
        # Build vocabulary (counts + token<->id dictionaries) from the corpus.
        if self.file != None:
            count, dictionary, reverse_dictionary = tools.Create_dic_from_file(self.file, vocab_size, seperator=self.seperator)
            self.count = count
            self.dictionary = dictionary
            self.reverse_dictionary = reverse_dictionary
            self.vocab_size = min(self.vocab_size, len(self.count)) #if there are less words than given vocab size
#TODO: For optimization a second iterable would be usefull that does not need a dictionary and just
# uses the tokens from the file without conversion (more infos jupyter "setting up Thing2Vec")
class Thing2String(object):
"""
Class that reads the given file via an iterator and translates the read words via the
dictionary for the underlying neuralnet.
"""
def __init__(self, file, dictionary, sep=" ", transformation=str):
"""
Parameters
----------
file :
string which points to the file with the corpus of text
dictionary :
dictionary which is used for conversion of the words in the file text
sep:
token which is used to seperate the lines in single words
transformation:
type of the yielded output
"""
self.file = file
self.dictionary = dictionary
self.sep = sep
self.transformation = transformation
def __iter__ (self):
stream = open(self.file, 'r')
for line in stream:
sentence = []
for element in line.split(sep=self.sep):
if element in self.dictionary:
sentence.append(self.transformation(self.dictionary[element]))
else:
sentence.append(self.transformation("0")) #The UNK word
yield sentence
stream.close()
def len (self):
stream = open(self.file, 'r')
k = 0
for s in stream:
k += 1
stream.close()
return k
    # --- abstract interface: concrete backends (e.g. gensim/keras) override these ---
    def get_word_vector (self, word, normalized = True):
        """Return the embedding vector of `word` (optionally normalized)."""
        raise NotImplementedError()

    def get_word_vectors(self, normalized = False):
        """Return all embedding vectors (optionally normalized)."""
        raise NotImplementedError()

    def Train(self, file, epochs, batch_size, initial_epoch):
        """Train the embedding model on the given corpus file."""
        raise NotImplementedError()

    def index2word(self, idx):
        """Return the token corresponding to vocabulary index `idx`."""
        raise NotImplementedError()

    def save_model(self, path):
        """Persist the trained model to `path`."""
        raise NotImplementedError()

    def load_model(self,path):
        """Load a previously saved model from `path`."""
        raise NotImplementedError()

    def similarity(self, w1, w2):
        """Return the similarity score between two words."""
        raise NotImplementedError()

    def most_similar(self, positive, negative, number):
        """Return the `number` words most similar to positive - negative."""
        raise NotImplementedError()

    def most_similar_cosmul(self, positive, negative, number):
        """Like most_similar, but using the multiplicative combination."""
        raise NotImplementedError()
def visualize_most_similar(self, positive = [], negative=[], max_number=200, perplexity=40, n_iter=2500):
"""
Visualizes the vector space using the tsne method. The space is restricted around the
resulting vector by the given positive and negative words.
Parameters
----------
positive :
list of positive words which will be given to the most_similar method
negative :
list of negative words which will be given to the most_similar method
max_number :
maximal number of visualized words
perplexity :
parameter for the tsne method
n_iter :
parameter for the tsne method
"""
most_similar = self.most_similar(positive=positive, negative=negative, number=max_number-1)
custom_words = list(map(lambda x: x[0], most_similar))
custom_points = []
if len(positive) == 1 and len(negative) == 0:
custom_points.append([self.get_word_vector(positive[0],normalized=False),positive[0].upper()])
return self.visualize_vector_space(custom_words=custom_words, custom_points=custom_points, max_number=max_number, perplexity=40, n_iter=2500)
def visualize_most_similar_cosmul(self, positive = [], negative=[], max_number=200, perplexity=40, n_iter=2500):
    """
    Visualizes the vector space using the tsne method. The space is restricted around the
    resulting vector by the given positive and negative words.

    Parameters
    ----------
    positive :
        list of positive words which will be given to the most_similar_cosmul method
    negative :
        list of negative words which will be given to the most_similar_cosmul method
    max_number :
        maximal number of visualized words
    perplexity :
        parameter for the tsne method
    n_iter :
        parameter for the tsne method
    """
    # NOTE: this variant requests max_number words (visualize_most_similar
    # requests max_number-1); kept as-is to preserve existing behavior.
    most_similar = self.most_similar_cosmul(positive=positive, negative=negative, number=max_number)
    custom_words = [word for word, _ in most_similar]
    custom_points = []
    if len(positive) == 1 and len(negative) == 0:
        # Highlight the single query word itself with an upper-cased label.
        custom_points.append([self.get_word_vector(positive[0], normalized=False), positive[0].upper()])
    # BUG FIX: forward the caller's perplexity/n_iter; they were previously
    # hard-coded to 40/2500 here, silently ignoring both parameters.
    return self.visualize_vector_space(custom_words=custom_words, custom_points=custom_points,
                                       max_number=max_number, perplexity=perplexity, n_iter=n_iter)
#TODO: For better results one should introduce an PCA before applying tsne
def visualize_vector_space(self, custom_words = [], custom_points = [], method=Dimension_Reduction_Method.tsne, max_number = 200, perplexity=40, n_iter=2500, hide_plot=False):
    """
    Plot a two-dimensional projection of the embedding space.

    Parameters
    ----------
    custom_words :
        optional list of words which are taken into account for visualization
    custom_points :
        optional list of [point, label] pairs that are added to the plot
    method :
        which dimension-reduction method is used
    max_number :
        maximal number of visualized words (including custom_words)
    perplexity :
        parameter for the tsne method
    n_iter :
        parameter for the tsne method
    hide_plot :
        if True the plot is not shown (useful when saving the figure directly)
    """
    reduced, labels = self._apply_dimension_reduction(custom_words, custom_points, method=method, number_to_train=max_number, perplexity=perplexity, n_iter=n_iter)
    xs = [point[0] for point in reduced]
    ys = [point[1] for point in reduced]
    fig = plt.figure(figsize=(16, 16))
    # Scatter every projected point and attach its label next to it.
    for x_coord, y_coord, label in zip(xs, ys, labels):
        plt.scatter(x_coord, y_coord, color='blue')
        plt.annotate(label, xy=(x_coord, y_coord), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
    if not hide_plot:
        plt.show()
    else:
        plt.close()
    return fig
def _apply_dimension_reduction(self,trainables, custom_points= [], number_to_train = 0,number_of_learning=500,method=Dimension_Reduction_Method.tsne, perplexity=40, n_iter=2500):
    """
    Applies a dimension reduction, using the chosen method, onto the given data.

    Parameters
    ----------
    trainables :
        list of words (or general things) which are contained in the vocabulary and
        whose embedding vectors shall be lowered in dimension
    custom_points :
        list of custom points which shall be lowered in dimension
    number_to_train :
        number of words (or general things) which will be outputted after dimension
        reduction
    number_of_learning :
        number of points which will minimal be used for dimension reduction. A higher
        number improves the results when using tsne
    method :
        method that will be used for dimension reduction
    perplexity :
        parameter for tsne (see documentation)
    n_iter :
        parameter for tsne (see documentation)

    Returns
    -------
    ``[new_values, labels]`` — 2D coordinates and their labels, both truncated
    to ``max(number_to_train, len(trainables) + len(custom_points))`` entries.
    """
    labels = []
    tokens = []
    _skipped = 0
    # Collect embedding vectors for the requested words; words missing from
    # the vocabulary are silently skipped (only counted in _skipped).
    for word in trainables:
        try:
            tokens.append(self.get_word_vector(word,normalized=False))
            labels.append(word)
        except KeyError:
            _skipped += 1
    # User-supplied raw [point, label] pairs are appended verbatim.
    for point,label in custom_points:
        tokens.append(point)
        labels.append(label)
    # Pad with additional vocabulary vectors (not already requested) up to
    # number_of_learning points: the extra points stabilize the t-SNE fit,
    # and the surplus is cut off again by the final truncation below.
    vectors = self.get_word_vectors(normalized = False)
    vectors_labels = [[vectors[k], self.index2word(k)] for k in range(len(vectors)) if self.index2word(k) not in trainables]
    vectors_labels = vectors_labels[:max(0,number_of_learning-len(trainables)-len(custom_points))]
    for v in vectors_labels:
        tokens.append(v[0])
        labels.append(v[1])
    if method == Dimension_Reduction_Method.tsne:
        # random_state pinned for reproducible layouts.
        tsne_model = TSNE(perplexity=perplexity, n_components=2, init='pca', n_iter=n_iter, random_state=23)
        new_values = tsne_model.fit_transform(tokens)
    else :
        pca_model = PCA(n_components=2)
        new_values = pca_model.fit(tokens).transform(tokens)
    #final = [new_values[k] for k in range(len(labels)) if labels[k] is in trainables]
    return [new_values[:max(number_to_train, len(trainables)+ len(custom_points))], labels[:max(number_to_train, len(trainables)+len(custom_points))]]
def visualize_categories(self, categories, label_categories, method = Dimension_Reduction_Method.tsne, perplexity=40, n_iter=2500, labeling=False, hide_plot=False):
    """
    Plot the given categories of words in a two-dimensional projection of the
    embedding space; each category gets its own color and connecting line.

    Parameters
    ----------
    categories :
        A list of categories (each a list of words) which shall be visualized
    label_categories :
        List of strings which label the given categories
    method :
        method that will be used for dimension reduction
    perplexity :
        parameter for tsne (see documentation)
    n_iter :
        parameter for tsne (see documentation)
    labeling :
        if False the labels of the individual words are omitted
    hide_plot :
        if True the plot of the visualization is not shown
    """
    # Flatten all categories, keeping order, and project them to 2D.
    word_list = [element for category in categories for element in category]
    reduced, labels = self._apply_dimension_reduction(word_list, method=method, perplexity=perplexity, n_iter=n_iter)
    # Partition the projected coordinates back into per-category runs.
    x_per_category, y_per_category = [], []
    start = 0
    for category in categories:
        stop = start + len(category)
        x_per_category.append([reduced[idx][0] for idx in range(start, stop)])
        y_per_category.append([reduced[idx][1] for idx in range(start, stop)])
        start = stop
    fig = plt.figure(figsize=(16, 16))
    for xs, ys in zip(x_per_category, y_per_category):
        plt.scatter(xs, ys)
        plt.plot(xs, ys)
    plt.legend(label_categories)
    if labeling:
        for idx in range(len(reduced)):
            plt.annotate(labels[idx], xy=(reduced[idx][0], reduced[idx][1]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
    if not hide_plot:
        plt.show()
    else:
        plt.close()
    return fig
class Thing2VecGensim(base_Thing2vec):
    """
    Child class of base_Thing2vec which uses the gensim module for calculating word vectors.
    """
    def __init__(self, neural_mode, vocab_size=10000, window_size=5, vector_dim=100, negative_samples=5, file=None, min_count=5, workers=3, properties=None):
        """
        Parameters
        ----------
        neural_mode :
            defines the input for the neural net
        vocab_size :
            size of vocabulary which defines the dimensionality of the one hot encoding
        window_size :
            maximum distance of a current and context word for creating skipgrams
        vector_dim :
            dimensionality of the embedded layer that will define the word vectors
        negative_samples :
            how many negative samples are created per positive sample
        file :
            string which points to the file with the corpus of text
        min_count :
            minimal number of occurrences for a word to be taken into account for the dictionary
        workers :
            number of workers that will be created for calculating the word vectors
        properties :
            Properties for the data creator that will be loaded.
            This is only needed if neural_mode is set to physical2D
        """
        super().__init__(vocab_size, window_size, vector_dim, negative_samples, neural_mode, file, properties)
        self.min_count = min_count
        self.workers = workers
        iterator = self.Thing2String(self.file, self.dictionary, self.seperator)
        # iter=0: this call only builds the gensim vocabulary; the actual
        # training is performed later by Train().
        self.model = Word2Vec(iterator, size=self.vector_dim, window=self.window_size,
                              min_count=self.min_count, workers=self.workers, negative=self.negative_samples, sg=0, iter=0,
                              max_vocab_size=self.vocab_size)

    #TODO: One could introduce a system that saves the progress of training, meaning the actual epoch for recalculation
    # the needed alpha value. With that the user would give the desired number of iterations in the initialization
    # process and with Train method how long the training should be for the moment.
    def Train(self, file=None, epochs=10):
        """
        Trains the neural net for deriving word vectors using the gensim module.

        Parameters
        ----------
        file :
            string which points to the file with the corpus of text, if None the
            given file in the init function is used
        epochs :
            number of epochs/iterations over the whole corpus of text
        """
        callbacks = [self.EpochLogger(epochs)]
        if file is None:
            file = self.file
        iterator = self.Thing2String(file, self.dictionary, self.seperator)
        self.model.train(iterator, total_examples=self.model.corpus_count, epochs=epochs, callbacks=callbacks)

    def get_word_vector(self, word, normalized=True):
        """
        Returns the word vector that corresponds to the given word.

        Parameters
        ----------
        word :
            The word whose vector is wanted
        normalized :
            if True the word vector will be normalized
        """
        self.model.wv.init_sims()  # in order to compute the normalized matrix
        return self.model.wv.word_vec(str(self.dictionary[word]), use_norm=normalized)

    def get_word_vectors(self, normalized=False):
        """
        Returns a list of all word vectors trained by the neural net.

        Attention
        ---------
        The list does not have to be in any order that corresponds to the
        internal dictionary.

        Parameters
        ----------
        normalized :
            if True the word vectors will be normalized
        """
        self.model.wv.init_sims()  # in order to compute the normalized matrix
        if normalized:
            return self.model.wv.vectors_norm
        else:
            return self.model.wv.vectors

    def similarity(self, w1, w2):
        """
        Gives back the cosine similarity between two words.

        Parameters
        ----------
        w1 :
            Input word
        w2 :
            Input word
        """
        return self.model.wv.similarity(str(self.dictionary[w1]), str(self.dictionary[w2]))

    def similar_by_vector(self, vector, number=10, plot=True):
        """
        Gives back the words with the most similar word vectors to the given vector.

        Parameters
        ----------
        vector :
            origin, from which the search after similar word vectors is started
        number :
            number of most similar words that is given back
        plot :
            if True the results will be plotted if possible
        """
        result = self.model.wv.similar_by_vector(vector, topn=number)
        # gensim returns internal (stringified) indices; map them back to words.
        result_final = list(map(lambda x: [self.reverse_dictionary[int(x[0])], x[1]], result))
        if (self.neural_mode == Neural_Mode.physical2D or self.neural_mode == Neural_Mode.physical2Dperiodic) and plot:
            particles = list(map(lambda x: self.reverse_dictionary[int(x[0])], result))
            titles = list(map(lambda x: x[1], result))
            self.datacreator.plot_states(particles, titles=titles)
        return result_final

    #TODO: Expand function in order to get plots of all positive and negative states if wanted from the user
    # with short title like "positive1", "negative2"
    def most_similar(self, positive=[], negative=[], number=10, plot=PlotMode.complete):
        """
        Gives back the most similar words. Positive words contribute positively, negative words
        negatively. For measuring similarity cosine similarity is used as described in the original
        paper.

        Parameters
        ----------
        positive :
            list of positive words which will be given to the most_similar method
        negative :
            list of negative words which will be given to the most_similar method
        number :
            number of most similar words that is given back
        plot :
            mode of plotting, whether and how the results will be plotted
        """
        positive_dic = list(map(lambda x: str(self.dictionary[x]), positive))
        negative_dic = list(map(lambda x: str(self.dictionary[x]), negative))
        result = self.model.wv.most_similar(positive=positive_dic, negative=negative_dic, topn=number)
        result_final = list(map(lambda x: [self.reverse_dictionary[int(x[0])], x[1]], result))
        if (self.neural_mode in [Neural_Mode.physical2D, Neural_Mode.physical2Dperiodic, Neural_Mode.physical2DIsing]) and plot != PlotMode.noPlotting:
            if plot == PlotMode.complete:
                titles_input = ["positive"] * len(positive) + ["negative"] * len(negative)
                self.datacreator.plot_states(positive + negative, titles=titles_input)
            particles = list(map(lambda x: self.reverse_dictionary[int(x[0])], result))
            titles = list(map(lambda x: x[1], result))
            self.datacreator.plot_states(particles, titles=titles)
        return result_final

    def most_similar_cosmul(self, positive=[], negative=[], number=10, plot=PlotMode.noPlotting):
        """
        Gives back the most similar words. Positive words contribute positively, negative words
        negatively. For measuring similarity the multiplicative combination objective is used,
        see <http://www.aclweb.org/anthology/W14-1618>.

        Parameters
        ----------
        positive :
            list of positive words which will be given to the most_similar method
        negative :
            list of negative words which will be given to the most_similar method
        number :
            number of most similar words that is given back
        plot :
            mode of plotting, whether and how the results will be plotted
        """
        positive_dic = list(map(lambda x: str(self.dictionary[x]), positive))
        negative_dic = list(map(lambda x: str(self.dictionary[x]), negative))
        result = self.model.wv.most_similar_cosmul(positive=positive_dic, negative=negative_dic, topn=number)
        result_final = list(map(lambda x: [self.reverse_dictionary[int(x[0])], x[1]], result))
        if (self.neural_mode in [Neural_Mode.physical2D, Neural_Mode.physical2Dperiodic, Neural_Mode.physical2DIsing]) and plot != PlotMode.noPlotting:
            # BUG FIX: was `if PlotMode.complete:` which is always truthy, so the
            # positive/negative input states were plotted regardless of the
            # requested mode; compare against `plot` as in most_similar().
            if plot == PlotMode.complete:
                titles_input = ["positive"] * len(positive) + ["negative"] * len(negative)
                self.datacreator.plot_states(positive + negative, titles=titles_input)
            particles = list(map(lambda x: self.reverse_dictionary[int(x[0])], result))
            titles = list(map(lambda x: x[1], result))
            self.datacreator.plot_states(particles, titles=titles)
        return result_final

    def index2word(self, idx):
        """
        Returns the word to the given index.

        Parameters
        ----------
        idx :
            index of a word in the internal gensim implementation
        """
        return self.reverse_dictionary[int(self.model.wv.index2word[idx])]

    def save_model(self, path):
        """
        Saves the model at the given path.
        """
        self.model.save(path)

    def load_model(self, path):
        """
        Loads the model with the data given at path.
        """
        self.model = Word2Vec.load(path)

    def is_in_dictionary(self, word):
        """
        Gives back whether the given word is in the dictionary or not.

        Parameters
        ----------
        word :
            word that shall be tested
        """
        if word in self.dictionary:
            # Also check gensim's own vocabulary: for small datasets a word can
            # be present in self.dictionary but pruned by gensim (see the
            # jupyter notebook "thing2vec with physical data", small dataset).
            if str(self.dictionary[word]) in self.model.wv.vocab:
                return True
        return False

    class EpochLogger(CallbackAny2Vec):
        """
        Logs the status of the gensim learning process by using Callback methods.
        The status is updated at the end of every epoch.
        """
        def __init__(self, epochs):
            self.akt_epoch = 0
            self.logger = tools.progress_log(epochs)

        def on_epoch_end(self, model):
            self.akt_epoch += 1
            self.logger.update_progress(self.akt_epoch)

        def on_train_end(self, model):
            self.logger.finished()
class Thing2VecKeras(base_Thing2vec):
    """
    Child class of base_Thing2vec that uses the keras backend for calculating word vectors.

    Attention
    ---------
    The class is not finished yet!
    """
    def __init__(self, vocab_size, window_size, vector_dim, negative_samples, neural_mode, file, sg_file, optimizer, properties=None):
        """
        Parameters
        ----------
        vocab_size :
            size of vocabulary which defines the dimensionality of the one hot encoding
        window_size :
            maximum distance of a current and context word for creating skipgrams
        vector_dim :
            dimensionality of the embedded layer that will define the word vectors
        negative_samples :
            how many negative samples are created per positive sample
        neural_mode :
            defines the input for the neural net
        file :
            string which points to the file with the corpus of text
        sg_file :
            string which points to the location with the skipgrams created with the corpus of text
        optimizer :
            optimizer which shall be used for the keras neural net
        properties :
            Properties for the data creator that will be loaded.
            This is only needed if neural_mode is set to physical2D
        """
        super().__init__(vocab_size, window_size, vector_dim, negative_samples, neural_mode, file, properties)
        self.optimizer = optimizer
        self.file = file
        self.sg_file = sg_file
        self.SetupNeuralnet()

    def SetupNeuralnet(self):
        """
        Creates the neural network by using the keras module.
        """
        # create some input variables
        input_target = Input((1,))
        input_context = Input((1,))
        # initialization values are originated in the gensim code
        embedding = Embedding(self.vocab_size, self.vector_dim, input_length=1, name='embedding_word')  # Create embedding layer
        embedding_context = Embedding(self.vocab_size, self.vector_dim, input_length=1, name='embedding_context', embeddings_initializer=keras.initializers.RandomUniform(minval=-0.5/self.vector_dim, maxval=0.5/self.vector_dim))  # extra embedding layer for context
        target = embedding(input_target)  # calculate the word vector of the target word
        target = Reshape((self.vector_dim, 1))(target)
        context = embedding_context(input_context)  # calculate the word vector of the possible context word
        context = Reshape((self.vector_dim, 1))(context)
        # now perform the dot product operation to get a similarity measure
        dot_product = dot([target, context], axes=1, normalize=False)
        dot_product = Reshape((1,))(dot_product)
        output = Activation('sigmoid')(dot_product)  # With that approach there is no additional parameter that can be learned
        # create the primary training model
        model = Model(inputs=[input_target, input_context], outputs=output)
        model.compile(loss='binary_crossentropy', optimizer=self.optimizer)  # optimizer='SGD' #optimizer='rmsprop'
        # create a model which gives back the word_vector representation of the context words
        word_vector_model = Model(inputs=[input_context], outputs=context)
        self.model = model
        self.word_vector_model = word_vector_model

    def batchgenerator(self, batch_size):
        """
        Generates batches from the skip gram file given at the initialization.
        The generator is infinite: it rewinds the file at EOF (an incomplete
        trailing batch is dropped).

        Parameters
        ----------
        batch_size :
            Number of skipgram pairs in one batch that is given to the neural net for training
        """
        def set_to_zero():
            return [np.zeros(batch_size, dtype='int32'), np.zeros(batch_size, dtype='int32'), np.zeros(batch_size, dtype='int32')]
        # NOTE: the handle stays open for the generator's whole lifetime.
        stream = open(self.sg_file, mode='r')
        while True:
            word_target, word_context, labels = set_to_zero()
            act_idx = 0
            for idx, line in enumerate(stream):
                k = idx - act_idx
                # numpy casts the "target context label" string fields to int32.
                word_target[k], word_context[k], labels[k] = line.replace("\n", "").split(" ")
                if k == batch_size - 1:
                    yield ([word_target, word_context], labels)
                    word_target, word_context, labels = set_to_zero()
                    # BUG FIX: the next batch starts at the *following* line.
                    # `act_idx = idx` made k start at 1 for every batch after
                    # the first, leaving slot 0 as a spurious (0, 0, 0) sample
                    # and dropping one real skipgram per batch.
                    act_idx = idx + 1
            stream.seek(0)

    def __number_of_skipgrams_in_file(self):
        """
        Counts the number of lines in the skip gram file to determine the number of skipgrams.
        """
        # Context manager guarantees the handle is closed even on error.
        with open(self.sg_file, mode='r') as stream:
            return sum(1 for _ in stream)

    def Train(self, epochs, batch_size, initial_epoch=0):
        """
        Trains the model by using the keras api.

        Parameters
        ----------
        epochs :
            Number of final epoch that will be trained.
        batch_size :
            Number of skipgrams that will be given as one batch for training the neural net
        initial_epoch :
            Last learned epoch. So for starting learning the value is 0.
        """
        number = self.__number_of_skipgrams_in_file()
        self.model.fit_generator(self.batchgenerator(batch_size), epochs=epochs, steps_per_epoch=number // batch_size, verbose=1, initial_epoch=initial_epoch)

    def get_word_vector(self, word, normalized=True):
        """
        Returns the word vector that corresponds to the given word.

        Parameters
        ----------
        word :
            The word whose vector is wanted (a dictionary key or an int index)
        normalized :
            if True the word vector will be normalized
        """
        in_word = np.zeros(1)
        if isinstance(word, int):
            in_word[0] = word
        else:
            in_word[0] = self.dictionary[word]
        vector = np.ndarray.flatten(self.word_vector_model.predict_on_batch(in_word)[0])
        if normalized:
            vector /= np.linalg.norm(vector)
        return vector

    def get_word_vectors(self, normalized=False):
        """
        Returns a list of all word vectors trained by the neural net.

        Attention
        ---------
        The list does not have to be in any order that corresponds to the
        internal dictionary.

        Parameters
        ----------
        normalized :
            if True the word vectors will be normalized

        NOTE(review): the `normalized` flag is currently ignored here —
        the raw vectors are always returned; confirm whether that is intended.
        """
        in_batch = np.array([k for k in range(self.vocab_size)])
        vectors = self.word_vector_model.predict_on_batch(in_batch)
        # Model output has shape (vocab, dim, 1); drop the trailing axis.
        return np.squeeze(vectors, axis=2)

    def index2word(self, idx):
        """
        Returns the word to the given index.

        Parameters
        ----------
        idx :
            index of a word in the internal dictionary
        """
        return self.reverse_dictionary[idx]

    #TODO: Add visalization for 2d systems as in gensim class
    def most_similar(self, positive=[], negative=[], number=10):
        """
        Gives back the most similar words. Positive words contribute positively, negative words
        negatively. For measuring similarity cosine similarity is used as described in the original
        paper.

        Parameters
        ----------
        positive :
            list of positive words which will be given to the most_similar method
        negative :
            list of negative words which will be given to the most_similar method
        number :
            number of most similar words that is given back
        """
        vectors = []
        for i in positive:
            vectors.append(self.get_word_vector(i, normalized=True))
        for i in negative:
            vectors.append((-1) * self.get_word_vector(i, normalized=True))
        if vectors == []:
            raise ValueError("cannot compute nearest words with no input")
        # Mean of the (signed) query vectors, re-normalized.
        final_vec = np.mean(np.array(vectors), axis=0)
        norm_vec = final_vec / np.linalg.norm(final_vec)
        in_batch = np.array([k for k in range(self.vocab_size)])
        vectors = self.word_vector_model.predict_on_batch(in_batch)
        for v in vectors:
            v /= np.linalg.norm(v)
        # Cosine similarity against every vocabulary vector, excluding the query words.
        similarity = [[self.reverse_dictionary[k], (np.transpose(vectors[k]) @ norm_vec)[0]] for k in range(len(vectors)) if self.reverse_dictionary[k] not in positive + negative]
        return sorted(similarity, reverse=True, key=tools.takeSecond)[:number]

    def most_similar_cosmul(self, positive=[], negative=[], number=10):
        """
        Gives back the most similar words. Positive words contribute positively, negative words
        negatively. For measuring similarity the multiplicative combination objective is used,
        see <http://www.aclweb.org/anthology/W14-1618>.

        Parameters
        ----------
        positive :
            list of positive words which will be given to the most_similar method
        negative :
            list of negative words which will be given to the most_similar method
        number :
            number of most similar words that is given back
        """
        in_batch = np.array([k for k in range(self.vocab_size)])
        vectors = self.word_vector_model.predict_on_batch(in_batch)
        for v in vectors:
            v /= np.linalg.norm(v)
        pos_dist, neg_dist = [], []
        # Shift cosine similarities into [0, 1] as required by the cosmul objective.
        for i in positive:
            pos_dist.append((1 + np.dot(np.squeeze(vectors, axis=2), self.get_word_vector(i, normalized=True))) / 2)
        for i in negative:
            neg_dist.append((1 + np.dot(np.squeeze(vectors, axis=2), self.get_word_vector(i, normalized=True))) / 2)
        # Small epsilon avoids division by zero.
        dist = np.prod(pos_dist, axis=0) / (np.prod(neg_dist, axis=0) + 0.000001)
        similarity = [[self.reverse_dictionary[k], dist[k]] for k in range(len(dist)) if self.reverse_dictionary[k] not in positive + negative]
        return sorted(similarity, reverse=True, key=tools.takeSecond)[:number]

    #TODO: Save the whole model? In order to just go on without again initialisation
    def save_model(self, path):
        """
        Saves the model weights at the given path.
        """
        self.model.save_weights(path)

    def load_model(self, path):
        """
        Loads the model weights from the data given at path.
        """
        self.model.load_weights(path)

    def make_cum_table(self, domain=2**31 - 1):
        """
        Calculates the noise distribution that is used for sampling of negative samples.
        The code is adopted from the gensim library. The distribution follows the stated
        one in the original paper (unigram counts raised to the 0.75 power).
        """
        cum_table = np.zeros(self.vocab_size - 1, dtype=np.uint32)  # in order to ignore UNK -1
        # compute sum of all power (Z in paper)
        train_words_pow = 0.0
        for word_index in range(1, self.vocab_size):  # To ignore the UNK start with 1
            train_words_pow += self.count[word_index][1]**(0.75)
        cumulative = 0.0
        for word_index in range(1, self.vocab_size):
            cumulative += self.count[word_index][1]**(0.75)
            cum_table[word_index - 1] = round(cumulative / train_words_pow * domain)
        return cum_table

    def load_embedding_matrix_from_gensim(self, thing2vecgensim):
        """
        Loads the embedding vectors from the gensim model word2vec into the
        own model.
        """
        def get_matrix(wvmatrix):
            # Random init in [-0.1, 0.1); rows found in the gensim model are
            # overwritten below, missing words keep their random vectors.
            wv_matrix = (np.random.rand(self.vocab_size, self.vector_dim) - 0.5) / 5.0
            for i in self.reverse_dictionary:
                if i >= self.vocab_size:
                    continue
                try:
                    index = thing2vecgensim.model.wv.vocab[str(thing2vecgensim.dictionary[self.reverse_dictionary[i]])].index
                    embedding_vector = wvmatrix[index]
                    # words not found in embedding index will be all-zeros.
                    wv_matrix[i] = embedding_vector
                except (KeyError, IndexError):
                    # BUG FIX: was a bare `except: pass`, which also swallowed
                    # KeyboardInterrupt/SystemExit; only missing-word lookups
                    # are expected to fail here.
                    pass
            return wv_matrix
        syn1neg = get_matrix(thing2vecgensim.model.trainables.syn1neg)
        syn0 = get_matrix(thing2vecgensim.model.wv.vectors)
        self.model.set_weights([syn1neg, syn0])

    def Generate_skipgrams(self, replace_context=False):
        """
        Creates the skipgram file for the corpus (no-op when self.sg_file exists).
        """
        if os.path.isfile(self.sg_file):
            print("Skipgram file already exists!")
            return None
        cum_table = self.make_cum_table()
        sentences = self.Thing2String(self.file, self.dictionary, transformation=int)
        sampling_table = sequence.make_sampling_table(self.vocab_size)
        self.skipgrams_sampled(sentences, self.sg_file, sampling_table=sampling_table, replace_context=replace_context, cum_table=cum_table)

    #TODO: This function has to be changed due just taking a file "sequence" which is not data_idx so
    # dictionary has to be applied.
    def skipgrams_sampled_old(self, sequence, vocabulary_size,
                              window_size=4, negative_samples=1., shuffle=True,
                              categorical=False, sampling_table=None, seed=None,
                              unigram_distribution=True, replace_context=False, count=[]):
        """
        Generates skipgram word pairs.
        Function originally from keras package with added functionality
        sampling words with different distances to the original word.
        With unigram_distribution the negative samples are sampled due
        to a unigram distribution.
        replace_context defines whether the negative samples are created
        by replacing the context word or the "goal" word.
        """
        couples = []
        labels = []
        for i, wi in enumerate(sequence):
            if not wi:
                continue
            if sampling_table is not None:
                if sampling_table[wi] < random.random():
                    continue
            reduced_window = random.randint(0, window_size)  # Added code
            window_start = max(0, i - window_size + reduced_window)
            window_end = min(len(sequence), i + window_size + 1 - reduced_window)
            for j in range(window_start, window_end):
                if j != i:
                    wj = sequence[j]
                    if not wj:
                        continue
                    couples.append([wi, wj])
                    if categorical:
                        labels.append([0, 1])
                    else:
                        labels.append(1)
        if negative_samples > 0:
            num_negative_samples = int(len(labels) * negative_samples)
            if replace_context:
                words = [c[0] for c in couples]
            else:
                words = [c[1] for c in couples]
            if shuffle:
                random.shuffle(words)
            if unigram_distribution:
                if count == []:
                    raise ValueError("Need count variable to create unigram distribution")
                # BUG FIX: make_cum_table() takes no `count` argument (it reads
                # self.count); the old call `self.make_cum_table(count)` passed
                # the list into the `domain` parameter and raised a TypeError.
                # NOTE(review): `count` is now only validated, not used — confirm
                # it always equals self.count for this deprecated method.
                cum_table = self.make_cum_table()
                if replace_context:
                    couples += [[words[i % len(words)],
                                 int(cum_table.searchsorted(random.randint(0, cum_table[-1])) + 1)]  # +1 because of ignoring UNK, the int lowers memory consumption when saving variable with pickle
                                for i in range(num_negative_samples)]
                else:
                    couples += [[int(cum_table.searchsorted(random.randint(0, cum_table[-1])) + 1),  # +1 because of ignoring UNK, the int lowers memory consumption when saving variable with pickle
                                 words[i % len(words)]]
                                for i in range(num_negative_samples)]
            else:
                if replace_context:
                    couples += [[words[i % len(words)],
                                 random.randint(1, vocabulary_size - 1)]
                                for i in range(num_negative_samples)]
                else:
                    couples += [[random.randint(1, vocabulary_size - 1),
                                 words[i % len(words)]]
                                for i in range(num_negative_samples)]
            if categorical:
                labels += [[1, 0]] * num_negative_samples
            else:
                labels += [0] * num_negative_samples
        if shuffle:
            if seed is None:
                # BUG FIX: was random.randint(0, 10e6) — randint requires
                # integer bounds (a float raises on modern Python).
                seed = random.randint(0, 10**7)
            random.seed(seed)
            random.shuffle(couples)
            random.seed(seed)
            random.shuffle(labels)
        return couples, labels

    #TODO: One could speedup this procedure by implementing several threads that do the same thing with different sentences
    #TODO: Problem: created file needs way too much space on hard drive (around ~40x more than the original file(depending on parameters))
    #TODO: For optimizing runtime the Cython compiler could be used https://cython.org/
    def skipgrams_sampled(self, sentences, result_file, sampling_table=None, replace_context=False, cum_table=None):
        """
        Generates skipgram word pairs and saves them to a file.

        Parameters
        ----------
        sentences :
            iterable that gives a list of words of the individual sentences
        result_file :
            the file to which the resulting skipgrams shall be saved
        sampling_table :
            table for sampling occurring words in sentences, so that more
            frequent words are downsampled. Use the keras function for
            creating sampling_table
        replace_context :
            if true, for negative samples the context word will be replaced
            by a negatively sampled word
        cum_table :
            table for sampling negative samples. Use make_cum_table for generation.
        """
        logger = None
        if isinstance(sentences, self.Thing2String):
            logger = tools.progress_log(sentences.len())
        # `with` guarantees the output file is flushed/closed on errors too.
        with open(result_file, 'w') as stream:
            for idx, sentence in enumerate(sentences):
                if logger is not None:
                    logger.update_progress(idx)
                if sampling_table is not None:
                    # NOTE(review): this keeps words with sampling_table[word] < random(),
                    # the *opposite* of the keras convention used in
                    # skipgrams_sampled_old (which skips such words) — confirm intent.
                    idx_vocabs = [word for word in sentence if word != 0 and sampling_table[word] < random.random()]
                else:
                    idx_vocabs = [word for word in sentence if word != 0]
                for pos, word in enumerate(idx_vocabs):
                    reduced_window = random.randint(0, self.window_size)  # Added code
                    window_start = max(0, pos - self.window_size + reduced_window)
                    window_end = min(len(idx_vocabs), pos + self.window_size + 1 - reduced_window)
                    for pos2, word2 in enumerate(idx_vocabs[window_start:window_end], window_start):
                        if pos2 != pos:
                            # One positive pair, then negative_samples negatives for it.
                            stream.write(str(word) + " " + str(word2) + " 1\n")
                            for i in range(self.negative_samples):
                                if cum_table is not None:
                                    if replace_context:
                                        stream.write(str(word) + " " + str(cum_table.searchsorted(random.randint(0, cum_table[-1])) + 1) + " 0\n")
                                    else:
                                        stream.write(str(cum_table.searchsorted(random.randint(0, cum_table[-1])) + 1) + " " + str(word2) + " 0\n")
                                else:
                                    if replace_context:
                                        stream.write(str(word) + " " + str(random.randint(1, self.vocab_size - 1)) + " 0\n")
                                    else:
                                        stream.write(str(random.randint(1, self.vocab_size - 1)) + " " + str(word2) + " 0\n")
        if logger is not None:
            logger.finished()
| StarcoderdataPython |
4814822 | import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import zipfile
def check_call(command, *args, **kwargs):
    """Print each element of *command* on its own line, then run it.

    Thin logging wrapper around :func:`subprocess.check_call`; the return
    value (0 on success) and any CalledProcessError pass straight through.
    """
    command = list(command)
    print('Launching: ')
    for part in command:
        print(' {}'.format(part))
    return subprocess.check_call(command, *args, **kwargs)
def build(artifact, script, root):
    """Assemble a self-contained executable zip archive at *artifact*.

    Downloads the project's requirements and builds its own sdist into a
    scratch directory, pip-installs everything into a build directory,
    copies *script* in as ``__main__.py`` and zips the result.

    Parameters
    ----------
    artifact : destination path of the zip archive
    script : path of the file embedded as the archive's ``__main__.py``
    root : project root containing setup.py and requirements.txt
    """
    temporary_path = tempfile.mkdtemp()
    try:
        build_path = os.path.join(temporary_path, 'build')
        os.mkdir(build_path)
        if sys.platform.startswith('darwin'):
            # Work around macOS system-Python prefix handling so that
            # `pip install --target` works: write an empty install prefix.
            # NOTE(review): this overwrites the user's ~/.pydistutils.cfg.
            pydistutils_cfg = os.path.expanduser(
                os.path.join('~', '.pydistutils.cfg'),
            )
            with open(pydistutils_cfg, 'w') as f:
                f.write('[install]\n')
                f.write('prefix=\n')
        destination_path = os.path.join(temporary_path, 'destination')
        os.mkdir(destination_path)
        # Download the pinned requirements (without dependencies) ...
        to_install = (
            ('--requirement', os.path.join(root, 'requirements.txt')),
        )
        for target in to_install:
            check_call(
                (
                    sys.executable,
                    '-m', 'pip',
                    'download',
                    '--no-deps',
                    '--dest', destination_path,
                ) + target,
                cwd=build_path,
            )
        # ... and the project's own source distribution.
        check_call(
            (
                sys.executable,
                os.path.join(root, 'setup.py'),
                'sdist',
                '--dist-dir', destination_path,
            ),
            cwd=root,
        )
        to_install = [
            os.path.join(destination_path, name)
            for name in os.listdir(destination_path)
        ]
        # Install all collected distributions flat into build_path.
        check_call(
            (
                sys.executable,
                '-m', 'pip',
                'install',
                '--target', build_path,
            ) + tuple(to_install),
            cwd=build_path,
        )
        shutil.copyfile(
            script,
            os.path.join(build_path, '__main__.py'),
        )
        # Zip the installed tree; renamed the walk variable (was `root`,
        # shadowing the function parameter) and `zip` (shadowing the builtin).
        with zipfile.ZipFile(file=str(artifact), mode='w') as archive:
            for walk_root, directories, files in os.walk(str(build_path)):
                for name in files:
                    path = os.path.join(walk_root, name)
                    archive_name = os.path.relpath(
                        path=path,
                        start=build_path,
                    )
                    archive.write(filename=path, arcname=archive_name)
    finally:
        # BUG FIX: remove the whole scratch tree. The original removed only
        # build_path, leaking temporary_path and the downloaded distributions
        # in destination_path on every run.
        shutil.rmtree(temporary_path)
def main():
    """Entry point: build ``cipi.pyz`` into the current working directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    artifact_path = os.path.join(os.getcwd(), 'cipi.pyz')
    entry_script = os.path.join(here, 'src', 'cipi', '__main__.py')
    build(artifact=artifact_path, script=entry_script, root=here)
# Script entry point: main() returns None, so sys.exit(None) terminates
# with exit status 0.
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
5011825 | <reponame>jlconlin/PhDThesis
# Subversion keyword-expansion metadata for this module.
__id__ = "$Id: powerMC.py 163 2007-10-05 12:35:38Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 163 $"
__date__ = "$Date: 2007-10-05 06:35:38 -0600 (Fri, 05 Oct 2007) $"
import random
import math
import time
import Gnuplot
import scipy.stats
import Markov
import fissionBank
import fissionSource
class powerMC(Markov.Markov):
    """
    powerMC performs a simple Monte Carlo Power Method to find the dominant
    eigenvalue (multiplication factor k) of a fission system.

    Python 2 code; relies on the parent ``Markov.Markov`` class to provide
    ``transport`` and the ``histories``/``geo``/``xs`` attributes.
    """
    def __init__(self, geo, xs, inactive, active, histories=1000):
        """
        geo: Geometry of the simulation
        xs: Cross sections for the simulation
        inactive: Number of Monte Carlo generations to skip (source convergence)
        active: Number of active Monte Carlo generations used for statistics
        histories: Number of histories to run in each cycle
        """
        self.active = active
        self.inactive = inactive
        Markov.Markov.__init__(self, geo, xs, histories)
    def power(self, source):
        """
        power is the main method for this algorithm.  Runs ``inactive``
        discarded cycles followed by ``active`` scored cycles.
        source: Initial guess of fission source
        """
        # Initialize per-run tallies
        self.k = 1
        self.cycle_k = [] # eigenvalue estimate of each active cycle
        self.convergence = []  # running mean of cycle_k
        self.sd = [] # standard deviation of the mean per iteration
        self.k_inactive = []  # eigenvalue history of discarded cycles
        self.vectorStorage = []  # normalized fission-source histogram per cycle
        self.source = source
        start = time.time()
        elapsed = 0
        totaltime = 0
        # Inactive cycles: let the fission source converge, discard tallies.
        for i in xrange(1, self.inactive+1):
            self.nextBank = fissionBank.fissionBank()
            self.transport(self.source)
            # k is scaled by the population ratio of successive generations.
            self.k = self.k*len(self.nextBank)/float(self.histories)
            self.k_inactive.append(self.k)
            totaltime = time.time()-start
            print "iteration: %5i, eigenvalue = %8.6f," %(i, self.k),
            print " time: %8.3f sec" %(totaltime)
            self.source = self.nextBank
        print "------- Starting active cycles -------"
        # Active cycles: accumulate eigenvalue statistics and source vectors.
        for self.i in xrange(1, self.active+1):
            self.nextBank = fissionBank.fissionBank()
            self.transport(self.source)
            self.k = (self.k*len(self.nextBank)/float(self.histories))
            self.cycle_k.append(self.k)
            self.convergence.append(scipy.mean(self.cycle_k))
            # Standard deviation of the mean over the cycles seen so far.
            self.sd.append((1/math.sqrt(self.i))*scipy.std(self.cycle_k))
            totaltime = time.time()-start
            print "iteration: %5i, eigenvalue = %8.6f," %(self.i, self.k),
            print " std.dev = %6.4f, time: %8.3f sec" %(
                    scipy.std(self.convergence), totaltime)
            self.source = self.nextBank
            # Store the normalized fission-source shape for this cycle.
            Y = fissionSource.histogramSource(self.source,self.geo)
            Y = Y/sum(Y)
            self.vectorStorage.append(Y)
    def _estVar(self):
        """
        Estimate the variance of the running eigenvalue mean; zero until at
        least two active cycles have completed.
        """
        if self.i > 1:
            self.vark = scipy.stats.var(self.convergence)
        else:
            self.vark = 0.0
    def score(self, history):
        """
        score provides the functionality for scoring tallies in a Markov
        process.  Overrides the parent-class hook.

        history: Particle to be tracked; ``N`` new fission particles are
        banked using stochastic rounding of the expected fission yield.
        """
        ran = random.random()
        # Expected fissions per collision, rounded stochastically via ``ran``.
        N = math.floor(history.weight*((1.0/self.k)*(self.xs.nu*self.xs.xF/self.xs.xT)) + ran)
        self.nextBank.append(history, N) # Contribute to fission source
| StarcoderdataPython |
1996376 | <reponame>certik/sympy-oldcore
import sys
sys.path.append("..")
from sympy.numerics import *
from sympy.numerics.utils_ import *
from sympy.numerics.constants import pi_float
import math
from time import clock
def display_fraction(digits, skip=0, colwidth=10, columns=5):
    """Pretty-print a digit string in columns with a running digit count.

    digits: string of digits to display.
    skip: number of leading characters (the integer part) to omit.
    colwidth: digits per column; columns: columns per line.
    Python 2 print statements; a blank line is inserted every 10 rows.
    """
    perline = colwidth * columns
    printed = 0
    # Full lines of ``columns`` groups of ``colwidth`` digits each.
    for linecount in range((len(digits)-skip) // (colwidth * columns)):
        line = digits[skip+linecount*perline:skip+(linecount+1)*perline]
        for i in range(columns):
            print line[i*colwidth : (i+1)*colwidth],
        print ":", (linecount+1)*perline
        if (linecount+1) % 10 == 0:
            print
        printed += colwidth*columns
    # Trailing partial line, padded so the ``:`` count column lines up.
    rem = (len(digits)-skip) % (colwidth * columns)
    if rem:
        buf = digits[-rem:]
        s = ""
        for i in range(columns):
            s += buf[:colwidth].ljust(colwidth+1, " ")
            buf = buf[colwidth:]
        print s + ":", printed + colwidth*columns
def calculateit(func, base, n, tofile):
    """Compute ``n`` base-``base`` digits of the constant produced by *func*.

    func: zero-argument callable returning a Float (e.g. ``pi_float``).
    base: output radix (2-36); n: number of fractional digits.
    tofile: open file object to redirect output to, or a falsy value to
    print to the screen.  Python 2 code.
    """
    # A low-precision pass just to size the integer part of the numeral.
    Float.setprec(100)
    intpart = small_numeral(int(float(func())), base)
    if intpart == 0:
        skip = 0
    else:
        skip = len(intpart)
    # Working precision in bits, with a small safety margin.
    Float.setprec(int(n*math.log(base,2))+10)
    print "Step 1 of 2: calculating binary value..."
    t = clock()
    a = func()
    step1_time = clock() - t
    print "Step 2 of 2: converting to specified base..."
    t = clock()
    # Convert mantissa/exponent to fixed-point digits in the target base.
    d = bin_to_radix(a.man, -a.exp, base, n)
    d = fixed_to_str(d, base, n)
    step2_time = clock() - t
    print "\nWriting output...\n"
    # Temporarily redirect stdout when writing to a file.
    if tofile:
        out_ = sys.stdout
        sys.stdout = tofile
    print "%i base-%i digits of pi:\n" % (n, base)
    print intpart, ".\n"
    display_fraction(d, skip, colwidth=10, columns=5)
    if tofile:
        sys.stdout = out_
    print "\nFinished in %f seconds (%f calc, %f convert)" % \
        ((step1_time + step2_time), step1_time, step2_time)
def interactive():
    """Prompt the user for base/digit count/output file and run the
    pi-digit computation.  Python 2 (``input``/``raw_input``)."""
    print "Compute digits of pi with SymPy\n"
    base = input("Which base? (2-36, 10 for decimal) \n> ")
    digits = input("How many digits? (enter a big number, say, 10000)\n> ")
    tofile = raw_input("Output to file? (enter a filename, or just press enter\nto print directly to the screen) \n> ")
    if tofile:
        tofile = open(tofile, "w")
    global_options["verbose"] = True
    global_options["verbose_base"] = base
    calculateit(pi_float, base, digits, tofile)
    raw_input("\nPress enter to close this script.")
# Runs immediately on import/execution (no __main__ guard).
interactive()
| StarcoderdataPython |
import setuptools
# Long description is taken verbatim from the README for PyPI rendering.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Packaging metadata for the soso-event library.
setuptools.setup(
    name="soso-event",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A simple event handling library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/sohailsomani/soso-event",
    packages=['soso.event'],
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ],
    python_requires=">=3.8",
    zip_safe=False
)
| StarcoderdataPython |
3528947 | <gh_stars>0
# Check whether the first number is evenly divisible by the second.
number1 = int(input("Enter first number: "))
number2 = int(input("Enter second number: "))
is_divisible = number1 % number2 == 0
if is_divisible:
    print(str(number1)+" is divisible by "+str(number2))
else:
    print(str(number1)+" is not divisible by "+str(number2))
| StarcoderdataPython |
3538913 | import os, json, pickle
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
# Cluster speaker d-vectors from SCOTUS oral-argument cases with K-Means.
np.random.seed(13)
fp = '../../data/SCOTUS/'
case_path = fp+'dvectors/sco50d/'
# Each case contributes two .npy files, hence the division by 2.
# NOTE(review): '/' yields a float here; train_cases ends up float too.
total_cases = (len(os.listdir(case_path))/2)
train_cases = (total_cases//10)*9
print("# total cases:" , total_cases)
print("# of training:", train_cases)
trn_seq_lst = []
trn_cluster_lst = []
test_seq_lst = []
test_cluster_lst = []
verbose = True
if verbose:
    print("\n", "="*50, "\n Processing SVE d-vec")
# Load case-embedded d-vectors: '<id>_seq.npy' holds embeddings,
# '<id>_id.npy' holds the matching speaker labels.
i = 0
for case in os.listdir(case_path):
    if case[-7:] == 'seq.npy':
        case_id = case.split('/')[-1].split('_')[0]
        train_sequence = np.load(case_path+case)
        train_clus = np.load(case_path+case_id+'_id.npy')
        if verbose:
            if i > train_cases:
                print("-- Stored as test case --")
            else:
                print("-- Stored as train case --")
            print('Processed case:', case_id)
            print('emb shape:', np.shape(train_sequence))
            print('label shape:', np.shape(train_clus))
        # Split into training or testing lists by case index.
        if i < train_cases:
            trn_seq_lst.append(train_sequence)
            trn_cluster_lst.append(train_clus)
        else:
            test_seq_lst.append(train_sequence)
            test_cluster_lst.append(train_clus)
        i+=1
# Keep only judge embeddings (labels < 20 -- presumably judge speaker IDs;
# TODO confirm against the labeling scheme).
judge_seq = []
judge_id = []
test_seq = []
test_id = []
for i, case in enumerate(trn_cluster_lst):
    case_seq = []
    case_id = []
    for j, emb in enumerate(case):
        if emb<20:
            case_seq.append(trn_seq_lst[i][j])
            case_id.append(emb)
    judge_seq.append(case_seq)
    judge_id.append(case_id)
for i, case in enumerate(test_cluster_lst):
    case_seq = []
    case_id = []
    for j, emb in enumerate(case):
        if emb<20:
            case_seq.append(test_seq_lst[i][j])
            case_id.append(emb)
    test_seq.append(case_seq)
    test_id.append(case_id)
# Build the training matrix from the first ``limit`` cases.
limit = 30
X = np.concatenate([case for case in judge_seq[:limit]])
Y = np.concatenate([id for id in judge_id[:limit]])
num = len(np.unique(Y))
print('Number of speakers in training set:', num)
# Evaluate on a single held-out case.
test = test_seq[0]
test_lab = test_id[0]
print('-- K-Means --')
print('-- Training --')
model = KMeans(n_clusters=num, random_state=0)
model.fit(X)
print('-- Inference --')
infer = model.predict(test)
if verbose:
    print(len(infer), type(infer))
    print(len(test), type(test_lab))
print('Visualize Prediction')
print('='*50)
# Side-by-side look at true labels vs. cluster assignments.
print(test_lab[80:120])
print(infer[80:120])
print('--- Centroid Array ---')
print(np.shape(model.cluster_centers_))
print('-- complete ---')
3390467 | """Standard setuptools.
"""
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Use the README as the long description when present; fall back to empty.
README = path.join(here, 'README.txt')
if path.isfile(README):
    with open(README) as f:
        long_description = f.read()
else:
    long_description = ''
# Packaging metadata for django-madcap-flare.
setup(
    name='django-madcap-flare',
    version='0.0.3',
    description='Integrate Madcap Flare docs into your Django project',
    long_description=long_description,
    url='https://github.com/mypebble/django-madcap-flare',
    author='Pebble',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='django madcap flare web development',
    packages=find_packages(),
    install_requires=['django'],
    extras_require={
        'dev': [],
        'test': [],
    },
)
| StarcoderdataPython |
1806939 | <gh_stars>0
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import warnings
import requests
from ...session import new_session
from .config import MarsSchedulerSpecConfig, MarsWorkerSpecConfig, MarsWebSpecConfig, \
MarsJobConfig
# kubernetes is an optional dependency; keep the exception type importable
# (or None) so the rest of the module can reference it safely.
try:
    from kubernetes.client.rest import ApiException as K8SApiException
except ImportError:  # pragma: no cover
    K8SApiException = None
# API group/version and plural name of the KubeDL MarsJob custom resource.
KUBEDL_API_VERSION = 'kubedl.io/v1alpha1'
KUBEDL_MARS_PLURAL = 'marsjobs'
class KubeDLClusterClient:
    """Thin client wrapper around a :class:`KubeDLCluster`: starts the
    cluster, opens a default Mars session against its web endpoint, and
    forwards shutdown."""
    def __init__(self, cluster):
        self._cluster = cluster
        self._endpoint = None  # set by start()
        self._session = None   # set by start()
    @property
    def endpoint(self):
        # Mars web endpoint URL; None until start() has been called.
        return self._endpoint
    @property
    def namespace(self):
        return self._cluster.namespace
    @property
    def session(self):
        return self._session
    def start(self):
        """Start the cluster and register a default Mars session."""
        self._endpoint = self._cluster.start()
        self._session = new_session(self._endpoint, verify_ssl=self._cluster.verify_ssl).as_default()
    def stop(self, wait=False, timeout=0):
        """Tear down the underlying cluster (optionally blocking)."""
        self._cluster.stop(wait=wait, timeout=timeout)
class KubeDLCluster:
    """Creates and manages a Mars cluster (scheduler/worker/web roles) via
    the KubeDL ``MarsJob`` custom resource in a Kubernetes namespace."""
    def __init__(self, kube_api_client=None, image=None, job_name=None, namespace=None,
                 scheduler_num=1, scheduler_cpu=None, scheduler_mem=None,
                 worker_num=1, worker_cpu=None, worker_mem=None, worker_spill_paths=None,
                 worker_cache_mem=None, min_worker_num=None,
                 web_num=1, web_cpu=None, web_mem=None,
                 slb_endpoint=None, verify_ssl=True, timeout=None, **kwargs):
        from kubernetes import client as kube_client
        self._kube_api_client = kube_api_client
        self._custom_api = kube_client.CustomObjectsApi(kube_api_client)
        # NOTE(review): assumes slb_endpoint is a non-None string.
        self._slb_endpoint = slb_endpoint.rstrip("/")
        self._verify_ssl = verify_ssl
        self._job_name = job_name
        self._mars_endpoint = None
        self._namespace = namespace or 'default'
        self._image = image
        self._timeout = timeout
        self._extra_volumes = kwargs.pop('extra_volumes', ())
        self._pre_stop_command = kwargs.pop('pre_stop_command', None)
        self._log_when_fail = kwargs.pop('log_when_fail', False)
        self._node_selectors = kwargs.pop('node_selectors', None)
        # Extra modules/envs shared by all roles; each role may extend them.
        extra_modules = kwargs.pop('extra_modules', None) or []
        extra_modules = extra_modules.split(',') if isinstance(extra_modules, str) \
            else extra_modules
        extra_envs = kwargs.pop('extra_env', None) or dict()
        def _override_modules(updates):
            # Merge role-specific modules into the shared set (deduplicated).
            modules = set(extra_modules)
            updates = updates.split(',') if isinstance(updates, str) \
                else updates
            modules.update(updates)
            return sorted(modules)
        def _override_envs(updates):
            # Role-specific env vars take precedence over the shared ones.
            ret = extra_envs.copy()
            ret.update(updates)
            return ret
        self._scheduler_num = scheduler_num
        self._scheduler_cpu = scheduler_cpu
        self._scheduler_mem = scheduler_mem
        self._scheduler_extra_modules = _override_modules(kwargs.pop('scheduler_extra_modules', []))
        self._scheduler_extra_env = _override_envs(kwargs.pop('scheduler_extra_env', None) or dict())
        self._worker_num = worker_num
        self._worker_cpu = worker_cpu
        self._worker_mem = worker_mem
        self._worker_spill_paths = worker_spill_paths
        self._worker_cache_mem = worker_cache_mem
        # The cluster is considered ready once this many workers report in.
        self._min_worker_num = min_worker_num or worker_num
        self._worker_extra_modules = _override_modules(kwargs.pop('worker_extra_modules', []))
        self._worker_extra_env = _override_envs(kwargs.pop('worker_extra_env', None) or dict())
        self._web_num = web_num
        self._web_cpu = web_cpu
        self._web_mem = web_mem
        self._web_extra_modules = _override_modules(kwargs.pop('web_extra_modules', []))
        self._web_extra_env = _override_envs(kwargs.pop('web_extra_env', None) or dict())
    @property
    def verify_ssl(self):
        return self._verify_ssl
    def _create_service(self):
        """Build the MarsJob spec from the role configs and submit it as a
        namespaced custom object; records the server-assigned job name."""
        scheduler_cfg = MarsSchedulerSpecConfig(
            self._image, self._scheduler_num, cpu=self._scheduler_cpu, memory=self._scheduler_mem,
            node_selectors=self._node_selectors, modules=self._scheduler_extra_modules,
        )
        scheduler_cfg.add_simple_envs(self._scheduler_extra_env)
        worker_cfg = MarsWorkerSpecConfig(
            self._image, self._worker_num, cpu=self._worker_cpu, memory=self._worker_mem,
            cache_mem=self._worker_cache_mem, spill_dirs=self._worker_spill_paths,
            node_selectors=self._node_selectors, modules=self._worker_extra_modules
        )
        worker_cfg.add_simple_envs(self._worker_extra_env)
        web_cfg = MarsWebSpecConfig(
            self._image, self._web_num, cpu=self._web_cpu, memory=self._web_mem,
            node_selectors=self._node_selectors, modules=self._web_extra_modules
        )
        web_cfg.add_simple_envs(self._web_extra_env)
        job_cfg = MarsJobConfig(
            job_name=self._job_name, scheduler_config=scheduler_cfg, worker_config=worker_cfg,
            web_config=web_cfg, web_host=self._slb_endpoint
        )
        api, version = KUBEDL_API_VERSION.rsplit('/', 1)
        cfg_json = job_cfg.build()
        cfg_json['apiVersion'] = KUBEDL_API_VERSION
        response = self._custom_api.create_namespaced_custom_object(
            api, version, self._namespace, KUBEDL_MARS_PLURAL, cfg_json)
        self._job_name = response['metadata']['name']
    def _wait_service_ready(self):
        """Poll the Mars web service until at least ``min_worker_num``
        workers are registered, or raise on job failure / timeout."""
        self._mars_endpoint = f'{self._slb_endpoint}/mars/{self._namespace}/{self._job_name}-webservice-0'
        check_start_time = time.time()
        worker_count_url = self._mars_endpoint + '/api/worker?action=count'
        while True:
            try:
                if self._timeout and time.time() - check_start_time > self._timeout:
                    raise TimeoutError('Check Mars service start timeout')
                if not self._verify_ssl:
                    # Silence urllib3's per-request insecure-HTTPS warnings.
                    try:
                        import urllib3
                        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
                    except ImportError:  # pragma: no cover
                        pass
                api, version = KUBEDL_API_VERSION.rsplit('/', 1)
                service_obj = self._custom_api.get_namespaced_custom_object_status(
                    api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
                # Bail out early if KubeDL marked the job as failed.
                if len(service_obj.get('status', dict()).get('conditions', [])) > 0:
                    if service_obj['status']['conditions'][-1]['type'] == 'Failed':
                        raise SystemError(service_obj['status']['conditions'][-1]['message'])
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', message='Unverified HTTPS request')
                    resp = requests.get(worker_count_url, timeout=1, verify=self._verify_ssl)
                    if int(resp.text) >= self._min_worker_num:
                        break
            except (requests.Timeout, ValueError) as ex:
                # ValueError: worker count endpoint not serving numbers yet;
                # back off briefly.  Timeouts simply retry immediately.
                if not isinstance(ex, requests.Timeout):
                    time.sleep(0.1)
                pass
    def start(self):
        """Create the MarsJob and block until it is ready; returns the Mars
        web endpoint.  Tears the job down again if startup fails."""
        try:
            self._create_service()
            self._wait_service_ready()
            return self._mars_endpoint
        except:  # noqa: E722
            self.stop()
            raise
    def stop(self, wait=False, timeout=0):
        """Delete the MarsJob custom object.  With ``wait=True``, poll until
        the object is gone (404) or ``timeout`` seconds elapse."""
        from kubernetes import client as kube_client
        custom_api = kube_client.CustomObjectsApi(self._kube_api_client)
        api, version = KUBEDL_API_VERSION.rsplit('/', 1)
        custom_api.delete_namespaced_custom_object(
            api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
        if wait:
            start_time = time.time()
            while True:
                try:
                    custom_api.get_namespaced_custom_object(
                        api, version, self._namespace, KUBEDL_MARS_PLURAL, self._job_name)
                except K8SApiException as ex:
                    if ex.status != 404:  # pragma: no cover
                        raise
                    break
                else:
                    time.sleep(1)
                    if timeout and time.time() - start_time > timeout:  # pragma: no cover
                        raise TimeoutError('Check Mars service stop timeout')
def new_cluster(kube_api_client=None, image=None, scheduler_num=1, scheduler_cpu=2,
                scheduler_mem=4 * 1024 ** 3, worker_num=1, worker_cpu=8, worker_mem=32 * 1024 ** 3,
                worker_spill_paths=None, worker_cache_mem='45%', min_worker_num=None,
                web_num=1, web_cpu=1, web_mem=4 * 1024 ** 3, slb_endpoint=None, verify_ssl=True,
                job_name=None, timeout=None, **kwargs):
    """Create a KubeDL-managed Mars cluster with sensible resource defaults,
    start it, and return a connected :class:`KubeDLClusterClient`.

    Memory sizes are in bytes; extra keyword arguments are forwarded to
    :class:`KubeDLCluster`.
    """
    # Default spill directory keeps workers functional without extra config.
    worker_spill_paths = worker_spill_paths or ['/tmp/spill-dir']
    cluster = KubeDLCluster(kube_api_client, image=image, scheduler_num=scheduler_num,
                            scheduler_cpu=scheduler_cpu, scheduler_mem=scheduler_mem,
                            worker_num=worker_num, worker_cpu=worker_cpu, worker_mem=worker_mem,
                            worker_spill_paths=worker_spill_paths, worker_cache_mem=worker_cache_mem,
                            min_worker_num=min_worker_num, web_num=web_num, web_cpu=web_cpu,
                            web_mem=web_mem, slb_endpoint=slb_endpoint, verify_ssl=verify_ssl,
                            job_name=job_name, timeout=timeout, **kwargs)
    client = KubeDLClusterClient(cluster)
    client.start()
    return client
| StarcoderdataPython |
4924732 | # Tries: 1
# Read four rows x rows boards separated by blank lines: 4 boards of
# ``rows`` lines each plus 3 separators = 4*rows + 3 input lines.
rows = int(input())
boards = []
board = ""
for i in range(4 * rows + 3):
    line = input()
    if not line:
        # Blank separator: the current board is complete.
        boards.append(board)
        board = ""
    else:
        board += line
# The last board has no trailing blank line, so append it explicitly.
boards.append(board)
def difference(board_in, type=0):
    """Count cells of *board_in* that differ from an alternating '0'/'1'
    pattern whose first expected cell is ``str(type)``.

    board_in: flattened board as a string of '0'/'1' characters.
    type: starting parity of the expected pattern (0 or 1).
    """
    expected = type
    mismatches = 0
    for cell in board_in:
        mismatches += cell != str(expected)
        expected = (expected + 1) % 2
    return mismatches
# Cost of repainting each board to the pattern starting with '0'; the
# complementary pattern costs rows**2 minus that.
zero_board_diff = [difference(board, 0) for board in boards]
one_board_diff = [rows ** 2 - d for d in zero_board_diff]
# Pick two boards to carry the zero-start pattern and give the other two
# the one-start pattern.  The total is symmetric in (i, j), so iterating
# unordered pairs avoids redundant duplicates.
possibilities = []
for i in range(4):
    for j in range(i + 1, 4):
        others = [n for n in range(4) if n not in (i, j)]
        possibilities.append(zero_board_diff[i] +
                             zero_board_diff[j] +
                             one_board_diff[others[0]] +
                             one_board_diff[others[1]]
                             )
# min() is O(n); sorting the whole list just for the smallest was wasteful.
print(min(possibilities))
| StarcoderdataPython |
4044 | from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard
from keras.models import load_model
import random
import numpy as np
from scipy import misc
import gc
from keras.optimizers import Adam
from imageio import imread
from datetime import datetime
import os
import json
import models
from utils import DataLoader, LrPolicy
from config import Config
import argparse
def get_parser():
    """Build the command-line parser for the training entry point.

    Exposes a single mandatory ``--configPath``/``-c`` option pointing at
    the JSON training configuration.
    """
    cli = argparse.ArgumentParser('train')
    cli.add_argument('--configPath', '-c', required=True)
    return cli
def train(args=None):
    """Train a Keras model as described by the config file given on the
    command line.

    args: optional argv list (defaults to ``sys.argv`` via argparse).
    Creates a timestamped log directory, saves the config there, then fits
    the model with checkpointing, LR scheduling and TensorBoard callbacks.
    """
    parser = get_parser()
    args = parser.parse_args(args)
    conf=Config()
    conf.load(args.configPath)
    # Timestamp makes every run's log directory unique.
    time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    trainString="%s_%s_%s_%s" % (conf.model,conf.optimizer,str(conf.lr),time)
    os.makedirs(conf.logPath+"/"+trainString)
    conf.save(conf.logPath+"/"+trainString+'/config.json')
    print('Compiling model...')
    # Save weights after every epoch; filename embeds epoch and val loss.
    model_checkpoint = ModelCheckpoint(conf.logPath+"/"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True)
    change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay)
    tbCallBack=TensorBoard(log_dir=conf.logPath+"/"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True)
    model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel)
    model.compile(optimizer = conf.optimizer, loss = conf.loss)
    # Shuffle the .jpg file list, then carve off the validation split.
    data = [conf.trainDataPath+"/"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in f]
    random.shuffle(data)
    thr=int(len(data)*conf.validationSplit)
    trainData=data[thr:]
    valData=data[:thr]
    trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue)
    validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue)
    print('Fitting model...')
    model.fit_generator(generator=trainDataLoader.generator(),
                        validation_data=validationDataLoader.generator(),
                        steps_per_epoch=len(trainData)//conf.batchSize,
                        validation_steps=len(valData)//conf.batchSize,
                        epochs=conf.epoches,
                        verbose=1,
                        initial_epoch=0,
                        callbacks = [model_checkpoint, change_lr,tbCallBack]
                        )
# Script entry point.
if __name__ == "__main__":
    train()
| StarcoderdataPython |
3597684 | """ Tests for CLI doctrans subparser (__main__.py) """
from os import path
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import patch
from cdd.tests.utils_for_tests import mock_function, run_cli_test, unittest_main
class TestCliDocTrans(TestCase):
    """Tests for the ``doctrans`` CLI subparser in ``cdd.__main__``."""
    def test_doctrans_fails_with_wrong_args(self) -> None:
        """An unknown option must exit with code 2 and report the
        missing required arguments."""
        run_cli_test(
            self,
            ["doctrans", "--wrong"],
            exit_code=2,
            output="the following arguments are required: --filename, --format\n",
        )
    def test_doctrans_fails_with_file_missing(self) -> None:
        """A nonexistent ``--filename`` must exit with code 2 and an
        explanatory message (doctrans itself is mocked out)."""
        with patch("cdd.__main__.doctrans", mock_function):
            self.assertTrue(
                run_cli_test(
                    self,
                    [
                        "doctrans",
                        "--filename",
                        "foo",
                        "--format",
                        "google",
                        "--no-type-annotations",
                    ],
                    exit_code=2,
                    output="--filename must be an existent file. Got: 'foo'\n",
                ),
            )
    def test_doctrans_succeeds(self) -> None:
        """With an existing file, argument parsing reaches the (mocked)
        doctrans call without raising or exiting."""
        with TemporaryDirectory() as tempdir:
            # Create an empty file so the existence check passes.
            filename = path.join(tempdir, "foo")
            open(filename, "a").close()
            with patch("cdd.__main__.doctrans", mock_function):
                self.assertTrue(
                    run_cli_test(
                        self,
                        [
                            "doctrans",
                            "--filename",
                            filename,
                            "--format",
                            "numpydoc",
                            "--type-annotations",
                        ],
                        exit_code=None,
                        output=None,
                    ),
                )
# Runs the tests when executed directly (no-op under a test runner).
unittest_main()
| StarcoderdataPython |
4955819 | import unittest
from geopar.tf_validator import TF_Validator
from geopar.triangulated_figure_class import TriangulatedFigure
from geopar.triangle_class import Triangle
__author__ = 'satbek'
# URL1:
# https://docs.google.com/presentation/d/1nddxo9JPaoxz-Colod8qd6Yuj_k7LXhBfO3JlVSYXrE/edit?usp=sharing
# URL 2:
# https://docs.google.com/presentation/d/1nddxo9JPaoxz-Colod8qd6Yuj_k7LXhBfO3JlVSYXrE/edit#slide=id.g13a65b87db_0_0
class TestTFValidator(unittest.TestCase):
    """Validation-rule tests for TF_Validator over triangulated figures."""
    def setUp(self):
        # Shared validator plus an empty figure used for failure cases.
        self.validator = TF_Validator()
        self.tf_empty = TriangulatedFigure()
        # TriangulatedFigure tf1 consists of seven Triangles t1-t7.
        # Appearance: see URL1 at the top of this module.
        self.t1 = Triangle([1, 2, 5], [20, 10, 150])
        self.t2 = Triangle([5, 2, 6], [80, 10, 90])
        self.t3 = Triangle([6, 2, 3], [140, 10, 30])
        self.t4 = Triangle([4, 6, 3], [80, 70, 30])
        self.t5 = Triangle([1, 4, 3], [20, 130, 30])
        self.t6 = Triangle([1, 5, 4], [20, 70, 90])
        self.t7 = Triangle([4, 5, 6], [60, 60, 60])
        self.tf1 = TriangulatedFigure()
        self.tf1.add(self.t1)
        self.tf1.add(self.t2)
        self.tf1.add(self.t3)
        self.tf1.add(self.t4)
        self.tf1.add(self.t5)
        self.tf1.add(self.t6)
        self.tf1.add(self.t7)
        # TriangulatedFigure tf_simple consists of 1 triangle t_simple.
        # Appearance: see URL2 at the top of this module.
        self.t_simple = Triangle([1,2,3],[50,70,60])
        self.tf_simple = TriangulatedFigure([self.t_simple])
    def test_rule_180(self):
        # Each triangle's angles must sum to 180 degrees.
        self.assertTrue(self.validator.check_180_rule(self.tf1))
    def test_rule_360(self):
        # Angles around each interior point must sum to 360 degrees.
        self.assertTrue(self.validator.check_360_rule(self.tf1))
    def test_rule_pairing(self):
        self.assertTrue(self.validator.check_pairing(self.tf1))
        # An empty figure cannot be validated and should raise.
        with self.assertRaises(Exception):
            self.validator.check_180_rule(self.tf_empty)
| StarcoderdataPython |
11359 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from tests.common.test_op import triangle
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
def triangle_execute(shape, const_value, lower, dtype, attrs):
    """Compile and run the ``triangle`` op, comparing against a NumPy model.

    shape: 1-D or 2-D input shape (1-D input is broadcast to a square).
    const_value: fill value written into the masked triangular region.
    lower: True keeps the lower triangle, False keeps the upper one.
    dtype: 'float16' or 'float32'.
    attrs: compiler attribute overrides (may be None).
    Returns (input, output, expected, comparison_result).
    """
    support_type = ['float16', 'float32']
    assert dtype in support_type
    assert len(shape) <= 2
    # These loop-partition/conversion options are forced for this kernel.
    if attrs is None:
        attrs = {'enable_pre_poly_loop_partition': False}
    attrs['enable_pre_poly_loop_partition'] = False
    attrs['enable_post_poly_loop_partition'] = False
    attrs['enable_convert_if'] = True
    attrs['enable_double_buffer'] = False
    output_shape = shape
    if len(shape) == 1:
        output_shape = [shape[0], shape[0]]
    input, bench_mark = gen_data(shape, output_shape, const_value, lower, dtype)
    op_attrs = [const_value, lower]
    mod = triangle_compile(shape, dtype, op_attrs, attrs)
    source_code = mod.imported_modules[0].get_source()
    # NaN prefill makes unwritten output elements easy to spot.
    output = np.full(output_shape, np.nan, dtype)
    output = utils.mod_launch(mod, (input, output), expect=bench_mark)
    # Compare kernel output against the NumPy reference.
    compare_result = compare_tensor(output, bench_mark, rtol=5e-3, equal_nan=True)
    return input, output, bench_mark, compare_result
def triangle_compile(shape, dtype, op_attrs, attrs):
    """Build the ``triangle`` kernel module via the akg op-build harness."""
    return utils.op_build_test(triangle.triangle, [shape], [dtype], op_attrs, kernel_name='triangle', attrs=attrs)
def gen_data(shape, output_shape, const_value, lower, dtype):
    """Generate a random input and the NumPy reference output.

    A 1-D input is replicated across every row of the square output; the
    kept triangle retains the input values while the opposite triangle is
    overwritten with ``const_value`` (diagonal is always kept).
    """
    input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
    if len(shape) == 2:
        bench_mark = input
    else:
        # Broadcast the 1-D input into each row of the square benchmark.
        bench_mark = np.zeros(output_shape).astype(dtype)
        for i in range(output_shape[0]):
            bench_mark[i] = input
    if lower:
        # Lower-triangular: mask everything strictly above the diagonal.
        for i in range(output_shape[0]):
            bench_mark[i][i + 1:] = const_value
    else:
        # Upper-triangular: mask everything strictly below the diagonal.
        for i in range(output_shape[0]):
            bench_mark[i][:i] = const_value
    return input, bench_mark
| StarcoderdataPython |
292432 | <filename>compyler/warning.py
#!/usr/bin/python3
# 2019-1-27
# <NAME>
import sys
from termcolor import colored
class Warning:
    """Print a colored compiler warning and, for the ``main`` module,
    terminate the program.

    NOTE: this class shadows the builtin ``Warning`` exception type.

    module: name of the compiler module emitting the warning.
    message: human-readable warning text.
    line/pos: optional source position; shown only when both are given.
    """
    def __init__(self, module, message, line=None, pos=None):
        self.module = module
        self.message = message
        self.line = line
        self.pos = pos
        # Emit immediately so call sites are a single expression.
        self.warn()

    def warn(self):
        """Write the warning to stdout; exit if it came from ``main``."""
        # BUG FIX: the original used ``self.module is 'main'`` -- identity
        # comparison with a string literal is implementation-dependent and
        # raises SyntaxWarning on modern CPython; use equality instead.
        if self.module == 'main':
            print(f"{colored(f'Warning in {self.module}', 'yellow')}: {self.message}")
            # Terminate the program
            sys.exit()
        else:
            if self.line is None or self.pos is None:
                print(f"{colored(f'Warning in {self.module}', 'yellow')}: {self.message}")
            else:
                print(f"{colored(f'Warning in {self.module}', 'yellow')}: {self.message} at line:{self.line} col:{self.pos}")
1988013 | """Test reading and writing of program data."""
import unittest
from core import *
from google.appengine.ext import db
from named import QualtricsLink
import unit_test_helper
class QualtricsTestCase(unit_test_helper.PopulatedInconsistentTestCase):
    """Datastore-consistency tests for QualtricsLink."""
    def test_stuff(self):
        """Test that QualtricsLink.get_link catches inconsistent results,
        proving that get_link() handles eventual consistency and doesn't
        return deleted (already used and thus non-unique) links."""
        # Create two links and put them to the db.
        a = QualtricsLink.create(key_name='test_a', link='test_a',
                                 session_ordinal=1)
        b = QualtricsLink.create(key_name='test_b', link='test_b',
                                 session_ordinal=1)
        a.put()
        b.put()
        # db.get by key forces consistency.
        a, b = db.get([a.key(), b.key()])
        # Delete link a to put the db into an inconsistent state.
        db.delete(a)
        # get_link pulls in alphabetical order by link, so a would be
        # retrieved first, then b.
        b_link = QualtricsLink.get_link(1)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(b_link, 'test_b')
| StarcoderdataPython |
5123436 | <reponame>fitahol/fitahol
# coding=utf-8
from django.contrib import admin
from django.template.defaultfilters import truncatechars_html
from fitness.models import GoalRecord, InBodyRecords, FitGoal
from fitness.models import FitnessEquipment, FitnessExercise, FitnessPicture, \
FitnessVideo, ExerciseCategory, Muscle, ExerciseRecord
# Django admin registrations for fitness record models (list views only).
class GoalRecordAdmin(admin.ModelAdmin):
    list_display = ("id", "user", "cdate", "fit_goal", "current", "ctime")
admin.site.register(GoalRecord, GoalRecordAdmin)
class InBodyRecordsAdmin(admin.ModelAdmin):
    # Body-composition measurement columns shown in the change list.
    list_display = ("id", "cdate", "weight", "metabolism", "body_fat",
                    "fat_weight", "skeletal_muscle", "chest", "arm",
                    "upper_arm", "waistline", "thigh", "crus")
admin.site.register(InBodyRecords, InBodyRecordsAdmin)
class FitnessEquipmentAdmin(admin.ModelAdmin):
    list_display = ("id", "name", "desc", "ctime")
admin.site.register(FitnessEquipment, FitnessEquipmentAdmin)
class FitnessExerciseAdmin(admin.ModelAdmin):
    list_display = ("id", "name", "icon_img", "cut_desc", "category", "ctime")
    search_fields = ("name", "desc")
    list_filter = ("category", )
    def cut_desc(self, obj):
        # Truncated HTML description for the change-list column.
        return truncatechars_html(obj.desc, 30)
    cut_desc.short_description = u'内容介绍'  # label: "description"
    # NOTE(review): allow_tags was removed in Django 2.0; modern code should
    # use format_html/mark_safe instead.
    cut_desc.allow_tags = True
    def icon_img(self, obj):
        # Inline thumbnail of the exercise icon.
        return '<img src="%s" alt="icon img" />' % obj.icon.url
    icon_img.short_description = u"封面图标"  # label: "cover icon"
    icon_img.allow_tags = True
admin.site.register(FitnessExercise, FitnessExerciseAdmin)
class FitnessPictureAdmin(admin.ModelAdmin):
    list_display = ("id", "picture", "ctime")
admin.site.register(FitnessPicture, FitnessPictureAdmin)
class FitnessVideoAdmin(admin.ModelAdmin):
    list_display = ("id", "video", "ctime")
admin.site.register(FitnessVideo, FitnessVideoAdmin)
class MuscleAdmin(admin.ModelAdmin):
    list_display = ("id", "name", "en_name", "image_show")
    def image_show(self, obj):
        # Inline thumbnail of the muscle diagram.
        return '<img src="%s" alt="icon img" width="100px" />' % obj.image.url
    image_show.short_description = u"封面图标"  # label: "cover icon"
    # NOTE(review): allow_tags was removed in Django 2.0 (use format_html).
    image_show.allow_tags = True
admin.site.register(Muscle, MuscleAdmin)
class ExerciseCategoryAdmin(admin.ModelAdmin):
    list_display = ("id", "name", "en_name", "icon", "user", "ctime")
admin.site.register(ExerciseCategory, ExerciseCategoryAdmin)
class FitGoalAdmin(admin.ModelAdmin):
    list_display = ("id", "user", "name", "desc", "is_public", "goal", "ctime")
admin.site.register(FitGoal, FitGoalAdmin)
class ExerciseRecordAdmin(admin.ModelAdmin):
    list_display = ("id", "user", "event", "exercise", "value", "number", "ctime")
admin.site.register(ExerciseRecord, ExerciseRecordAdmin)
| StarcoderdataPython |
4910813 | import math
import json
import glob
import string
import os
import pprint
#having trouble with python packages not working, (nltk, numpy, metapy, pandas). Suspect Big Sur compatibility issues. Could also be a file path issue
"""term_vector_rec.py
Module Overview
- Static Inputs: (recipes Corpus, list of user-relevant doc IDs)
- Output: (top k-rated document IDs)
Workflow
- cosine()
- recommender(seen,unseen, k)
seen is the list of documents that describe a user
unseen are all other documents in the Corpus
sorted list(IDs) <- the k top-rated unseen documents to recommend based on cosine similarity to seen documents
"""
def cosine(seen_doc, unseen_doc):
    """Dot-product similarity of two BM25-weighted document term vectors.

    seen_doc / unseen_doc: dicts mapping term -> BM25 weight.  Terms that
    appear in only one vector contribute nothing; returns 0 for disjoint
    vocabularies.
    """
    return sum(
        weight * unseen_doc[term]
        for term, weight in seen_doc.items()
        if term in unseen_doc
    )
def recommender(seen, unseen, k):
    """Return the top-k recommendations as a list ``[{doc_id: score}, ...]``.

    seen: dict of doc_id -> weighted term vector describing the user.
    unseen: dict of doc_id -> weighted term vector of candidate documents.
    k: number of recommendations to return.

    Every (seen, unseen) pair is scored by cosine similarity and the k
    highest-scoring entries are returned in descending score order.  When
    fewer than k pairs exist, the result is padded with ``{"ID": 0}``
    placeholders, matching the original output shape.
    """
    scored = []
    for seen_doc in seen.values():
        for unseen_key, unseen_doc in unseen.items():
            # Dot product over shared terms (same computation as cosine()),
            # inlined so this function is self-contained.
            similarity = sum(
                weight * unseen_doc[term]
                for term, weight in seen_doc.items()
                if term in unseen_doc
            )
            scored.append((similarity, unseen_key))
    # BUG FIX: the previous hand-rolled insertion overwrote slots instead of
    # shifting them, silently dropping entries (inserting 4.5 into [5, 4, 3]
    # produced [5, 4.5, 3] rather than [5, 4.5, 4]).  A stable descending
    # sort keeps the true top k; ties keep their encounter order.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    recommendations = [{doc_id: score} for score, doc_id in scored[:k]]
    while len(recommendations) < k:
        recommendations.append({"ID": 0})
    return recommendations
def main():
    """Load weighted term vectors, compute recommendations, and print both
    the user's liked recipes and the recommended recipes.

    File locations are currently hardcoded; see the TODOs below.
    """
    # TODO(Jon) Figure out how to pass seen_id_vec dynamically or else hardcode for each recommendation run
    # These are ten relevant documents retrieved from ES queries
    a = "XetGJ5Ol0bwPahBTuG3gWCTPWU0CDQ"
    b = "YAQgjmB48uiqScqstzl/hjVtNAB9pPy"
    c = "NtnYhnSlP9xOxjI6WPFI7Lv1wBYOCEq"
    d = "yI2gS/CB4Usl4uVsuNMUHisdvJ6lXWW"
    e = "<KEY>"
    f = "<KEY>"
    g = "<KEY>"
    h = "ivbCqdV1TE31XZCewCZqi4JM3lDa3EK"
    i = "nO98G0dYo2PTel/<KEY>"
    j = "<KEY>"
    # top_k is the number of recommendations to return
    seen_id_vec = [a, b, c, d, e, f, g, h, i, j]
    seen_docs = {}
    unseen_docs = {}
    top_k = 5
    # TODO(Jon) Load up the json file
    FILE_PATH = '/Users/jon/recipes-search-engine/dataset/associated/weighted_doc_term_vecs.json'
    with open(FILE_PATH, 'r') as doc_open:
        doc_vector = json.load(doc_open)
    # Split document Corpus into seen and unseen document collections
    for doc_id in seen_id_vec:
        seen_docs[doc_id] = doc_vector.pop(doc_id, None)
    unseen_docs = doc_vector
    # Compare document similarity to user preferences to build recommendations
    print("Finding Relevant Documents...\n")
    recommendations = recommender(seen_docs, unseen_docs, top_k)
    # Display the top-k ranked recipe recommendations (as IDs)
    print('User Recommendations IDs with scores:\n')
    for item in recommendations:
        print(item)
    # Flatten the list of single-entry dicts into one dict
    rec_dict = {}
    for item in recommendations:
        rec_dict.update(item)
    # TODO(Jon) Solve known issue with file format of data
    # Load up the raw data to resolve IDs back to recipes
    inputs = {}
    outputs = {}
    DATASET_LOC_A = '/Users/jon/recipes-search-engine/dataset/recipes_raw_nosource_ar.json'
    DATASET_LOC_B = '/Users/jon/recipes-search-engine/dataset/recipes_raw_nosource_epi.json'
    with open(DATASET_LOC_A) as file_a:
        with open(DATASET_LOC_B) as file_b:
            data_a = json.load(file_a)
            # BUG FIX: this previously rebound data_a, leaving data_b
            # undefined and raising NameError in the lookups below.
            data_b = json.load(file_b)
    for item in seen_id_vec:
        if item in data_a:
            inputs[item] = data_a[item]
        elif item in data_b:
            inputs[item] = data_b[item]
    for key, value in rec_dict.items():
        if key in data_a:
            outputs[key] = data_a[key]
        elif key in data_b:
            outputs[key] = data_b[key]
    pp = pprint.PrettyPrinter(indent=4)
    print('----------------- liked RECIPES --------------------')
    pp.pprint(inputs)
    print('\n----------------- Recommended RECIPES --------------------\n')
    pp.pprint(outputs)
if __name__ == "__main__":
    main()
1950653 | <filename>logquacious/tests/test_backport_configurable_stacklevel.py<gh_stars>10-100
import logging
from unittest import TestCase
import pytest
from logquacious.backport_configurable_stacklevel import PatchedLoggerMixin
class RecordingHandler(logging.NullHandler):
    """A no-op logging handler that remembers every record it receives."""

    def __init__(self, *args, **kwargs):
        logging.NullHandler.__init__(self, *args, **kwargs)
        # All records seen so far, in arrival order.
        self.records = []

    def handle(self, record):
        """Store the record instead of emitting it."""
        self.records.append(record)
class TestPatchedLoggerMixin:
    """Tests for PatchedLoggerMixin that require no logging setup."""

    def test_error_raised_if_no_logger_attribute(self):
        # A subclass that never defines a `logger` attribute must fail
        # loudly when the monkey-patching context manager is entered.
        class LogManager(PatchedLoggerMixin):
            pass

        with pytest.raises(AttributeError):
            with LogManager().temp_monkey_patched_logger():
                pass
class TestPatchedLoggerMixinLogging(PatchedLoggerMixin, TestCase):
    """TestCase for PatchedLoggerMixin adapted from cpython logger tests.

    Adapted from `LoggerTest` class of cpython logger tests.
    See https://github.com/python/cpython/blob/master/Lib/test/test_logging.py
    """

    def setUp(self):
        # Fresh logger with a recording handler so each test can inspect
        # the records it emitted; cleanups undo the wiring afterwards.
        self.logger = logging.Logger(name='test')
        self.recording = RecordingHandler()
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        self.addCleanup(logging.shutdown)

    def test_find_caller_with_stacklevel(self):
        """Test of PatchedLoggerMixin adapted from cpython logger tests.

        See https://github.com/python/cpython/pull/7424
        """
        # `the_level` is read by `innermost` through its closure, so
        # rebinding it below changes the stacklevel of later calls.
        the_level = 1

        def innermost():
            with self.temp_monkey_patched_logger():
                self.logger.warning('test', stacklevel=the_level)

        def inner():
            innermost()

        def outer():
            inner()

        records = self.recording.records
        outer()
        record_1 = records[-1]
        assert record_1.funcName == 'innermost'

        # Each higher stacklevel should attribute the record one frame
        # further out, with a strictly increasing line number.
        the_level = 2
        outer()
        stacklevel_2 = records[-1]
        assert stacklevel_2.funcName == 'inner'
        assert stacklevel_2.lineno > record_1.lineno

        the_level = 3
        outer()
        stacklevel_3 = records[-1]
        assert stacklevel_3.funcName == 'outer'
        assert stacklevel_3.lineno > stacklevel_2.lineno

        the_level = 4
        outer()
        stacklevel_4 = records[-1]
        assert stacklevel_4.funcName == 'test_find_caller_with_stacklevel'
        assert stacklevel_4.lineno > stacklevel_3.lineno
| StarcoderdataPython |
4954112 | from __future__ import print_function
from builtins import range
import json
import random
import time
import itertools
from ethereum import utils
from ethereum.utils import parse_as_bin, big_endian_to_int, is_string
from ethereum.meta import apply_block
from ethereum.common import update_block_env_variables
from ethereum.messages import apply_transaction
import rlp
from rlp.utils import encode_hex
from ethereum.exceptions import InvalidNonce, InsufficientStartGas, UnsignedTransaction, \
BlockGasLimitReached, InsufficientBalance, InvalidTransaction, VerificationFailed
from ethereum.slogging import get_logger, configure_logging
from ethereum.config import Env
from ethereum.state import State, dict_to_prev_header
from ethereum.block import Block, BlockHeader, BLANK_UNCLES_HASH, FakeHeader
from ethereum.pow.consensus import initialize
from ethereum.genesis_helpers import mk_basic_state, state_from_genesis_declaration, \
initialize_genesis_keys
from ethereum.db import RefcountDB
# Module-level logger and logging configuration for the chain subsystem.
log = get_logger('eth.chain')
config_string = ':info' #,eth.chain:debug'
# Verbose alternative kept for debugging VM execution traces:
#config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
configure_logging(config_string=config_string)
class Chain(object):
    """A blockchain: tracks blocks, the head state, scores (total
    difficulty), re-organizations, and per-block bookkeeping indices
    (block number -> hash, tx hash -> position, child links, change logs).

    Fix applied: ``get_chain`` previously fell off the end of its loop and
    implicitly returned None when the ``to`` bound was reached; it now
    always returns the collected chain.
    """

    def __init__(self, genesis=None, env=None,
                 new_head_cb=None, reset_genesis=False, localtime=None, max_history=1000, **kwargs):
        self.env = env or Env()
        # Initialize the state: resume from a saved head if one exists,
        # otherwise build it from the supplied genesis declaration.
        if b'head_hash' in self.db:  # new head tag
            self.state = self.mk_poststate_of_blockhash(
                self.db.get('head_hash'))
            self.state.executing_on_head = True
            print('Initializing chain from saved head, #%d (%s)' %
                  (self.state.prev_headers[0].number, encode_hex(self.state.prev_headers[0].hash)))
        elif genesis is None:
            raise Exception("Need genesis decl!")
        elif isinstance(genesis, State):
            assert env is None
            self.state = genesis
            self.env = self.state.env
            print('Initializing chain from provided state')
            reset_genesis = True
        elif "extraData" in genesis:
            self.state = state_from_genesis_declaration(
                genesis, self.env, executing_on_head=True)
            reset_genesis = True
            print('Initializing chain from provided genesis declaration')
        elif "prev_headers" in genesis:
            self.state = State.from_snapshot(
                genesis, self.env, executing_on_head=True)
            reset_genesis = True
            print('Initializing chain from provided state snapshot, %d (%s)' %
                  (self.state.block_number, encode_hex(self.state.prev_headers[0].hash[:8])))
        elif isinstance(genesis, dict):
            print('Initializing chain from new state based on alloc')
            self.state = mk_basic_state(genesis, {
                "number": kwargs.get('number', 0),
                "gas_limit": kwargs.get('gas_limit', self.env.config['BLOCK_GAS_LIMIT']),
                "gas_used": kwargs.get('gas_used', 0),
                "timestamp": kwargs.get('timestamp', 1467446877),
                "difficulty": kwargs.get('difficulty', 2**25),
                "hash": kwargs.get('prevhash', '00' * 32),
                "uncles_hash": kwargs.get('uncles_hash', '0x' + encode_hex(BLANK_UNCLES_HASH))
            }, self.env)
            reset_genesis = True

        assert self.env.db == self.state.db

        initialize(self.state)
        self.new_head_cb = new_head_cb

        if self.state.block_number == 0:
            assert self.state.block_number == self.state.prev_headers[0].number
        else:
            assert self.state.block_number - 1 == self.state.prev_headers[0].number

        if reset_genesis:
            if isinstance(self.state.prev_headers[0], FakeHeader):
                header = self.state.prev_headers[0].to_block_header()
            else:
                header = self.state.prev_headers[0]
            self.genesis = Block(header)
            self.state.prev_headers[0] = header
            initialize_genesis_keys(self.state, self.genesis)
        else:
            self.genesis = self.get_block_by_number(0)

        self.head_hash = self.state.prev_headers[0].hash
        # Blocks received before their timestamp, waiting to be processed.
        self.time_queue = []
        # Blocks received before their parent, keyed by missing parent hash.
        self.parent_queue = {}
        self.localtime = time.time() if localtime is None else localtime
        self.max_history = max_history

    # Head (tip) of the chain
    @property
    def head(self):
        try:
            block_rlp = self.db.get(self.head_hash)
            if block_rlp == b'GENESIS':
                return self.genesis
            else:
                return rlp.decode(block_rlp, Block)
        except Exception as e:
            log.error(e)
            return None

    # Returns the post-state of the block
    def mk_poststate_of_blockhash(self, blockhash):
        """Reconstruct the State as it was after executing `blockhash`."""
        if blockhash not in self.db:
            raise Exception("Block hash %s not found" % encode_hex(blockhash))

        block_rlp = self.db.get(blockhash)
        if block_rlp == b'GENESIS':
            return State.from_snapshot(json.loads(
                self.db.get(b'GENESIS_STATE')), self.env)
        block = rlp.decode(block_rlp, Block)

        state = State(env=self.env)
        state.trie.root_hash = block.header.state_root
        update_block_env_variables(state, block)
        state.gas_used = block.header.gas_used
        state.txindex = len(block.transactions)
        state.recent_uncles = {}
        state.prev_headers = []
        b = block
        header_depth = state.config['PREV_HEADER_DEPTH']
        # Walk back through ancestors to rebuild prev_headers and the
        # recent-uncle index the state needs for validation.
        for i in range(header_depth + 1):
            state.prev_headers.append(b.header)
            if i < 6:
                state.recent_uncles[state.block_number - i] = []
                for u in b.uncles:
                    state.recent_uncles[state.block_number - i].append(u.hash)
            try:
                b = rlp.decode(state.db.get(b.header.prevhash), Block)
            except BaseException:
                break
        if i < header_depth:
            if state.db.get(b.header.prevhash) == b'GENESIS':
                jsondata = json.loads(state.db.get(b'GENESIS_STATE'))
                for h in jsondata["prev_headers"][:header_depth - i]:
                    state.prev_headers.append(dict_to_prev_header(h))
                for blknum, uncles in jsondata["recent_uncles"].items():
                    if int(blknum) >= state.block_number - \
                            int(state.config['MAX_UNCLE_DEPTH']):
                        state.recent_uncles[blknum] = [
                            parse_as_bin(u) for u in uncles]
            else:
                raise Exception("Dangling prevhash")
        assert len(state.journal) == 0, state.journal
        return state

    # Gets the parent block of a given block
    def get_parent(self, block):
        if block.header.number == int(self.db.get(b'GENESIS_NUMBER')):
            return None
        return self.get_block(block.header.prevhash)

    # Gets the block with a given blockhash
    def get_block(self, blockhash):
        try:
            block_rlp = self.db.get(blockhash)
            if block_rlp == b'GENESIS':
                if not hasattr(self, 'genesis'):
                    self.genesis = rlp.decode(
                        self.db.get(b'GENESIS_RLP'), sedes=Block)
                return self.genesis
            else:
                return rlp.decode(block_rlp, Block)
        except Exception as e:
            log.debug("Failed to get block", hash=blockhash, error=e)
            return None

    # Add a record allowing you to later look up the provided block's
    # parent hash and see that it is one of its children
    def add_child(self, child):
        try:
            existing = self.db.get(b'child:' + child.header.prevhash)
        except BaseException:
            existing = b''
        existing_hashes = []
        for i in range(0, len(existing), 32):
            existing_hashes.append(existing[i: i + 32])
        if child.header.hash not in existing_hashes:
            self.db.put(
                b'child:' + child.header.prevhash,
                existing + child.header.hash)

    # Gets the hash of the block with the given block number
    def get_blockhash_by_number(self, number):
        try:
            return self.db.get(b'block:%d' % number)
        except BaseException:
            return None

    # Gets the block with the given block number
    def get_block_by_number(self, number):
        return self.get_block(self.get_blockhash_by_number(number))

    # Get the hashes of all known children of a given block
    def get_child_hashes(self, blockhash):
        o = []
        try:
            data = self.db.get(b'child:' + blockhash)
            for i in range(0, len(data), 32):
                o.append(data[i:i + 32])
            return o
        except BaseException:
            return []

    # Get the children of a block
    def get_children(self, block):
        if isinstance(block, Block):
            block = block.header.hash
        if isinstance(block, BlockHeader):
            block = block.hash
        return [self.get_block(h) for h in self.get_child_hashes(block)]

    # Get the score (AKA total difficulty in PoW) of a given block
    def get_score(self, block):
        if not block:
            return 0
        key = b'score:' + block.header.hash

        fills = []
        # Walk back until a cached score is found, remembering the
        # difficulties to add on the way back down.
        while key not in self.db:
            fills.insert(0, (block.header.hash, block.difficulty))
            key = b'score:' + block.header.prevhash
            block = self.get_parent(block)

        score = int(self.db.get(key))

        for h, d in fills:
            key = b'score:' + h
            score = score + d + random.randrange(d // 10**6 + 1)
            self.db.put(key, str(score))

        return score

    # This function should be called periodically so as to
    # process blocks that were received but laid aside because
    # they were received too early
    def process_time_queue(self, new_time=None):
        self.localtime = time.time() if new_time is None else new_time
        i = 0
        while i < len(
                self.time_queue) and self.time_queue[i].timestamp <= self.localtime:
            log.info('Adding scheduled block')
            pre_len = len(self.time_queue)
            self.add_block(self.time_queue.pop(i))
            # add_block may consume further queued blocks; only advance
            # the cursor when the queue length did not change.
            if len(self.time_queue) == pre_len:
                i += 1

    # Call upon receiving a block
    def add_block(self, block):
        """Validate and store a block; returns True if it was accepted.

        Handles four cases: block from the future (queued), block
        extending the head (applied directly), block on a side chain
        (applied on a secondary state, possibly triggering a re-org),
        and block with an unknown parent (parked in parent_queue).
        """
        now = self.localtime
        # Are we receiving the block too early?
        if block.header.timestamp > now:
            i = 0
            while i < len(
                    self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
                i += 1
            self.time_queue.insert(i, block)
            log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
                     (now, block.header.timestamp, block.header.timestamp - now))
            return False

        # Is the block being added to the head?
        if block.header.prevhash == self.head_hash:
            log.info('Adding to head',
                     head=encode_hex(block.header.prevhash[:4]))
            self.state.deletes = []
            self.state.changed = {}
            try:
                apply_block(self.state, block)
            except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
                log.info('Block %d (%s) with parent %s invalid, reason: %s' %
                         (block.number, encode_hex(block.header.hash[:4]), encode_hex(block.header.prevhash[:4]), str(e)))
                return False
            self.db.put(b'block:%d' % block.header.number, block.header.hash)
            # side effect: put 'score:' cache in db
            block_score = self.get_score(block)
            self.head_hash = block.header.hash
            for i, tx in enumerate(block.transactions):
                self.db.put(b'txindex:' +
                            tx.hash, rlp.encode([block.number, i]))
            assert self.get_blockhash_by_number(
                block.header.number) == block.header.hash
            deletes = self.state.deletes
            changed = self.state.changed
        # Or is the block being added to a chain that is not currently the
        # head?
        elif block.header.prevhash in self.env.db:
            log.info('Receiving block %d (%s) not on head (%s), adding to secondary post state %s' %
                     (block.number, encode_hex(block.header.hash[:4]),
                      encode_hex(self.head_hash[:4]), encode_hex(block.header.prevhash[:4])))
            temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
            try:
                apply_block(temp_state, block)
            except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
                log.info('Block %s with parent %s invalid, reason: %s' %
                         (encode_hex(block.header.hash[:4]), encode_hex(block.header.prevhash[:4]), str(e)))
                return False
            deletes = temp_state.deletes
            block_score = self.get_score(block)
            changed = temp_state.changed
            # If the block should be the new head, replace the head
            if block_score > self.get_score(self.head):
                b = block
                new_chain = {}
                # Find common ancestor
                while b.header.number >= int(self.db.get(b'GENESIS_NUMBER')):
                    new_chain[b.header.number] = b
                    key = b'block:%d' % b.header.number
                    orig_at_height = self.db.get(
                        key) if key in self.db else None
                    if orig_at_height == b.header.hash:
                        break
                    if b.prevhash not in self.db or self.db.get(
                            b.prevhash) == b'GENESIS':
                        break
                    b = self.get_parent(b)
                replace_from = b.header.number
                # Replace block index and tx indices, and edit the state cache
                # Get a list of all accounts that have been edited along the old and
                # new chains
                changed_accts = {}
                # Read: for i in range(common ancestor block number...new block
                # number)
                for i in itertools.count(replace_from):
                    log.info('Rewriting height %d' % i)
                    key = b'block:%d' % i
                    # Delete data for old blocks
                    orig_at_height = self.db.get(
                        key) if key in self.db else None
                    if orig_at_height:
                        orig_block_at_height = self.get_block(orig_at_height)
                        log.info(
                            '%s no longer in main chain' %
                            encode_hex(
                                orig_block_at_height.header.hash))
                        # Delete from block index
                        self.db.delete(key)
                        # Delete from txindex
                        for tx in orig_block_at_height.transactions:
                            if b'txindex:' + tx.hash in self.db:
                                self.db.delete(b'txindex:' + tx.hash)
                        # Add to changed list
                        acct_list = self.db.get(
                            b'changed:' + orig_block_at_height.hash)
                        for j in range(0, len(acct_list), 20):
                            changed_accts[acct_list[j: j + 20]] = True
                    # Add data for new blocks
                    if i in new_chain:
                        new_block_at_height = new_chain[i]
                        log.info(
                            '%s now in main chain' %
                            encode_hex(
                                new_block_at_height.header.hash))
                        # Add to block index
                        self.db.put(key, new_block_at_height.header.hash)
                        # Add to txindex
                        for j, tx in enumerate(
                                new_block_at_height.transactions):
                            self.db.put(b'txindex:' + tx.hash,
                                        rlp.encode([new_block_at_height.number, j]))
                        # Add to changed list
                        if i < b.number:
                            acct_list = self.db.get(
                                b'changed:' + new_block_at_height.hash)
                            for j in range(0, len(acct_list), 20):
                                changed_accts[acct_list[j: j + 20]] = True
                    if i not in new_chain and not orig_at_height:
                        break
                # Add changed list from new head to changed list
                for c in changed.keys():
                    changed_accts[c] = True
                # Update the on-disk state cache
                for addr in changed_accts.keys():
                    data = temp_state.trie.get(addr)
                    if data:
                        self.state.db.put(b'address:' + addr, data)
                    else:
                        try:
                            self.state.db.delete(b'address:' + addr)
                        except KeyError:
                            pass
                self.head_hash = block.header.hash
                self.state = temp_state
                self.state.executing_on_head = True
        # Block has no parent yet
        else:
            if block.header.prevhash not in self.parent_queue:
                self.parent_queue[block.header.prevhash] = []
            self.parent_queue[block.header.prevhash].append(block)
            log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
                     (block.number, encode_hex(block.hash[:4]), encode_hex(block.prevhash[:4])))
            return False
        self.add_child(block)

        self.db.put(b'head_hash', self.head_hash)

        self.db.put(block.hash, rlp.encode(block))
        self.db.put(b'changed:' + block.hash,
                    b''.join([k.encode() if not is_string(k) else k for k in list(changed.keys())]))
        print('Saved %d address change logs' % len(changed.keys()))
        self.db.put(b'deletes:' + block.hash, b''.join(deletes))
        log.debug('Saved %d trie node deletes for block %d (%s)' %
                  (len(deletes), block.number, utils.encode_hex(block.hash)))
        # Delete old junk data
        old_block_hash = self.get_blockhash_by_number(
            block.number - self.max_history)
        if old_block_hash:
            try:
                deletes = self.db.get(b'deletes:' + old_block_hash)
                log.debug(
                    'Deleting up to %d trie nodes' %
                    (len(deletes) // 32))
                rdb = RefcountDB(self.db)
                for i in range(0, len(deletes), 32):
                    rdb.delete(deletes[i: i + 32])
                self.db.delete(b'deletes:' + old_block_hash)
                self.db.delete(b'changed:' + old_block_hash)
            except KeyError as e:
                print(e)
                pass
        self.db.commit()
        assert (b'deletes:' + block.hash) in self.db
        log.info('Added block %d (%s) with %d txs and %d gas' %
                 (block.header.number, encode_hex(block.header.hash)[:8],
                  len(block.transactions), block.header.gas_used))
        # Call optional callback
        if self.new_head_cb and block.header.number != 0:
            self.new_head_cb(block)
        # Are there blocks that we received that were waiting for this block?
        # If so, process them.
        if block.header.hash in self.parent_queue:
            for _blk in self.parent_queue[block.header.hash]:
                self.add_block(_blk)
            del self.parent_queue[block.header.hash]
        return True

    def __contains__(self, blk):
        """True iff `blk` (block or hash) is part of the main chain."""
        if isinstance(blk, (str, bytes)):
            try:
                blk = rlp.decode(self.db.get(blk), Block)
            except BaseException:
                return False
        try:
            o = self.get_block(self.get_blockhash_by_number(blk.number)).hash
            assert o == blk.hash
            return True
        except Exception:
            return False

    def has_block(self, block):
        return block in self

    def has_blockhash(self, blockhash):
        return blockhash in self.db

    def get_chain(self, frm=None, to=2**63 - 1):
        """Return the list of main-chain blocks with numbers in [frm, to).

        Fix: previously the function implicitly returned None when the
        `to` bound was reached; it now always returns the collected list.
        """
        if frm is None:
            frm = int(self.db.get(b'GENESIS_NUMBER')) + 1
        chain = []
        for i in itertools.islice(itertools.count(), frm, to):
            h = self.get_blockhash_by_number(i)
            if not h:
                return chain
            chain.append(self.get_block(h))
        return chain

    # Get block number and transaction index
    def get_tx_position(self, tx):
        if not isinstance(tx, (str, bytes)):
            tx = tx.hash
        if b'txindex:' + tx in self.db:
            data = rlp.decode(self.db.get(b'txindex:' + tx))
            return big_endian_to_int(data[0]), big_endian_to_int(data[1])
        else:
            return None

    def get_transaction(self, tx):
        print('Deprecated. Use get_tx_position')
        blknum, index = self.get_tx_position(tx)
        blk = self.get_block_by_number(blknum)
        return blk.transactions[index], blk, index

    # Get descendants of a block
    def get_descendants(self, block):
        output = []
        blocks = [block]
        while len(blocks):
            b = blocks.pop()
            blocks.extend(self.get_children(b))
            output.append(b)
        return output

    @property
    def db(self):
        return self.env.db

    # Get blockhashes starting from a hash and going backwards
    def get_blockhashes_from_hash(self, blockhash, max_num):
        block = self.get_block(blockhash)
        if block is None:
            return []

        header = block.header
        hashes = []
        for i in range(max_num):
            block = self.get_block(header.prevhash)
            if block is None:
                break
            header = block.header
            hashes.append(header.hash)
            if header.number == 0:
                break
        return hashes

    @property
    def config(self):
        return self.env.config
| StarcoderdataPython |
8079273 | from sqlalchemy.ext.declarative import declarative_base
import datetime
from sqlalchemy import Column, Integer, String, create_engine ,DateTime
# Declare the ORM mapping base class
Base = declarative_base()
class File(Base):
    """ORM model mapped to the 'box' table."""
    __tablename__ = 'box'

    # Primary key; presumably an externally assigned identifier — TODO confirm
    id = Column(String(50), primary_key=True)
    root = Column(String(100), nullable=False)
    # Timezone-aware creation timestamp; no default, so callers must set it
    CreateTime = Column(DateTime(timezone=True))
    Value = Column(String(250),nullable=False)
# Connect to the SQLite3 database example.db
engine = create_engine('sqlite:///example.db')

# Build the schema
Base.metadata.create_all(engine)  # equivalent to CREATE TABLE
1898751 | from django.apps import AppConfig
class PunishmentConfig(AppConfig):
    """Django application configuration for the ``punishment`` app."""

    # Dotted path Django uses to identify this application.
    name = "punishment"
| StarcoderdataPython |
1601029 | import os
import pytest
import json
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
from pigskin.pigskin import pigskin
# Credentials come from the environment so they never live in the repo.
pytest.gp_username = os.getenv('PIGSKIN_USER', '')
pytest.gp_password = os.getenv('PIGSKIN_PASS', '')

# Strings to redact from recorded cassettes: each non-empty credential is
# listed both raw and URL-quoted, since it may appear in either form.
scrub_list = []
for i in [ pytest.gp_username, pytest.gp_password ]:
    if i:
        scrub_list.append(i)
        scrub_list.append(quote(i))
def scrub_request(request):
    """Replace credentials in a recorded request body with 'REDACTED'."""
    try:
        text = request.body.decode()
    except (AttributeError, UnicodeDecodeError):
        # No decodable body — nothing to scrub.
        return request

    for secret in scrub_list:
        text = text.replace(secret, 'REDACTED')
    request.body = text.encode()
    return request
def scrub_response(response):
    """Redact credentials from a recorded response; pretty-parse JSON bodies."""
    try:
        text = response['body']['string'].decode()
    except (AttributeError, UnicodeDecodeError):
        return response

    for secret in scrub_list:
        text = text.replace(secret, 'REDACTED')
    response['body']['string'] = text.encode()

    # If the scrubbed body is JSON, keep a parsed copy so the cassette can
    # be pretty printed.
    try:
        response['body']['pretty'] = json.loads(text)
    except ValueError:
        pass

    return response
@pytest.fixture
def vcr_config():
    """VCR settings: decompress responses and scrub credentials on record."""
    return {
        'decode_compressed_response': True,
        'before_record_request': scrub_request,
        'before_record_response': scrub_response,
    }
| StarcoderdataPython |
6658439 | <gh_stars>0
# Release metadata consumed by the application at runtime.
version = '1.6.3'
revision = ''
milestone = 'Yoitsu'
release_number = '75'
projectURL = 'https://syncplay.pl/'
| StarcoderdataPython |
1843696 | from clikit.api.args.format import Argument
from clikit.api.args.format import Option
from cleo import argument
from cleo import option
def test_argument():
    """argument() maps its keyword flags onto the Argument properties."""
    # Default: required, single-valued, no default value.
    arg = argument("foo", "Foo")
    assert "Foo" == arg.description
    assert arg.is_required()
    assert not arg.is_optional()
    assert not arg.is_multi_valued()
    assert arg.default is None
    # optional=True with an explicit default.
    arg = argument("foo", "Foo", optional=True, default="bar")
    assert not arg.is_required()
    assert arg.is_optional()
    assert not arg.is_multi_valued()
    assert "bar" == arg.default
    # multiple=True: still required, default becomes an empty list.
    arg = argument("foo", "Foo", multiple=True)
    assert arg.is_required()
    assert not arg.is_optional()
    assert arg.is_multi_valued()
    assert [] == arg.default
    # optional + multiple with a list default.
    arg = argument("foo", "Foo", optional=True, multiple=True, default=["bar"])
    assert not arg.is_required()
    assert arg.is_optional()
    assert arg.is_multi_valued()
    assert ["bar"] == arg.default
def test_option():
    """option() maps its keyword flags onto the Option properties."""
    # Default: a boolean flag that accepts no value.
    opt = option("foo", "f", "Foo")
    assert "Foo" == opt.description
    assert not opt.accepts_value()
    assert not opt.is_value_optional()
    assert not opt.is_value_required()
    assert not opt.is_multi_valued()
    assert opt.default is None
    # flag=False: takes a required value.
    opt = option("foo", "f", "Foo", flag=False)
    assert "Foo" == opt.description
    assert opt.accepts_value()
    assert not opt.is_value_optional()
    assert opt.is_value_required()
    assert not opt.is_multi_valued()
    # value_required=False makes the value optional.
    opt = option("foo", "f", "Foo", flag=False, value_required=False)
    assert "Foo" == opt.description
    assert opt.accepts_value()
    assert opt.is_value_optional()
    assert not opt.is_value_required()
    assert not opt.is_multi_valued()
    # multiple=True: repeated option, default becomes an empty list.
    opt = option("foo", "f", "Foo", flag=False, multiple=True)
    assert "Foo" == opt.description
    assert opt.accepts_value()
    assert not opt.is_value_optional()
    assert opt.is_value_required()
    assert opt.is_multi_valued()
    assert [] == opt.default
    # Explicit default for a value-taking option.
    opt = option("foo", "f", "Foo", flag=False, default="bar")
    assert "Foo" == opt.description
    assert opt.accepts_value()
    assert not opt.is_value_optional()
    assert opt.is_value_required()
    assert not opt.is_multi_valued()
    assert "bar" == opt.default
| StarcoderdataPython |
3303216 | <gh_stars>0
from django.db import models
from django.utils import timezone
# Create your models here.
# Model for a book: page count, genre, and publish date.
class Book(models.Model):
    page_number = models.IntegerField()
    genre = models.CharField(max_length=100)
    # BUG FIX: pass the callable `timezone.now`, not `timezone.now()`.
    # Calling it evaluates once at import time, freezing the default date
    # for every row (and baking that moment into migrations); the callable
    # is evaluated each time a row is created.
    publish_date = models.DateField(default=timezone.now)
| StarcoderdataPython |
6564009 | <reponame>jlopezNEU/scikit-learn
import os
import shutil
import tempfile
import warnings
from pickle import loads
from pickle import dumps
from functools import partial
from importlib import resources
import pytest
import numpy as np
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets._base import (
load_csv_data,
load_gzip_compressed_csv_data,
)
from sklearn.preprocessing import scale
from sklearn.utils import Bunch
from sklearn.utils._testing import SkipTest
from sklearn.datasets.tests.test_common import check_as_frame
from sklearn.externals._pilutil import pillow_installed
from sklearn.utils import IS_PYPY
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
@pytest.fixture(scope="module")
def data_home(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture(scope="module")
def load_files_root(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture
def test_category_dir_1(load_files_root):
    # Category folder containing one sample file with known contents.
    test_category_dir1 = tempfile.mkdtemp(dir=load_files_root)
    sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1, delete=False)
    sample_file.write(b"Hello World!\n")
    sample_file.close()
    yield str(test_category_dir1)
    _remove_dir(test_category_dir1)
@pytest.fixture
def test_category_dir_2(load_files_root):
    # Empty category folder (no files), removed after the test.
    test_category_dir2 = tempfile.mkdtemp(dir=load_files_root)
    yield str(test_category_dir2)
    _remove_dir(test_category_dir2)
def test_data_home(data_home):
    """get_data_home creates, reuses and clears the data-home folder."""
    # get_data_home will point to a pre-existing folder
    # BUG FIX: the original rebound `data_home` and then asserted
    # `data_home == data_home` — a tautology. Compare the returned
    # path against the fixture value explicitly instead.
    result = get_data_home(data_home=data_home)
    assert result == data_home
    assert os.path.exists(result)

    # clear_data_home will delete both the content and the folder it-self
    clear_data_home(data_home=data_home)
    assert not os.path.exists(data_home)

    # if the folder is missing it will be created again
    result = get_data_home(data_home=data_home)
    assert os.path.exists(result)
def test_default_empty_load_files(load_files_root):
    """An empty root yields no files, no targets and no description."""
    res = load_files(load_files_root)
    assert len(res.filenames) == 0
    assert len(res.target_names) == 0
    assert res.DESCR is None
def test_default_load_files(test_category_dir_1, test_category_dir_2, load_files_root):
    """Default load_files reads the one sample file as bytes."""
    if IS_PYPY:
        pytest.xfail("[PyPy] fails due to string containing NUL characters")
    res = load_files(load_files_root)
    # One file across two category folders; content loaded as bytes.
    assert len(res.filenames) == 1
    assert len(res.target_names) == 2
    assert res.DESCR is None
    assert res.data == [b"Hello World!\n"]
def test_load_files_w_categories_desc_and_encoding(
    test_category_dir_1, test_category_dir_2, load_files_root
):
    """Restricting to one category with a description and encoding decodes
    the sample file content to str."""
    if IS_PYPY:
        pytest.xfail("[PyPy] fails due to string containing NUL characters")
    # FIX: os.path.basename is portable; splitting on "/" breaks on
    # Windows-style paths.
    category = os.path.basename(os.path.abspath(test_category_dir_1))
    res = load_files(
        load_files_root, description="test", categories=category, encoding="utf-8"
    )

    assert len(res.filenames) == 1
    assert len(res.target_names) == 1
    assert res.DESCR == "test"
    assert res.data == ["Hello World!\n"]
def test_load_files_wo_load_content(
    test_category_dir_1, test_category_dir_2, load_files_root
):
    """load_content=False returns filenames/targets but no data field."""
    res = load_files(load_files_root, load_content=False)
    assert len(res.filenames) == 1
    assert len(res.target_names) == 2
    assert res.DESCR is None
    assert res.get("data") is None
@pytest.mark.parametrize("allowed_extensions", ([".txt"], [".txt", ".json"]))
def test_load_files_allowed_extensions(tmp_path, allowed_extensions):
"""Check the behaviour of `allowed_extension` in `load_files`."""
d = tmp_path / "sub"
d.mkdir()
files = ("file1.txt", "file2.json", "file3.json", "file4.md")
paths = [d / f for f in files]
for p in paths:
p.touch()
res = load_files(tmp_path, allowed_extensions=allowed_extensions)
assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set(
res.filenames
)
@pytest.mark.parametrize(
    "filename, expected_n_samples, expected_n_features, expected_target_names",
    [
        ("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]),
        ("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]),
        ("breast_cancer.csv", 569, 30, ["malignant", "benign"]),
    ],
)
def test_load_csv_data(
    filename, expected_n_samples, expected_n_features, expected_target_names
):
    """Bundled CSV datasets load with the documented shapes and classes."""
    actual_data, actual_target, actual_target_names = load_csv_data(filename)
    assert actual_data.shape[0] == expected_n_samples
    assert actual_data.shape[1] == expected_n_features
    assert actual_target.shape[0] == expected_n_samples
    np.testing.assert_array_equal(actual_target_names, expected_target_names)
def test_load_csv_data_with_descr():
    """Passing descr_file_name appends the description without altering data."""
    data_file_name = "iris.csv"
    descr_file_name = "iris.rst"

    res_without_descr = load_csv_data(data_file_name=data_file_name)
    res_with_descr = load_csv_data(
        data_file_name=data_file_name, descr_file_name=descr_file_name
    )
    # Same three arrays in both results, plus one extra descr element.
    assert len(res_with_descr) == 4
    assert len(res_without_descr) == 3

    np.testing.assert_array_equal(res_with_descr[0], res_without_descr[0])
    np.testing.assert_array_equal(res_with_descr[1], res_without_descr[1])
    np.testing.assert_array_equal(res_with_descr[2], res_without_descr[2])

    assert res_with_descr[-1].startswith(".. _iris_dataset:")
@pytest.mark.parametrize(
    "filename, kwargs, expected_shape",
    [
        ("diabetes_data_raw.csv.gz", {}, [442, 10]),
        ("diabetes_target.csv.gz", {}, [442]),
        ("digits.csv.gz", {"delimiter": ","}, [1797, 65]),
    ],
)
def test_load_gzip_compressed_csv_data(filename, kwargs, expected_shape):
    """Bundled gzip CSV datasets load with the documented shapes."""
    actual_data = load_gzip_compressed_csv_data(filename, **kwargs)
    assert actual_data.shape == tuple(expected_shape)
def test_load_gzip_compressed_csv_data_with_descr():
    """descr_file_name returns the same data plus the description text."""
    data_file_name = "diabetes_target.csv.gz"
    descr_file_name = "diabetes.rst"

    expected_data = load_gzip_compressed_csv_data(data_file_name=data_file_name)
    actual_data, descr = load_gzip_compressed_csv_data(
        data_file_name=data_file_name,
        descr_file_name=descr_file_name,
    )

    np.testing.assert_array_equal(actual_data, expected_data)
    assert descr.startswith(".. _diabetes_dataset:")
def test_load_sample_images():
    """Both bundled sample images should load with the known corner pixels."""
    try:
        bunch = load_sample_images()
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
        return

    assert len(bunch.images) == 2
    assert len(bunch.filenames) == 2
    assert bunch.DESCR
    china, flower = bunch.images
    # Top-left pixel of the china image.
    assert np.all(china[0, 0, :] == np.array([174, 201, 231], dtype=np.uint8))
    # Top-left pixel of the flower image.
    assert np.all(flower[0, 0, :] == np.array([2, 19, 13], dtype=np.uint8))
def test_load_sample_image():
    """The china sample image should be a 427x640 RGB uint8 array."""
    try:
        china = load_sample_image("china.jpg")
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
        return
    assert china.dtype == "uint8"
    assert china.shape == (427, 640, 3)
def test_load_missing_sample_image_error():
    """Requesting an unknown sample image should raise AttributeError."""
    if not pillow_installed:
        warnings.warn("Could not load sample images, PIL is not available.")
        return
    with pytest.raises(AttributeError):
        load_sample_image("blop.jpg")
def test_load_diabetes_raw():
    """Test to check that we load a scaled version by default but that we can
    get an unscaled version when setting `scaled=False`."""
    diabetes_raw = load_diabetes(scaled=False)
    assert diabetes_raw.data.shape == (442, 10)
    # Fixed: `assert diabetes_raw.target.size, 442` used 442 as the assert
    # *message* and only checked truthiness; compare for equality instead.
    assert diabetes_raw.target.size == 442
    assert len(diabetes_raw.feature_names) == 10
    assert diabetes_raw.DESCR

    # The default loader must equal scaling the raw data by 1/sqrt(n_samples).
    diabetes_default = load_diabetes()
    np.testing.assert_allclose(
        scale(diabetes_raw.data) / (442**0.5), diabetes_default.data, atol=1e-04
    )
@pytest.mark.filterwarnings("ignore:Function load_boston is deprecated")
@pytest.mark.parametrize(
    "loader_func, data_shape, target_shape, n_target, has_descr, filenames",
    [
        (load_breast_cancer, (569, 30), (569,), 2, True, ["filename"]),
        (load_wine, (178, 13), (178,), 3, True, []),
        (load_iris, (150, 4), (150,), 3, True, ["filename"]),
        (
            load_linnerud,
            (20, 3),
            (20, 3),
            3,
            True,
            ["data_filename", "target_filename"],
        ),
        (load_diabetes, (442, 10), (442,), None, True, []),
        (load_digits, (1797, 64), (1797,), 10, True, []),
        (partial(load_digits, n_class=9), (1617, 64), (1617,), 10, True, []),
        (load_boston, (506, 13), (506,), None, True, ["filename"]),
    ],
)
def test_loader(loader_func, data_shape, target_shape, n_target, has_descr, filenames):
    """Smoke-test every toy loader: shapes, metadata, and resource files."""
    dataset = loader_func()

    assert isinstance(dataset, Bunch)
    assert dataset.data.shape == data_shape
    assert dataset.target.shape == target_shape
    if hasattr(dataset, "feature_names"):
        assert len(dataset.feature_names) == data_shape[1]
    if n_target is not None:
        assert len(dataset.target_names) == n_target
    if has_descr:
        assert dataset.DESCR
    if filenames:
        # Every advertised file must exist inside the dataset's data module.
        assert "data_module" in dataset
        assert all(
            name in dataset
            and resources.is_resource(dataset["data_module"], dataset[name])
            for name in filenames
        )
@pytest.mark.parametrize(
    "loader_func, data_dtype, target_dtype",
    [
        (load_breast_cancer, np.float64, int),
        (load_diabetes, np.float64, np.float64),
        (load_digits, np.float64, int),
        (load_iris, np.float64, int),
        (load_linnerud, np.float64, np.float64),
        (load_wine, np.float64, int),
    ],
)
def test_toy_dataset_frame_dtype(loader_func, data_dtype, target_dtype):
    """The as_frame conversion must preserve the documented dtypes."""
    bunch = loader_func()
    # Delegate the frame-conversion checks to the shared helper.
    check_as_frame(
        bunch,
        loader_func,
        expected_data_dtype=data_dtype,
        expected_target_dtype=target_dtype,
    )
def test_loads_dumps_bunch():
    """A round-tripped Bunch keeps attribute and item access in sync."""
    restored = loads(dumps(Bunch(x="x")))
    restored.x = "y"
    assert restored.x == restored["x"]
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
    """Regression test for scikit-learn 0.16 pickles read with 0.17.

    Reading ``bunch.key`` goes through ``bunch.__dict__`` (non-empty for
    0.16 Bunch objects) while assignment goes through ``__setattr__``.
    See https://github.com/scikit-learn/scikit-learn/issues/6196.
    """
    bunch = Bunch(key="original")
    # Simulate the stale state a 0.16 pickle would carry.
    bunch.__dict__["key"] = "set from __dict__"

    restored = loads(dumps(bunch))
    # The stale __dict__ entry must be ignored after unpickling.
    assert restored.key == "original"
    assert restored["key"] == "original"

    # Attribute assignment must stay in sync with item access.
    restored.key = "changed"
    assert restored.key == "changed"
    assert restored["key"] == "changed"
def test_bunch_dir():
    """dir() (used for autocompletion) must expose the Bunch keys."""
    assert "data" in dir(load_iris())
# FIXME: to be removed in 1.2
def test_load_boston_warning():
    """Check that we raise the ethical warning when loading `load_boston`."""
    expected_msg = "The Boston housing prices dataset has an ethical problem"
    with pytest.warns(FutureWarning, match=expected_msg):
        load_boston()
@pytest.mark.filterwarnings("ignore:Function load_boston is deprecated")
def test_load_boston_alternative():
    """The bundled Boston data must match the original CMU source file."""
    pd = pytest.importorskip("pandas")
    if os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1":
        raise SkipTest(
            "This test requires an internet connection to fetch the dataset."
        )

    boston_sklearn = load_boston()

    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    try:
        raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
    except ConnectionError as e:
        pytest.xfail(f"The dataset can't be downloaded. Got exception: {e}")

    # Rows alternate: even rows hold the first 11 features, odd rows carry
    # the last two feature columns followed by the target.
    expected_data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    expected_target = raw_df.values[1::2, 2]
    np.testing.assert_allclose(expected_data, boston_sklearn.data)
    np.testing.assert_allclose(expected_target, boston_sklearn.target)
| StarcoderdataPython |
8097257 | <gh_stars>0
WAVFILE = '/home/pi/projects/baby-activity-logger/baby_activity_logger/alert_button/alert.wav'
import pygame
from pygame import *
import sys
from gpiozero import Button
from time import sleep
import os
class AlertButton:
    """Plays a looping WAV alert, toggled on/off by a physical GPIO button."""
    def __init__(self, gpio_pin):
        # When True, run() keeps replaying the alert sound in a loop.
        self.alert_on = False
        # gpiozero Button wired to the given pin.
        self.play_button = Button(gpio_pin)
        pygame.display.init()
        # NOTE(review): the surface is unused, but pygame's event queue
        # appears to need a display to be set — confirm before removing.
        screen = pygame.display.set_mode((1,1))
        # Configure the mixer before pygame.init() initialises it.
        mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=4096)
        pygame.init()
        # screen=pygame.display.set_mode((400,400),0,32)
    def toggle_alert(self):
        # Flip the alert state; bound below as the button-press callback.
        self.alert_on = not self.alert_on
    def play_alert(self):
        # Play the alert file once and block until playback finishes.
        s = pygame.mixer.Sound(WAVFILE)
        ch = s.play()
        while ch.get_busy():
            pygame.time.delay(100)
    def run(self):
        """Main loop: handle QUIT/ESC events, bind the button, replay alert."""
        while True:
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    if event.key==K_ESCAPE:
                        pygame.quit()
                        sys.exit()
            # pygame.display.update()
            # (Re)binding each pass is redundant but harmless.
            self.play_button.when_pressed = self.toggle_alert
            while self.alert_on:
                self.play_alert()
                sleep(1)
| StarcoderdataPython |
111850 | <reponame>echim/pySteps
from core.helpers.point import Point


class Circle:
    """A mutable circle described by a center Point and an integer radius."""
    def __init__(self, center: Point, radius: int):
        # Backing fields; all access goes through the properties below.
        self._center: Point = center
        self._radius: int = radius
    @property
    def center(self) -> Point:
        """Center of the circle."""
        return self._center
    @center.setter
    def center(self, new_center: Point):
        self._center = new_center
    @property
    def radius(self) -> int:
        """Radius of the circle."""
        return self._radius
    @radius.setter
    def radius(self, new_radius: int):
        self._radius = new_radius
1909513 | <gh_stars>0
"""
SPDX-License-Identifier: BSD-3
"""
from enum import Enum, auto
class CallbackType(Enum):
    """Kinds of C callbacks. Typically, their signature differs."""

    FAPI_AUTH = auto()


# Number of callback slots reserved.
CALLBACK_COUNT = 10

# Base name for the generated wrapper of each callback kind.
CALLBACK_BASE_NAME = {CallbackType.FAPI_AUTH: "_auth_callback_wrapper_"}
| StarcoderdataPython |
375290 | #!/usr/bin/env python3
"""
Histórico:
2022-01-16 - Criar a versão 0.2.3
2021-12-12 - Alterar o nome de commandlib para cmdlib.
2021-11-07 - Inserir a função is_admin().
"""
from setuptools import setup
import os
import sys
file_setup = os.path.abspath(os.path.realpath(__file__))
dir_of_project = os.path.dirname(file_setup)
sys.path.insert(0, dir_of_project)
from cmdlib.__main__ import (
__version__,
__author__,
__repo__,
__download_file__,
)
DESCRIPTION = 'Trabalha com a linha de comando em sistemas Linux e Windows.'
LONG_DESCRIPTION = 'Trabalha com a linha de comando em sistemas Linux e Windows.'
setup(
name='cmdlib',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=__author__,
author_email='<EMAIL>',
license='MIT',
packages=['cmdlib'],
zip_safe=False,
url='https://gitlab.com/bschaves/cmd-lib',
project_urls = {
'Código fonte': __repo__,
'Download': __download_file__,
},
)
| StarcoderdataPython |
365476 | from collections import deque
class Solution:
    """LeetCode 542 "01 Matrix": distance from every cell to the nearest 0."""

    def updateMatrix(self, matrix: list[list[int]]) -> list[list[int]]:
        """Return, for each cell, its 4-adjacency distance to the nearest 0.

        Multi-source BFS: every 0-cell is seeded at distance 0 and every
        1-cell starts at an unreachable upper bound (h * w), then gets
        relaxed as the frontier expands. The matrix is updated in place
        and also returned.

        Fixed: the annotations used ``List``, which was never imported in
        this file (NameError at class-definition time); PEP 585 builtin
        generics need no import.
        """
        q = deque()
        h, w = len(matrix), len(matrix[0])
        max_step = h * w  # safe upper bound on any distance in the grid
        for y in range(h):
            for x in range(w):
                node = matrix[y][x]
                if node == 0:
                    q.append((y, x))
                if node == 1:
                    matrix[y][x] = max_step
        while q:
            y, x = q.popleft()
            for next_step in [(y + 1, x), (y - 1, x), (y, x + 1), (y, x - 1)]:
                next_y, next_x = next_step
                if 0 <= next_y < h and 0 <= next_x < w:
                    # Relax the neighbour if stepping from (y, x) is shorter.
                    if 1 + matrix[y][x] < matrix[next_y][next_x]:
                        matrix[next_y][next_x] = 1 + matrix[y][x]
                        q.append(next_step)
        return matrix
249574 | # %%
import json
import twitter as tw
import pandas as pd
import urllib.parse as p
from io import StringIO
class TwitterScraper:
    """Fetches ticket-related tweets from a timeline and renders them as HTML.

    Previously-seen tweet ids are tracked as a CSV of ids (either a string
    passed to :meth:`getTweets` or the file at ``id_csv_location``) so only
    new tweets are reported.
    """

    def __init__(self, access_token_key, access_token_secret, consumer_key, consumer_secret, id_csv_location="./id.csv"):
        self.access_token_key = access_token_key
        self.access_token_secret = access_token_secret
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.id_csv_location = id_csv_location
        # Build the API client and load the ids of already-processed tweets.
        self.getApi()
        self.getOldTweetIds()

    @classmethod
    def fromDict(cls, keys, id_csv_location="TimerTrigger1/id.csv"):
        """Alternate constructor: unpack the four credentials from a dict."""
        access_token_key = keys["access_token_key"]
        access_token_secret = keys["access_token_secret"]
        consumer_key = keys["consumer_key"]
        consumer_secret = keys["consumer_secret"]
        return cls(access_token_key, access_token_secret, consumer_key, consumer_secret, id_csv_location)

    def getApi(self):
        """Create the python-twitter Api client from the stored credentials."""
        self.api = tw.Api(
            access_token_key=self.access_token_key,
            access_token_secret=self.access_token_secret,
            consumer_key=self.consumer_key,
            consumer_secret=self.consumer_secret
        )

    def getTweets(self, oldTweets, filter_terms=["🎟", "ticket", "tickets", "all my bees members"], screen_name="BrentfordFC", count=100):
        """Fetch the timeline and keep unseen tweets matching filter_terms.

        ``oldTweets`` is a CSV string of previously-seen tweet ids.
        NOTE(review): the mutable list default for ``filter_terms`` is only
        ever read, never mutated, so it is safe here.
        """
        self.setOldTweets(oldTweets)
        allTweets = self.api.GetUserTimeline(
            screen_name=screen_name,
            exclude_replies=True,
            include_rts=False,
            count=count
        )
        # Case-insensitive match of any filter term against the tweet text.
        filterTweets = [t for t in allTweets if any([x.lower() in t.text.lower() for x in filter_terms])]
        self.newTweets = [t for t in filterTweets if not t.id in self.oldTweetIds]

    def makeEmailBody(self):
        """Render the new tweets as an HTML unordered list of links."""
        htmlList = []
        for nt in self.newTweets:
            url = nt.urls[0].expanded_url
            htmlList.append(f"<li><a href={url}>{nt.text}</a></li>")
        html = "\n".join(htmlList)
        return f"""<p><ul>{html}</ul></p>"""

    def makeAllTweetIds(self):
        """Return a CSV string containing the new plus the old tweet ids."""
        allTweetIds = [[t.id] for t in self.newTweets] + [[x] for x in self.oldTweetIds]
        return pd.DataFrame(allTweetIds, columns=["id"]).to_csv()

    def setOldTweets(self, oldTweets):
        """Parse a CSV string of seen tweet ids into ``self.oldTweetIds``."""
        oldTweetsStr = StringIO(oldTweets)
        try:
            self.oldTweetIds = pd.read_csv(oldTweetsStr)["id"].values
        # Fixed: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers empty/invalid CSV input.
        except Exception:
            self.oldTweetIds = []

    def getOldTweetIds(self):
        """Load seen tweet ids from the CSV file, or start with none."""
        try:
            self.oldTweetIds = pd.read_csv(self.id_csv_location)["id"].values
        except FileNotFoundError:
            self.oldTweetIds = []
# %%
if __name__ == "__main__":
with open("./keys.json", "r") as f:
keys = json.load(f)
ts = TwitterScraper(keys)
ts.getTweets()
html = ts.makeEmailBody()
# %%
| StarcoderdataPython |
import sqlite3

# Open (or create) the local training database.
connection = sqlite3.connect("Training.db")
cursor = connection.cursor()
# Ensure the measurement table exists: elapsed time, temperature, date.
cursor.execute(
    "CREATE TABLE IF NOT EXISTS iceCubeMelting"
    "(time INT,temperature REAL, date TEXT)"
)
connection.commit()
cursor.close()
connection.close()
3452141 | <filename>pointcloud_feature_visualization_open3d/features_vis_save.py
import os
from datetime import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
'''
Created by DogyoonLee
https://github.com/dogyoonlee/pointcloud_visualizer/tree/main/pointcloud_feature_visualization_open3d
'''
feature_clip = 0.1
class feature_vis():
    """Saves per-point feature activations as colored point clouds.

    Feature activations are reduced to one scalar per point, normalized,
    mapped through a matplotlib colormap, and written next to the xyz
    coordinates as ``B x N x (3 + 3)`` numpy arrays.
    """

    def __init__(self):
        # Timestamped output directory, created once per instance.
        self.save_path = self.save_path_create()

    def save_path_create(self):
        """Create (if needed) and return a timestamped output directory."""
        now = datetime.now()
        save_time_str = str(now.year) + str('-') + str(
            now.month) + str('-') + str(now.day) + str('-') + str(
                now.hour) + str('-') + str(now.minute)
        # <repo parent>/feature_vis/<timestamp>
        feature_save_path = os.path.join(
            os.path.join(
                os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                'feature_vis'), save_time_str)
        if not os.path.exists(feature_save_path):
            os.makedirs(feature_save_path)
        return feature_save_path

    def feature_vis_normalization(self, matrix):
        """Normalize each row of ``matrix`` to [0, 1], then exp-shape it.

        NOTE: ``matrix`` is modified in place by the subtract/divide steps.
        """
        matrix -= np.min(matrix, axis=1, keepdims=True)
        matrix /= np.max(matrix, axis=1, keepdims=True)
        # exp remaps [0, 1] to roughly [1e-8, e - 1], emphasising high scores.
        matrix = np.exp(matrix) - 0.99999999
        return matrix

    def feature_active_compute(self, features, compute_type='square_sum'):
        """Reduce ``B x N x C`` features to normalized ``B x N x 1`` scores."""
        # Fixed: the original compared strings with `is` (object identity),
        # which is fragile and a SyntaxWarning on modern CPython; use `==`.
        if compute_type == 'square_sum':
            compute_feature = np.sum(features**2, axis=2, keepdims=True)
        else:
            # Other reductions not implemented yet; fall back to square_sum
            # (same behaviour as the original code).
            compute_feature = np.sum(features**2, axis=2, keepdims=True)
        normalized_feature = self.feature_vis_normalization(compute_feature)
        return normalized_feature

    def color_mapping(self, score, color_type='Reds'):
        """Map one scalar score in [0, 1] to an RGB tuple via a colormap."""
        color_r = cm.get_cmap(color_type)
        return color_r(score)[:3]

    def score_to_rgb(self, feature_score):
        """Convert ``B x N x 1`` scores into ``B x N x 3`` RGB colors."""
        B, N, _ = feature_score.shape
        rgb_score = np.zeros((B, N, 3))
        for i in range(B):
            for j in range(N):
                rgb_score[i][j][:] = self.color_mapping(
                    score=feature_score[i][j][0], color_type='OrRd')
        return rgb_score

    def feature_save(self, xyz, features, layer_name='layer_1'):
        """Save xyz plus per-point RGB activation colors as a .npy file.

        input:
            xyz: B x N x 3 point coordinates (torch tensor)
            features: per-point features (torch tensor)
        output:
            saved file of shape B x N x (3 + 3)

        NOTE(review): the original docstring said features are B x N x C,
        but the transpose below implies they arrive as B x C x N — confirm
        against callers.
        """
        xyz = np.array(xyz.cpu())
        features = np.array(features.detach().cpu())
        B, N, Coord = xyz.shape
        _, C, _ = features.shape
        features = np.transpose(features, (0, 2, 1))
        features_active = self.feature_active_compute(
            features, compute_type='square_sum')
        features_rgb = self.score_to_rgb(feature_score=features_active)
        feature_vis_save = np.concatenate((xyz, features_rgb), axis=2)
        filename_base = 'feature_' + layer_name
        np.save(os.path.join(self.save_path, filename_base), feature_vis_save)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.