| body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
|---|---|---|---|---|---|---|---|
def check(self):
' Checks parameters and paths\n '
if ('UUID' not in PAR):
setattr(PAR, 'UUID', str(uuid4()))
if ('SCRATCH' not in PATH):
setattr(PATH, 'SCRATCH', join('/scratch/gpfs', getuser(), 'seisflows', PAR.UUID))
if ('LOCAL' not in PATH):
setattr(PATH, 'LOCAL', '')
super(tiger_md, self).check()
| -5,435,384,274,526,499,000
|
Checks parameters and paths
|
seisflows/system/tiger_md.py
|
check
|
chukren/seisflows
|
python
|
def check(self):
if ('UUID' not in PAR):
setattr(PAR, 'UUID', str(uuid4()))
if ('SCRATCH' not in PATH):
setattr(PATH, 'SCRATCH', join('/scratch/gpfs', getuser(), 'seisflows', PAR.UUID))
if ('LOCAL' not in PATH):
        setattr(PATH, 'LOCAL', '')
super(tiger_md, self).check()
|
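The method above only fills in defaults before deferring to the parent class. A minimal sketch of what the default `SCRATCH` value evaluates to, using only the standard-library calls `check()` relies on (`PAR` and `PATH` are seisflows' global parameter objects and are not modelled here):

```python
# Sketch of the default SCRATCH path computed by check(); PAR/PATH are
# seisflows globals and are omitted, so this only mirrors the join() call.
from getpass import getuser
from os.path import join
from uuid import uuid4

uuid = str(uuid4())
scratch = join('/scratch/gpfs', getuser(), 'seisflows', uuid)
print(scratch)  # e.g. /scratch/gpfs/<user>/seisflows/<uuid>
```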
def submit(self, *args, **kwargs):
' Submits job\n '
if (not exists(((PATH.SUBMIT + '/') + 'scratch'))):
unix.ln(PATH.SCRATCH, ((PATH.SUBMIT + '/') + 'scratch'))
super(tiger_md, self).submit(*args, **kwargs)
| -6,969,412,769,645,984,000
|
Submits job
|
seisflows/system/tiger_md.py
|
submit
|
chukren/seisflows
|
python
|
def submit(self, *args, **kwargs):
if (not exists(((PATH.SUBMIT + '/') + 'scratch'))):
unix.ln(PATH.SCRATCH, ((PATH.SUBMIT + '/') + 'scratch'))
super(tiger_md, self).submit(*args, **kwargs)
|
def cltv_lock_to_height(node, tx, to_address, amount, height=(- 1)):
'Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make\n a transaction that spends it.\n\n This transforms the output script to anyone can spend (OP_TRUE) if the\n lock time condition is valid.\n\n Default height is -1 which leads CLTV to fail\n\n TODO: test more ways that transactions using CLTV could be invalid (eg\n locktime requirements fail, sequence time requirements fail, etc).\n '
height_op = OP_1NEGATE
if (height > 0):
tx.vin[0].nSequence = 0
tx.nLockTime = height
height_op = CScriptNum(height)
tx.vout[0].scriptPubKey = CScript([height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
pad_tx(tx)
fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex']
fundtx = FromHex(CTransaction(), fundtx_raw)
fundtx.rehash()
from_txid = fundtx.hash
inputs = [{'txid': fundtx.hash, 'vout': 0}]
output = {to_address: amount}
spendtx_raw = node.createrawtransaction(inputs, output)
spendtx = FromHex(CTransaction(), spendtx_raw)
pad_tx(spendtx)
return (fundtx, spendtx)
| -6,207,897,528,851,743,000
|
Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
a transaction that spends it.
This transforms the output script to anyone can spend (OP_TRUE) if the
lock time condition is valid.
Default height is -1, which causes CLTV to fail
TODO: test more ways that transactions using CLTV could be invalid (eg
locktime requirements fail, sequence time requirements fail, etc).
|
test/functional/feature_cltv.py
|
cltv_lock_to_height
|
ComputerCraftr/devault
|
python
|
def cltv_lock_to_height(node, tx, to_address, amount, height=(- 1)):
height_op = OP_1NEGATE
if (height > 0):
tx.vin[0].nSequence = 0
tx.nLockTime = height
height_op = CScriptNum(height)
tx.vout[0].scriptPubKey = CScript([height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
pad_tx(tx)
fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex']
fundtx = FromHex(CTransaction(), fundtx_raw)
fundtx.rehash()
from_txid = fundtx.hash
inputs = [{'txid': fundtx.hash, 'vout': 0}]
output = {to_address: amount}
spendtx_raw = node.createrawtransaction(inputs, output)
spendtx = FromHex(CTransaction(), spendtx_raw)
pad_tx(spendtx)
return (fundtx, spendtx)
|
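For reference, here is how the two scriptPubKey variants built above differ; a minimal sketch assuming this repository's `test_framework.script` module is importable:

```python
# Sketch of the two script variants produced by cltv_lock_to_height.
from test_framework.script import (
    CScript, CScriptNum, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE,
)

# Default (height <= 0): OP_1NEGATE pushes -1, so CHECKLOCKTIMEVERIFY always fails.
always_fails = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])

# height > 0: spendable once the chain reaches `height`, provided the spending
# transaction sets nLockTime >= height and a non-final nSequence.
height = 1296  # hypothetical block height
unlocks_at_height = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
```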
def expand_dims(var, dim=0):
' Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html).\n var = torch.range(0, 9).view(-1, 2)\n torch.expand_dims(var, 0).size()\n # (1, 5, 2)\n '
sizes = list(var.size())
sizes.insert(dim, 1)
return var.view(*sizes)
| 123,622,040,983,809,650
|
Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html).
var = torch.range(0, 9).view(-1, 2)
torch.expand_dims(var, 0).size()
# (1, 5, 2)
|
losses/magnet_loss.py
|
expand_dims
|
jiajunhua/HaydenFaulkner-pytorch.repmet
|
python
|
def expand_dims(var, dim=0):
sizes = list(var.size())
sizes.insert(dim, 1)
return var.view(*sizes)
|
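A quick check of the behaviour documented above, using the `expand_dims` defined in this file (note that `torch.range` is deprecated; `torch.arange` produces the same ten values):

```python
import torch

x = torch.arange(10, dtype=torch.float32).view(-1, 2)  # shape (5, 2)
assert expand_dims(x, 0).size() == torch.Size([1, 5, 2])
# expand_dims is equivalent to the built-in unsqueeze:
assert torch.equal(expand_dims(x, 0), x.unsqueeze(0))
```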
def comparison_mask(a_labels, b_labels):
'Computes boolean mask for distance comparisons'
return torch.eq(expand_dims(a_labels, 1), expand_dims(b_labels, 0))
| 1,893,867,106,700,745,200
|
Computes boolean mask for distance comparisons
|
losses/magnet_loss.py
|
comparison_mask
|
jiajunhua/HaydenFaulkner-pytorch.repmet
|
python
|
def comparison_mask(a_labels, b_labels):
return torch.eq(expand_dims(a_labels, 1), expand_dims(b_labels, 0))
|
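Worked example: the mask is the broadcast outer comparison of the two label vectors, so `mask[i, j]` is True exactly when `a_labels[i] == b_labels[j]`:

```python
import torch

a = torch.tensor([0, 1, 2])
b = torch.tensor([1, 1, 2])
mask = comparison_mask(a, b)  # shape (3, 3)
# tensor([[False, False, False],
#         [ True,  True, False],
#         [False, False,  True]])
```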
def dynamic_partition(X, partitions, n_clusters):
'Partitions the data into the number of cluster bins'
cluster_bin = torch.chunk(X, n_clusters)
return cluster_bin
| 9,038,985,545,027,217,000
|
Partitions the data into the number of cluster bins
|
losses/magnet_loss.py
|
dynamic_partition
|
jiajunhua/HaydenFaulkner-pytorch.repmet
|
python
|
def dynamic_partition(X, partitions, n_clusters):
cluster_bin = torch.chunk(X, n_clusters)
return cluster_bin
|
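Note that the `partitions` argument is never consulted: `torch.chunk` splits `X` into `n_clusters` contiguous pieces, so the "partition" is positional rather than label-driven. For example:

```python
import torch

X = torch.arange(12.).view(6, 2)
bins = dynamic_partition(X, partitions=None, n_clusters=3)
# Three sequential chunks of two rows each; `partitions` plays no role.
print([b.shape for b in bins])  # [torch.Size([2, 2])] * 3
```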
def bool_from_env(var, default: bool=False) -> bool:
"Helper for converting env string into boolean.\n\n Returns bool True for string values: '1' or 'true', False otherwise.\n "
def str_to_bool(s: str) -> bool:
return (s.lower() in ('1', 'true'))
os_var = os.environ.get(var)
if (os_var is None):
return default
else:
return str_to_bool(os_var)
| 5,744,022,469,535,834,000
|
Helper for converting env string into boolean.
Returns bool True for string values: '1' or 'true', False otherwise.
|
src/ralph/settings/base.py
|
bool_from_env
|
p-bo/ralph
|
python
|
def bool_from_env(var, default: bool=False) -> bool:
"Helper for converting env string into boolean.\n\n Returns bool True for string values: '1' or 'true', False otherwise.\n "
def str_to_bool(s: str) -> bool:
return (s.lower() in ('1', 'true'))
os_var = os.environ.get(var)
if (os_var is None):
return default
else:
return str_to_bool(os_var)
|
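Usage sketch (the variable name is hypothetical): only the strings `'1'` and `'true'` (case-insensitive) parse as True; any other value, including `'yes'`, is False, and a missing variable yields the default:

```python
import os

os.environ['RALPH_DEBUG'] = 'TRUE'           # hypothetical variable name
assert bool_from_env('RALPH_DEBUG') is True
os.environ['RALPH_DEBUG'] = 'yes'            # not in ('1', 'true') -> False
assert bool_from_env('RALPH_DEBUG') is False
assert bool_from_env('UNSET_VAR', default=True) is True
```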
def _crypted_transfer(self, load, tries=3, timeout=60):
'\n In case of authentication errors, try to renegotiate authentication\n and retry the method.\n Indeed, we can fail too early in case of a master restart during a\n minion state execution call\n '
def _do_transfer():
data = self.sreq.send(self.crypt, self.auth.crypticle.dumps(load), tries, timeout)
if data:
data = self.auth.crypticle.loads(data)
return data
try:
return _do_transfer()
except salt.crypt.AuthenticationError:
self.auth = salt.crypt.SAuth(self.opts)
return _do_transfer()
| 4,214,069,522,247,119,400
|
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
|
salt/transport/__init__.py
|
_crypted_transfer
|
otrempe/salt
|
python
|
def _crypted_transfer(self, load, tries=3, timeout=60):
def _do_transfer():
data = self.sreq.send(self.crypt, self.auth.crypticle.dumps(load), tries, timeout)
if data:
data = self.auth.crypticle.loads(data)
return data
try:
return _do_transfer()
except salt.crypt.AuthenticationError:
self.auth = salt.crypt.SAuth(self.opts)
return _do_transfer()
|
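The retry logic in isolation, as a generic sketch (the names are stand-ins, not the Salt API): attempt the transfer once, rebuild authentication on an auth error, then retry exactly once:

```python
def transfer_with_reauth(do_transfer, reauthenticate, auth_error=Exception):
    """Generic one-retry pattern mirroring _crypted_transfer's control flow."""
    try:
        return do_transfer()
    except auth_error:
        reauthenticate()          # e.g. renegotiate after a master restart
        return do_transfer()      # a second failure propagates to the caller
```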
def _identifier_split(identifier):
    'Return (name, start, end, strand) tuple from an identifier (PRIVATE).'
(id, loc, strand) = identifier.split(':')
(start, end) = map(int, loc.split('-'))
start -= 1
return (id, start, end, strand)
| -346,816,607,895,191,600
|
Return (name, start, end, strand) tuple from an identifier (PRIVATE).
|
Bio/AlignIO/MauveIO.py
|
_identifier_split
|
BioinfoCat/biopython
|
python
|
def _identifier_split(identifier):
(id, loc, strand) = identifier.split(':')
(start, end) = map(int, loc.split('-'))
start -= 1
return (id, start, end, strand)
|
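Worked example of the `name:start-end:strand` identifier format this helper expects; note the start coordinate is shifted to 0-based:

```python
# 'chr1' is a hypothetical sequence name.
name, start, end, strand = _identifier_split('chr1:100-200:+')
print(name, start, end, strand)  # chr1 99 200 +  (start converted to 0-based)
```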
def __init__(self, *args, **kwargs):
'Initialize.'
super(MauveWriter, self).__init__(*args, **kwargs)
self._wrote_header = False
self._wrote_first = False
| 6,127,116,914,066,891,000
|
Initialize.
|
Bio/AlignIO/MauveIO.py
|
__init__
|
BioinfoCat/biopython
|
python
|
def __init__(self, *args, **kwargs):
super(MauveWriter, self).__init__(*args, **kwargs)
self._wrote_header = False
self._wrote_first = False
|
def write_alignment(self, alignment):
'Use this to write (another) single alignment to an open file.\n\n Note that sequences and their annotation are recorded\n together (rather than having a block of annotation followed\n by a block of aligned sequences).\n '
count = len(alignment)
self._length_of_sequences = alignment.get_alignment_length()
if (count == 0):
raise ValueError('Must have at least one sequence')
if (self._length_of_sequences == 0):
raise ValueError('Non-empty sequences are required')
if (not self._wrote_header):
self._wrote_header = True
self.handle.write('#FormatVersion Mauve1\n')
for i in range(1, (count + 1)):
self.handle.write(('#Sequence%sEntry\t%s\n' % (i, i)))
for (idx, record) in enumerate(alignment):
self._write_record(record, record_idx=idx)
self.handle.write('=\n')
| 7,175,577,788,833,652,000
|
Use this to write (another) single alignment to an open file.
Note that sequences and their annotation are recorded
together (rather than having a block of annotation followed
by a block of aligned sequences).
|
Bio/AlignIO/MauveIO.py
|
write_alignment
|
BioinfoCat/biopython
|
python
|
def write_alignment(self, alignment):
count = len(alignment)
self._length_of_sequences = alignment.get_alignment_length()
if (count == 0):
raise ValueError('Must have at least one sequence')
if (self._length_of_sequences == 0):
raise ValueError('Non-empty sequences are required')
if (not self._wrote_header):
self._wrote_header = True
self.handle.write('#FormatVersion Mauve1\n')
for i in range(1, (count + 1)):
self.handle.write(('#Sequence%sEntry\t%s\n' % (i, i)))
for (idx, record) in enumerate(alignment):
self._write_record(record, record_idx=idx)
self.handle.write('=\n')
|
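In practice this writer is driven through the public Bio.AlignIO interface; a minimal round-trip sketch, assuming "mauve" is the registered format name and that `input.xmfa` exists:

```python
from Bio import AlignIO

# Parse a Mauve/XMFA file and write it back out through MauveWriter.
alignments = AlignIO.parse("input.xmfa", "mauve")
AlignIO.write(alignments, "output.xmfa", "mauve")
```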
def _write_record(self, record, record_idx=0):
'Write a single SeqRecord to the file (PRIVATE).'
if (self._length_of_sequences != len(record.seq)):
raise ValueError('Sequences must all be the same length')
seq_name = record.name
try:
seq_name = str(int(record.name))
except ValueError:
seq_name = str((record_idx + 1))
if (('start' in record.annotations) and ('end' in record.annotations)):
suffix0 = ('/%s-%s' % (str(record.annotations['start']), str(record.annotations['end'])))
suffix1 = ('/%s-%s' % (str((record.annotations['start'] + 1)), str(record.annotations['end'])))
if (seq_name[(- len(suffix0)):] == suffix0):
seq_name = seq_name[:(- len(suffix0))]
if (seq_name[(- len(suffix1)):] == suffix1):
seq_name = seq_name[:(- len(suffix1))]
if (('start' in record.annotations) and ('end' in record.annotations) and ('strand' in record.annotations)):
id_line = ID_LINE_FMT.format(seq_name=seq_name, start=(record.annotations['start'] + 1), end=record.annotations['end'], strand=('+' if (record.annotations['strand'] == 1) else '-'), file=(record.name + '.fa'), ugly_hack=record.id)
lacking_annotations = False
else:
id_line = ID_LINE_FMT.format(seq_name=seq_name, start=0, end=0, strand='+', file=(record.name + '.fa'), ugly_hack=record.id)
lacking_annotations = True
if (((':0-0 ' in id_line) or (':1-0 ' in id_line)) and (not lacking_annotations)):
if (not self._wrote_first):
self._wrote_first = True
id_line = ID_LINE_FMT.format(seq_name=seq_name, start=0, end=0, strand='+', file=(record.name + '.fa'), ugly_hack=record.id)
self.handle.write((id_line + '\n'))
else:
self.handle.write(id_line)
for i in range(0, len(record.seq), 80):
self.handle.write(('%s\n' % str(record.seq[i:(i + 80)])))
| 5,108,774,003,558,236,000
|
Write a single SeqRecord to the file (PRIVATE).
|
Bio/AlignIO/MauveIO.py
|
_write_record
|
BioinfoCat/biopython
|
python
|
def _write_record(self, record, record_idx=0):
if (self._length_of_sequences != len(record.seq)):
raise ValueError('Sequences must all be the same length')
seq_name = record.name
try:
seq_name = str(int(record.name))
except ValueError:
seq_name = str((record_idx + 1))
if (('start' in record.annotations) and ('end' in record.annotations)):
suffix0 = ('/%s-%s' % (str(record.annotations['start']), str(record.annotations['end'])))
suffix1 = ('/%s-%s' % (str((record.annotations['start'] + 1)), str(record.annotations['end'])))
if (seq_name[(- len(suffix0)):] == suffix0):
seq_name = seq_name[:(- len(suffix0))]
if (seq_name[(- len(suffix1)):] == suffix1):
seq_name = seq_name[:(- len(suffix1))]
if (('start' in record.annotations) and ('end' in record.annotations) and ('strand' in record.annotations)):
id_line = ID_LINE_FMT.format(seq_name=seq_name, start=(record.annotations['start'] + 1), end=record.annotations['end'], strand=('+' if (record.annotations['strand'] == 1) else '-'), file=(record.name + '.fa'), ugly_hack=record.id)
lacking_annotations = False
else:
id_line = ID_LINE_FMT.format(seq_name=seq_name, start=0, end=0, strand='+', file=(record.name + '.fa'), ugly_hack=record.id)
lacking_annotations = True
if (((':0-0 ' in id_line) or (':1-0 ' in id_line)) and (not lacking_annotations)):
if (not self._wrote_first):
self._wrote_first = True
id_line = ID_LINE_FMT.format(seq_name=seq_name, start=0, end=0, strand='+', file=(record.name + '.fa'), ugly_hack=record.id)
self.handle.write((id_line + '\n'))
else:
self.handle.write(id_line)
for i in range(0, len(record.seq), 80):
self.handle.write(('%s\n' % str(record.seq[i:(i + 80)])))
|
def __next__(self):
'Parse the next alignment from the handle.'
handle = self.handle
line = handle.readline()
if (not line):
raise StopIteration
while (line and line.strip().startswith('#')):
line = handle.readline()
seqs = {}
seq_regions = {}
passed_end_alignment = False
latest_id = None
while True:
if (not line):
break
line = line.strip()
if line.startswith('='):
break
elif line.startswith('>'):
m = XMFA_HEADER_REGEX_BIOPYTHON.match(line)
if (not m):
m = XMFA_HEADER_REGEX.match(line)
if (not m):
                    raise ValueError('Malformed header line: %s' % line)
parsed_id = m.group('id')
parsed_data = {}
for key in ('start', 'end', 'id', 'strand', 'name', 'realname'):
try:
value = m.group(key)
if (key == 'start'):
value = int(value)
if (value > 0):
value -= 1
if (key == 'end'):
value = int(value)
parsed_data[key] = value
except IndexError:
pass
seq_regions[parsed_id] = parsed_data
if (parsed_id not in self._ids):
self._ids.append(parsed_id)
seqs.setdefault(parsed_id, '')
latest_id = parsed_id
else:
assert (not passed_end_alignment)
if (latest_id is None):
raise ValueError('Saw sequence before definition line')
seqs[latest_id] += line
line = handle.readline()
assert (len(seqs) <= len(self._ids))
self.ids = self._ids
self.sequences = seqs
if (self._ids and seqs):
alignment_length = max(map(len, list(seqs.values())))
records = []
for id in self._ids:
            if ((id not in seqs) or (len(seqs[id]) == 0)):
seq = ('-' * alignment_length)
else:
seq = seqs[id]
if (alignment_length != len(seq)):
raise ValueError('Sequences have different lengths, or repeated identifier')
if (id not in seq_regions):
continue
if ((seq_regions[id]['start'] != 0) or (seq_regions[id]['end'] != 0)):
suffix = '/{start}-{end}'.format(**seq_regions[id])
if ('realname' in seq_regions[id]):
corrected_id = seq_regions[id]['realname']
else:
corrected_id = seq_regions[id]['name']
if (corrected_id.count(suffix) == 0):
corrected_id += suffix
elif ('realname' in seq_regions[id]):
corrected_id = seq_regions[id]['realname']
else:
corrected_id = seq_regions[id]['name']
record = SeqRecord(Seq(seq, self.alphabet), id=corrected_id, name=id)
record.annotations['start'] = seq_regions[id]['start']
record.annotations['end'] = seq_regions[id]['end']
record.annotations['strand'] = (1 if (seq_regions[id]['strand'] == '+') else (- 1))
records.append(record)
return MultipleSeqAlignment(records, self.alphabet)
else:
raise StopIteration
| -5,048,572,972,203,061,000
|
Parse the next alignment from the handle.
|
Bio/AlignIO/MauveIO.py
|
__next__
|
BioinfoCat/biopython
|
python
|
def __next__(self):
handle = self.handle
line = handle.readline()
if (not line):
raise StopIteration
while (line and line.strip().startswith('#')):
line = handle.readline()
seqs = {}
seq_regions = {}
passed_end_alignment = False
latest_id = None
while True:
if (not line):
break
line = line.strip()
if line.startswith('='):
break
elif line.startswith('>'):
m = XMFA_HEADER_REGEX_BIOPYTHON.match(line)
if (not m):
m = XMFA_HEADER_REGEX.match(line)
if (not m):
                    raise ValueError('Malformed header line: %s' % line)
parsed_id = m.group('id')
parsed_data = {}
for key in ('start', 'end', 'id', 'strand', 'name', 'realname'):
try:
value = m.group(key)
if (key == 'start'):
value = int(value)
if (value > 0):
value -= 1
if (key == 'end'):
value = int(value)
parsed_data[key] = value
except IndexError:
pass
seq_regions[parsed_id] = parsed_data
if (parsed_id not in self._ids):
self._ids.append(parsed_id)
            seqs.setdefault(parsed_id, '')
latest_id = parsed_id
else:
assert (not passed_end_alignment)
if (latest_id is None):
raise ValueError('Saw sequence before definition line')
seqs[latest_id] += line
line = handle.readline()
assert (len(seqs) <= len(self._ids))
self.ids = self._ids
self.sequences = seqs
if (self._ids and seqs):
alignment_length = max(map(len, list(seqs.values())))
records = []
for id in self._ids:
            if ((id not in seqs) or (len(seqs[id]) == 0)):
seq = ('-' * alignment_length)
else:
seq = seqs[id]
if (alignment_length != len(seq)):
raise ValueError('Sequences have different lengths, or repeated identifier')
if (id not in seq_regions):
continue
if ((seq_regions[id]['start'] != 0) or (seq_regions[id]['end'] != 0)):
suffix = '/{start}-{end}'.format(**seq_regions[id])
if ('realname' in seq_regions[id]):
corrected_id = seq_regions[id]['realname']
else:
corrected_id = seq_regions[id]['name']
if (corrected_id.count(suffix) == 0):
corrected_id += suffix
elif ('realname' in seq_regions[id]):
corrected_id = seq_regions[id]['realname']
else:
corrected_id = seq_regions[id]['name']
record = SeqRecord(Seq(seq, self.alphabet), id=corrected_id, name=id)
record.annotations['start'] = seq_regions[id]['start']
record.annotations['end'] = seq_regions[id]['end']
record.annotations['strand'] = (1 if (seq_regions[id]['strand'] == '+') else (- 1))
records.append(record)
return MultipleSeqAlignment(records, self.alphabet)
else:
raise StopIteration
|
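Likewise, the parser above is normally reached through Bio.AlignIO; a minimal reading sketch (the file name is hypothetical) that also shows the start/end/strand annotations it attaches to each record:

```python
from Bio import AlignIO

for alignment in AlignIO.parse("alignment.xmfa", "mauve"):
    for record in alignment:
        print(record.id,
              record.annotations["start"],
              record.annotations["end"],
              record.annotations["strand"])  # 1 for '+', -1 for '-'
```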
def run_gaussian_dataset_montecarlo(iterations: int=30, m: int=10000, n: int=128, param_list=None, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='sgd', validation_split: float=0.2, shape_raw: List[int]=None, activation: t_activation='cart_relu', verbose: bool=False, do_all: bool=True, tensorboard: bool=False, polar: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, capacity_equivalent: bool=True, equiv_technique: str='ratio', dropout: Optional[float]=None, models: Optional[List[Model]]=None, plot_data: bool=True, early_stop: bool=False, shuffle: bool=True) -> str:
"\n This function is used to compare CVNN vs RVNN performance over statistical non-circular data.\n 1. Generates a complex-valued gaussian correlated noise with the characteristics given by the inputs.\n 2. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.\n 3. Saves several files into ./log/montecarlo/date/of/run/\n 3.1. run_summary.txt: Summary of the run models and data\n 3.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch\n 3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n :param iterations: Number of iterations to be done for each model\n :param m: Total size of the dataset (number of examples)\n :param n: Number of features / input vector\n :param param_list: A list of len = number of classes.\n Each element of the list is another list of len = 3 with values: [correlation_coeff, sigma_x, sigma_y]\n Example for dataset type A of paper https://arxiv.org/abs/2009.08340:\n param_list = [\n [0.5, 1, 1],\n [-0.5, 1, 1]\n ]\n Default: None will default to the example.\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param optimizer: Optimizer to be used. Keras optimizers are not allowed.\n Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.\n :param validation_split: float between 0 and 1. Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss\n and any model metrics on this data at the end of each epoch.\n The validation data is selected from the last samples in the x and y data provided, before shuffling.\n This argument is not supported when x is a dataset, generator or keras.utils.Sequence instance.\n :param shape_raw: List of sizes of each hidden layer.\n For example [64] will generate a CVNN with one hidden layer of size 64.\n Default None will default to example.\n :param activation: Activation function to be used at each hidden layer\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param tensorboard: If True, it will generate tensorboard outputs to check training values.\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.\n :param models: List of models to be compared.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if (param_list is None):
param_list = [[0.3, 1, 1], [(- 0.3), 1, 1]]
dataset = dp.CorrelatedGaussianCoeffCorrel(m, n, param_list, debug=False)
print('Database loaded...')
if (models is not None):
return run_montecarlo(models=models, dataset=dataset, open_dataset=None, iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq, validation_split=validation_split, validation_data=None, verbose=verbose, polar=polar, do_all=do_all, tensorboard=tensorboard, do_conf_mat=False, plot_data=plot_data, early_stop=early_stop, shuffle=shuffle)
else:
return mlp_run_real_comparison_montecarlo(dataset=dataset, open_dataset=None, iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq, optimizer=optimizer, shape_raw=shape_raw, activation=activation, verbose=verbose, polar=polar, do_all=do_all, tensorboard=tensorboard, capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique, dropout=dropout, validation_split=validation_split, plot_data=plot_data)
| -1,363,922,818,580,274,200
|
This function is used to compare CVNN vs RVNN performance over statistical non-circular data.
1. Generates a complex-valued gaussian correlated noise with the characteristics given by the inputs.
2. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.
3. Saves several files into ./log/montecarlo/date/of/run/
3.1. run_summary.txt: Summary of the run models and data
3.2. run_data.csv: Full information of performance of iteration of each model at each epoch
3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch
3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch
3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
:param iterations: Number of iterations to be done for each model
:param m: Total size of the dataset (number of examples)
:param n: Number of features / input vector
:param param_list: A list of len = number of classes.
Each element of the list is another list of len = 3 with values: [correlation_coeff, sigma_x, sigma_y]
Example for dataset type A of paper https://arxiv.org/abs/2009.08340:
param_list = [
[0.5, 1, 1],
[-0.5, 1, 1]
]
Default: None will default to the example.
:param epochs: Number of epochs for each iteration
:param batch_size: Batch size at each iteration
:param display_freq: Frequency in terms of epochs of when to do a checkpoint.
:param optimizer: Optimizer to be used. Keras optimizers are not allowed.
Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.
:param validation_split: float between 0 and 1. Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss
and any model metrics on this data at the end of each epoch.
The validation data is selected from the last samples in the x and y data provided, before shuffling.
This argument is not supported when x is a dataset, generator or keras.utils.Sequence instance.
:param shape_raw: List of sizes of each hidden layer.
For example [64] will generate a CVNN with one hidden layer of size 64.
Default: None will default to the example.
:param activation: Activation function to be used at each hidden layer
:param verbose: Different modes according to number:
- 0 or 'silent': No output at all
- 1 or False: Progress bar per iteration
- 2 or True or 'debug': Progress bar per epoch
:param tensorboard: If True, it will generate tensorboard outputs to check training values.
:param polar: Whether the RVNN should receive real and imaginary parts (False) or amplitude and phase (True)
:param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()
:param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.
:param models: List of models to be compared.
:return: (string) Full path to the run_data.csv generated file.
It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
|
cvnn/montecarlo.py
|
run_gaussian_dataset_montecarlo
|
NEGU93/cvnn
|
python
|
def run_gaussian_dataset_montecarlo(iterations: int=30, m: int=10000, n: int=128, param_list=None, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='sgd', validation_split: float=0.2, shape_raw: List[int]=None, activation: t_activation='cart_relu', verbose: bool=False, do_all: bool=True, tensorboard: bool=False, polar: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, capacity_equivalent: bool=True, equiv_technique: str='ratio', dropout: Optional[float]=None, models: Optional[List[Model]]=None, plot_data: bool=True, early_stop: bool=False, shuffle: bool=True) -> str:
"\n This function is used to compare CVNN vs RVNN performance over statistical non-circular data.\n 1. Generates a complex-valued gaussian correlated noise with the characteristics given by the inputs.\n 2. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.\n 3. Saves several files into ./log/montecarlo/date/of/run/\n 3.1. run_summary.txt: Summary of the run models and data\n 3.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch\n 3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n :param iterations: Number of iterations to be done for each model\n :param m: Total size of the dataset (number of examples)\n :param n: Number of features / input vector\n :param param_list: A list of len = number of classes.\n Each element of the list is another list of len = 3 with values: [correlation_coeff, sigma_x, sigma_y]\n Example for dataset type A of paper https://arxiv.org/abs/2009.08340:\n param_list = [\n [0.5, 1, 1],\n [-0.5, 1, 1]\n ]\n Default: None will default to the example.\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param optimizer: Optimizer to be used. Keras optimizers are not allowed.\n Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.\n :param validation_split: float between 0 and 1. Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss\n and any model metrics on this data at the end of each epoch.\n The validation data is selected from the last samples in the x and y data provided, before shuffling.\n This argument is not supported when x is a dataset, generator or keras.utils.Sequence instance.\n :param shape_raw: List of sizes of each hidden layer.\n For example [64] will generate a CVNN with one hidden layer of size 64.\n Default None will default to example.\n :param activation: Activation function to be used at each hidden layer\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param tensorboard: If True, it will generate tensorboard outputs to check training values.\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.\n :param models: List of models to be compared.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if (param_list is None):
param_list = [[0.3, 1, 1], [(- 0.3), 1, 1]]
dataset = dp.CorrelatedGaussianCoeffCorrel(m, n, param_list, debug=False)
print('Database loaded...')
if (models is not None):
return run_montecarlo(models=models, dataset=dataset, open_dataset=None, iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq, validation_split=validation_split, validation_data=None, verbose=verbose, polar=polar, do_all=do_all, tensorboard=tensorboard, do_conf_mat=False, plot_data=plot_data, early_stop=early_stop, shuffle=shuffle)
else:
return mlp_run_real_comparison_montecarlo(dataset=dataset, open_dataset=None, iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq, optimizer=optimizer, shape_raw=shape_raw, activation=activation, verbose=verbose, polar=polar, do_all=do_all, tensorboard=tensorboard, capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique, dropout=dropout, validation_split=validation_split, plot_data=plot_data)
|
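A hypothetical small-budget invocation, relying only on the defaults documented above:

```python
# Small run: 5 iterations over a 1000-example, 32-feature gaussian dataset.
csv_path = run_gaussian_dataset_montecarlo(
    iterations=5, m=1000, n=32,
    epochs=50, batch_size=100,
    shape_raw=[64], activation='cart_relu',
)
print(csv_path)  # path to the generated run_data.csv
```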
def run_montecarlo(models: List[Model], dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, validation_split: float=0.2, validation_data: Optional[Union[(Tuple, data.Dataset)]]=None, verbose: Union[(bool, int)]=False, do_conf_mat: bool=False, do_all: bool=True, tensorboard: bool=False, polar: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, plot_data: bool=False, early_stop: bool=False, shuffle: bool=True, preprocess_data: bool=True) -> str:
"\n This function is used to compare different neural networks performance.\n 1. Runs simulation and compares them.\n 2. Saves several files into ./log/montecarlo/date/of/run/\n 2.1. run_summary.txt: Summary of the run models and data\n 2.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 2.3. <model_name>_statistical_result.csv: Statistical results of all iterations of each model per epoch\n 2.4. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n\n :param models: List of cvnn.CvnnModel to be compared.\n :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training\n :param open_dataset: (Default: None)\n If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as test set (the rest will be use as train set)\n Default: 0.0 (No validation set).\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param do_conf_mat: Generate a confusion matrix based on results.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if open_dataset:
dataset = dp.OpenDataset(open_dataset)
monte_carlo = MonteCarlo()
for model in models:
monte_carlo.add_model(model)
if ((not open_dataset) and isinstance(dataset, dp.Dataset)):
dataset.save_data(monte_carlo.monte_carlo_analyzer.path)
monte_carlo.output_config['excel_summary'] = False
monte_carlo.output_config['tensorboard'] = tensorboard
monte_carlo.output_config['confusion_matrix'] = do_conf_mat
monte_carlo.output_config['plot_all'] = do_all
if (plot_data and isinstance(dataset, dp.Dataset)):
dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path, library='matplotlib')
if isinstance(dataset, dp.Dataset):
x = dataset.x
y = dataset.y
data_summary = dataset.summary()
else:
x = dataset
y = None
data_summary = ''
monte_carlo.run(x, y, iterations=iterations, validation_split=validation_split, validation_data=validation_data, epochs=epochs, batch_size=batch_size, display_freq=display_freq, early_stop=early_stop, shuffle=shuffle, verbose=verbose, data_summary=data_summary, real_cast_modes=polar, process_dataset=preprocess_data)
_save_montecarlo_log(iterations=iterations, path=str(monte_carlo.monte_carlo_analyzer.path), models_names=[str(model.name) for model in models], dataset_name=data_summary, num_classes=(str(dataset.y.shape[1]) if isinstance(dataset, dp.Dataset) else ''), polar_mode=str(polar), dataset_size=(str(dataset.x.shape[0]) if isinstance(dataset, dp.Dataset) else ''), features_size=(str(dataset.x.shape[1]) if isinstance(dataset, dp.Dataset) else ''), epochs=epochs, batch_size=batch_size)
return str('./log/run_data.csv')
| 8,390,725,974,427,718,000
|
This function is used to compare different neural networks performance.
1. Runs simulation and compares them.
2. Saves several files into ./log/montecarlo/date/of/run/
2.1. run_summary.txt: Summary of the run models and data
2.2. run_data.csv: Full information of performance of iteration of each model at each epoch
2.3. <model_name>_statistical_result.csv: Statistical results of all iterations of each model per epoch
2.4. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
:param models: List of cvnn.CvnnModel to be compared.
:param dataset: cvnn.dataset.Dataset with the dataset to be used on the training
:param open_dataset: (Default: None)
If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)
:param iterations: Number of iterations to be done for each model
:param epochs: Number of epochs for each iteration
:param batch_size: Batch size at each iteration
:param display_freq: Frequency in terms of epochs of when to do a checkpoint.
:param verbose: Different modes according to number:
- 0 or 'silent': No output at all
- 1 or False: Progress bar per iteration
- 2 or True or 'debug': Progress bar per epoch
:param polar: Whether the RVNN should receive real and imaginary parts (False) or amplitude and phase (True)
:param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()
:param validation_split: Float between 0 and 1.
Percentage of the input data to be used as test set (the rest will be used as train set)
Default: 0.0 (No validation set).
This input is ignored if validation_data is given.
:param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. This parameter takes precedence over validation_split.
It can be:
- tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
- A tf.data dataset.
:param do_conf_mat: Generate a confusion matrix based on results.
:return: (string) Full path to the run_data.csv generated file.
It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
|
cvnn/montecarlo.py
|
run_montecarlo
|
NEGU93/cvnn
|
python
|
def run_montecarlo(models: List[Model], dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, validation_split: float=0.2, validation_data: Optional[Union[(Tuple, data.Dataset)]]=None, verbose: Union[(bool, int)]=False, do_conf_mat: bool=False, do_all: bool=True, tensorboard: bool=False, polar: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, plot_data: bool=False, early_stop: bool=False, shuffle: bool=True, preprocess_data: bool=True) -> str:
"\n This function is used to compare different neural networks performance.\n 1. Runs simulation and compares them.\n 2. Saves several files into ./log/montecarlo/date/of/run/\n 2.1. run_summary.txt: Summary of the run models and data\n 2.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 2.3. <model_name>_statistical_result.csv: Statistical results of all iterations of each model per epoch\n 2.4. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n\n :param models: List of cvnn.CvnnModel to be compared.\n :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training\n :param open_dataset: (Default: None)\n If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as test set (the rest will be use as train set)\n Default: 0.0 (No validation set).\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param do_conf_mat: Generate a confusion matrix based on results.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if open_dataset:
dataset = dp.OpenDataset(open_dataset)
monte_carlo = MonteCarlo()
for model in models:
monte_carlo.add_model(model)
if ((not open_dataset) and isinstance(dataset, dp.Dataset)):
dataset.save_data(monte_carlo.monte_carlo_analyzer.path)
monte_carlo.output_config['excel_summary'] = False
monte_carlo.output_config['tensorboard'] = tensorboard
monte_carlo.output_config['confusion_matrix'] = do_conf_mat
monte_carlo.output_config['plot_all'] = do_all
if (plot_data and isinstance(dataset, dp.Dataset)):
dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path, library='matplotlib')
if isinstance(dataset, dp.Dataset):
x = dataset.x
y = dataset.y
data_summary = dataset.summary()
else:
x = dataset
y = None
        data_summary = ''
monte_carlo.run(x, y, iterations=iterations, validation_split=validation_split, validation_data=validation_data, epochs=epochs, batch_size=batch_size, display_freq=display_freq, early_stop=early_stop, shuffle=shuffle, verbose=verbose, data_summary=data_summary, real_cast_modes=polar, process_dataset=preprocess_data)
    _save_montecarlo_log(iterations=iterations, path=str(monte_carlo.monte_carlo_analyzer.path), models_names=[str(model.name) for model in models], dataset_name=data_summary, num_classes=(str(dataset.y.shape[1]) if isinstance(dataset, dp.Dataset) else ''), polar_mode=str(polar), dataset_size=(str(dataset.x.shape[0]) if isinstance(dataset, dp.Dataset) else ''), features_size=(str(dataset.x.shape[1]) if isinstance(dataset, dp.Dataset) else ''), epochs=epochs, batch_size=batch_size)
return str('./log/run_data.csv')
|
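And a sketch of comparing caller-supplied models; `model_a` and `model_b` are placeholders for pre-built Keras-style Model instances (not defined here), while the dataset class is the one used by run_gaussian_dataset_montecarlo above:

```python
import cvnn.dataset as dp

dataset = dp.CorrelatedGaussianCoeffCorrel(1000, 32, [[0.5, 1, 1], [-0.5, 1, 1]])
# model_a and model_b are hypothetical models to be compared.
csv_path = run_montecarlo(models=[model_a, model_b], dataset=dataset,
                          iterations=5, epochs=50, batch_size=100)
```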
def mlp_run_real_comparison_montecarlo(dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='adam', shape_raw=None, activation: t_activation='cart_relu', output_activation: t_activation=DEFAULT_OUTPUT_ACT, verbose: Union[(bool, int)]=False, do_all: bool=True, polar: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, dropout: float=0.5, validation_split: float=0.2, validation_data: Optional[Union[(Tuple, data.Dataset)]]=None, capacity_equivalent: bool=True, equiv_technique: str='ratio', shuffle: bool=True, tensorboard: bool=False, do_conf_mat: bool=False, plot_data: bool=True) -> str:
"\n This function is used to compare CVNN vs RVNN performance over any dataset.\n 1. Automatically creates two Multi-Layer Perceptrons (MLP), one complex and one real.\n 2. Runs simulation and compares them.\n 3. Saves several files into ./log/montecarlo/date/of/run/\n 3.1. run_summary.txt: Summary of the run models and data\n 3.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch\n 3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n\n :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training\n :param open_dataset: (None)\n If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param optimizer: Optimizer to be used. Keras optimizers are not allowed.\n Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.\n :param shape_raw: List of sizes of each hidden layer.\n For example [64] will generate a CVNN with one hidden layer of size 64.\n Default None will default to example.\n :param activation: Activation function to be used at each hidden layer\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as test set (the rest will be use as train set)\n Default: 0.0 (No validation set).\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)\n - True, it creates a capacity-equivalent model in terms of trainable parameters\n - False, it will double all layer size (except the last one if classifier=True)\n :param equiv_technique: Used to define the strategy of the capacity equivalent model.\n This parameter is ignored if capacity_equivalent=False\n - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'\n - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between\n multiplying by 2 or 1. 
Special case on the middle is treated as a compromise between the two.\n :param shuffle: TODO\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if (shape_raw is None):
shape_raw = [64]
if open_dataset:
dataset = dp.OpenDataset(open_dataset)
input_size = dataset.x.shape[1]
output_size = dataset.y.shape[1]
complex_network = get_mlp(input_size=input_size, output_size=output_size, shape_raw=shape_raw, activation=activation, dropout=dropout, output_activation=output_activation, optimizer=optimizer)
monte_carlo = RealVsComplex(complex_network, capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique)
monte_carlo.output_config['tensorboard'] = tensorboard
monte_carlo.output_config['plot_all'] = do_all
monte_carlo.output_config['excel_summary'] = False
monte_carlo.output_config['confusion_matrix'] = do_conf_mat
if plot_data:
dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path, library='matplotlib')
sleep(1)
monte_carlo.run(dataset.x, dataset.y, iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq, shuffle=shuffle, verbose=verbose, data_summary=dataset.summary(), real_cast_modes=polar, validation_split=validation_split, validation_data=validation_data)
max_epoch = monte_carlo.pandas_full_data['epoch'].max()
epoch_filter = (monte_carlo.pandas_full_data['epoch'] == max_epoch)
complex_filter = (monte_carlo.pandas_full_data['network'] == 'complex_network')
real_filter = (monte_carlo.pandas_full_data['network'] == 'real_network')
complex_last_epochs = monte_carlo.pandas_full_data[(epoch_filter & complex_filter)]
real_last_epochs = monte_carlo.pandas_full_data[(epoch_filter & real_filter)]
complex_median_train = complex_last_epochs['accuracy'].median()
real_median_train = real_last_epochs['accuracy'].median()
try:
complex_median = complex_last_epochs['val_accuracy'].median()
real_median = real_last_epochs['val_accuracy'].median()
complex_err = median_error(complex_last_epochs['val_accuracy'].quantile(0.75), complex_last_epochs['val_accuracy'].quantile(0.25), iterations)
real_err = median_error(real_last_epochs['val_accuracy'].quantile(0.75), real_last_epochs['val_accuracy'].quantile(0.25), iterations)
winner = ('CVNN' if (complex_median > real_median) else 'RVNN')
except KeyError:
complex_median = None
real_median = None
complex_err = median_error(complex_last_epochs['accuracy'].quantile(0.75), complex_last_epochs['accuracy'].quantile(0.25), iterations)
real_err = median_error(real_last_epochs['accuracy'].quantile(0.75), real_last_epochs['accuracy'].quantile(0.25), iterations)
if (complex_median_train > real_median_train):
winner = 'CVNN'
elif (complex_median_train == real_median_train):
winner = None
else:
winner = 'RVNN'
_save_rvnn_vs_cvnn_montecarlo_log(iterations=iterations, path=str(monte_carlo.monte_carlo_analyzer.path), dataset_name=dataset.dataset_name, optimizer=str(complex_network.optimizer.__class__), loss=str(complex_network.loss.__class__), hl=str(len(shape_raw)), shape=str(shape_raw), dropout=str(dropout), num_classes=str(dataset.y.shape[1]), polar_mode=str(polar), activation=activation, dataset_size=str(dataset.x.shape[0]), feature_size=str(dataset.x.shape[1]), epochs=epochs, batch_size=batch_size, winner=winner, complex_median=complex_median, real_median=real_median, complex_median_train=complex_median_train, real_median_train=real_median_train, complex_err=complex_err, real_err=real_err, filename='./log/mlp_montecarlo_summary.xlsx')
return str((monte_carlo.monte_carlo_analyzer.path / 'run_data.csv'))
| -8,105,493,941,948,592,000
|
This function is used to compare CVNN vs RVNN performance over any dataset.
1. Automatically creates two Multi-Layer Perceptrons (MLP), one complex and one real.
2. Runs simulation and compares them.
3. Saves several files into ./log/montecarlo/date/of/run/
3.1. run_summary.txt: Summary of the run models and data
3.2. run_data.csv: Full information of performance of iteration of each model at each epoch
3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch
3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch
3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
:param dataset: cvnn.dataset.Dataset with the dataset to be used on the training
:param open_dataset: (None)
If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)
:param iterations: Number of iterations to be done for each model
:param epochs: Number of epochs for each iteration
:param batch_size: Batch size at each iteration
:param display_freq: Frequency in terms of epochs of when to do a checkpoint.
:param optimizer: Optimizer to be used. Keras optimizers are not allowed.
Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.
:param shape_raw: List of sizes of each hidden layer.
For example [64] will generate a CVNN with one hidden layer of size 64.
Default: None will default to the example.
:param activation: Activation function to be used at each hidden layer
:param verbose: Different modes according to number:
- 0 or 'silent': No output at all
- 1 or False: Progress bar per iteration
- 2 or True or 'debug': Progress bar per epoch
:param polar: Whether the RVNN should receive real and imaginary parts (False) or amplitude and phase (True)
:param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()
:param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.
:param validation_split: Float between 0 and 1.
Percentage of the input data to be used as test set (the rest will be used as train set)
Default: 0.0 (No validation set).
This input is ignored if validation_data is given.
:param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. This parameter takes precedence over validation_split.
It can be:
- tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
- A tf.data dataset.
:param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or
trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)
- True, it creates a capacity-equivalent model in terms of trainable parameters
- False, it will double all layer size (except the last one if classifier=True)
:param equiv_technique: Used to define the strategy of the capacity equivalent model.
This parameter is ignored if capacity_equivalent=False
- 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'
- 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between
multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.
:param shuffle: TODO
:return: (string) Full path to the run_data.csv generated file.
It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
|
cvnn/montecarlo.py
|
mlp_run_real_comparison_montecarlo
|
NEGU93/cvnn
|
python
|
def mlp_run_real_comparison_montecarlo(dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='adam', shape_raw=None, activation: t_activation='cart_relu', output_activation: t_activation=DEFAULT_OUTPUT_ACT, verbose: Union[(bool, int)]=False, do_all: bool=True, polar: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, dropout: float=0.5, validation_split: float=0.2, validation_data: Optional[Union[(Tuple, data.Dataset)]]=None, capacity_equivalent: bool=True, equiv_technique: str='ratio', shuffle: bool=True, tensorboard: bool=False, do_conf_mat: bool=False, plot_data: bool=True) -> str:
    "\n This function is used to compare CVNN vs RVNN performance over any dataset.\n 1. Automatically creates two Multi-Layer Perceptrons (MLP), one complex and one real.\n 2. Runs simulation and compares them.\n 3. Saves several files into ./log/montecarlo/date/of/run/\n 3.1. run_summary.txt: Summary of the run models and data\n 3.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch\n 3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n\n :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training\n :param open_dataset: (None)\n If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param optimizer: Optimizer to be used. Keras optimizers are not allowed.\n Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.\n :param shape_raw: List of sizes of each hidden layer.\n For example [64] will generate a CVNN with one hidden layer of size 64.\n Default None falls back to [64].\n :param activation: Activation function to be used at each hidden layer\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param polar: (Boolean) Whether the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as validation set (the rest will be used as the train set)\n Default: 0.2.\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)\n - True, it creates a capacity-equivalent model in terms of trainable parameters\n - False, it will double all layer size (except the last one if classifier=True)\n :param equiv_technique: Used to define the strategy of the capacity equivalent model.\n This parameter is ignored if capacity_equivalent=False\n - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'\n - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between\n multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.\n :param shuffle: TODO\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if (shape_raw is None):
shape_raw = [64]
if open_dataset:
dataset = dp.OpenDataset(open_dataset)
input_size = dataset.x.shape[1]
output_size = dataset.y.shape[1]
complex_network = get_mlp(input_size=input_size, output_size=output_size, shape_raw=shape_raw, activation=activation, dropout=dropout, output_activation=output_activation, optimizer=optimizer)
monte_carlo = RealVsComplex(complex_network, capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique)
monte_carlo.output_config['tensorboard'] = tensorboard
monte_carlo.output_config['plot_all'] = do_all
monte_carlo.output_config['excel_summary'] = False
monte_carlo.output_config['confusion_matrix'] = do_conf_mat
if plot_data:
dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path, library='matplotlib')
sleep(1)
monte_carlo.run(dataset.x, dataset.y, iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq, shuffle=shuffle, verbose=verbose, data_summary=dataset.summary(), real_cast_modes=polar, validation_split=validation_split, validation_data=validation_data)
max_epoch = monte_carlo.pandas_full_data['epoch'].max()
epoch_filter = (monte_carlo.pandas_full_data['epoch'] == max_epoch)
complex_filter = (monte_carlo.pandas_full_data['network'] == 'complex_network')
real_filter = (monte_carlo.pandas_full_data['network'] == 'real_network')
complex_last_epochs = monte_carlo.pandas_full_data[(epoch_filter & complex_filter)]
real_last_epochs = monte_carlo.pandas_full_data[(epoch_filter & real_filter)]
complex_median_train = complex_last_epochs['accuracy'].median()
real_median_train = real_last_epochs['accuracy'].median()
try:
complex_median = complex_last_epochs['val_accuracy'].median()
real_median = real_last_epochs['val_accuracy'].median()
complex_err = median_error(complex_last_epochs['val_accuracy'].quantile(0.75), complex_last_epochs['val_accuracy'].quantile(0.25), iterations)
real_err = median_error(real_last_epochs['val_accuracy'].quantile(0.75), real_last_epochs['val_accuracy'].quantile(0.25), iterations)
winner = ('CVNN' if (complex_median > real_median) else 'RVNN')
except KeyError:
complex_median = None
real_median = None
complex_err = median_error(complex_last_epochs['accuracy'].quantile(0.75), complex_last_epochs['accuracy'].quantile(0.25), iterations)
real_err = median_error(real_last_epochs['accuracy'].quantile(0.75), real_last_epochs['accuracy'].quantile(0.25), iterations)
if (complex_median_train > real_median_train):
winner = 'CVNN'
elif (complex_median_train == real_median_train):
winner = None
else:
winner = 'RVNN'
_save_rvnn_vs_cvnn_montecarlo_log(iterations=iterations, path=str(monte_carlo.monte_carlo_analyzer.path), dataset_name=dataset.dataset_name, optimizer=str(complex_network.optimizer.__class__), loss=str(complex_network.loss.__class__), hl=str(len(shape_raw)), shape=str(shape_raw), dropout=str(dropout), num_classes=str(dataset.y.shape[1]), polar_mode=str(polar), activation=activation, dataset_size=str(dataset.x.shape[0]), feature_size=str(dataset.x.shape[1]), epochs=epochs, batch_size=batch_size, winner=winner, complex_median=complex_median, real_median=real_median, complex_median_train=complex_median_train, real_median_train=real_median_train, complex_err=complex_err, real_err=real_err, filename='./log/mlp_montecarlo_summary.xlsx')
return str((monte_carlo.monte_carlo_analyzer.path / 'run_data.csv'))
|
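Usage note: a minimal, hedged sketch of calling the function above. The saved-dataset path and the polar mode string are illustrative assumptions; the open_dataset branch the sketch relies on is visible in the body.

    from cvnn.montecarlo import mlp_run_real_comparison_montecarlo  # assumed import; the file is cvnn/montecarlo.py

    # With open_dataset set, the function loads the dataset itself (see the
    # `if open_dataset:` branch above), so dataset may be passed as None.
    run_data_csv = mlp_run_real_comparison_montecarlo(
        dataset=None,
        open_dataset='./data/my_saved_dataset',  # hypothetical saved-dataset folder
        iterations=10,                           # fewer than the default 30 for a quick run
        epochs=100,
        shape_raw=[64, 32],                      # two hidden layers
        polar='amplitude_phase')                 # assumed transform_to_real mode name
    print(run_data_csv)                          # full path to the generated run_data.csv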
def __init__(self):
'\n Class that allows the statistical comparison of several models on the same dataset\n '
self.models = []
self.pandas_full_data = pd.DataFrame()
self.monte_carlo_analyzer = MonteCarloAnalyzer()
self.verbose = 1
self.output_config = {'plot_all': False, 'confusion_matrix': False, 'excel_summary': True, 'summary_of_run': True, 'tensorboard': False, 'save_weights': False, 'safety_checkpoints': False}
| 6,956,332,109,359,251,000
|
Class that allows the statistical comparison of several models on the same dataset
|
cvnn/montecarlo.py
|
__init__
|
NEGU93/cvnn
|
python
|
def __init__(self):
'\n \n '
self.models = []
self.pandas_full_data = pd.DataFrame()
self.monte_carlo_analyzer = MonteCarloAnalyzer()
self.verbose = 1
self.output_config = {'plot_all': False, 'confusion_matrix': False, 'excel_summary': True, 'summary_of_run': True, 'tensorboard': False, 'save_weights': False, 'safety_checkpoints': False}
|
def add_model(self, model: Type[Model]):
'\n Adds a cvnn.CvnnModel to the list to then compare between them\n '
self.models.append(model)
| -5,031,837,254,433,821,000
|
Adds a cvnn.CvnnModel to the list to then compare between them
|
cvnn/montecarlo.py
|
add_model
|
NEGU93/cvnn
|
python
|
def add_model(self, model: Type[Model]):
'\n \n '
self.models.append(model)
|
def run(self, x, y, data_summary: str='', real_cast_modes: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, validation_split: float=0.2, validation_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]]=None, test_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]]=None, iterations: int=100, epochs: int=10, batch_size: int=100, early_stop: bool=False, shuffle: bool=True, verbose: Optional[Union[(bool, int, str)]]=1, display_freq: int=1, same_weights: bool=False, process_dataset: bool=True):
    "\n This function is used to compare all models added with `self.add_model` method.\n Runs the iterations on the dataset (x, y).\n 1. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.\n 2. Saves several files into ./log/montecarlo/date/of/run/\n 2.1. run_summary.txt: Summary of the run models and data\n 2.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 2.3. <model.name>_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 2.4. (Optional with parameter plot_all)\n `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n :param x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs).\n - A tf.data dataset. Should return a tuple (inputs, targets). Preferred data type (less overhead).\n :param y: Labels/Target data. Like the input data x, it could be either Numpy array(s) or TensorFlow tensor(s).\n If x is a dataset then y will be ignored (default None)\n :param data_summary: (String) Dataset name to keep track of it\n :param real_cast_modes: mode parameter used by cvnn.utils.transform_to_real to be used when the model to\n train is real-valued. One of the following:\n - String with the mode listed in cvnn.utils.transform_to_real to be used by all the real-valued models to\n cast complex data to real.\n - List or Tuple of strings: Same size as self.models. Mode on how to cast complex data to real for each\n model in self.model.\n real_cast_modes[i] will indicate how to cast data for self.models[i] (ignored when model is complex).\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as validation set (the rest will be used as the train set)\n Default: 0.2.\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param test_data: Data on which to evaluate the loss and any model metrics at the end of a model training.\n The model will not be trained on this data.\n If test data is not None (default) it will generate a file called `test_results.csv` with the\n statistical results from the test data.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Integer (Default 1). Only relevant if validation data is provided.\n Frequency in terms of epochs before running the validation.\n :param shuffle: (Boolean) Whether to shuffle the training data before each epoch.\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param early_stop: (Default: False) Whether to implement early stop on training.\n :param same_weights: (Default False) If True it will use the same weights at each iteration.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if verbose:
self.verbose = self._parse_verbose(verbose)
test_data_cols = None
if (test_data is not None):
test_data_cols = (['network'] + [n.get_config()['name'] for n in self.models[0].metrics])
real_cast_modes = self._check_real_cast_modes(real_cast_modes)
(confusion_matrix, pbar, test_results) = self._beginning_callback(iterations, epochs, batch_size, shuffle, data_summary, test_data_cols)
w_save = []
for model in self.models:
w_save.append(model.get_weights())
for it in range(iterations):
if (self.verbose == 2):
logger.info('Iteration {}/{}'.format((it + 1), iterations))
for (i, model) in enumerate(self.models):
(x_fit, val_data_fit, test_data_fit) = self._get_fit_dataset(model.inputs[0].dtype.is_complex, x, validation_data, test_data, real_cast_modes[i], process_dataset=process_dataset)
clone_model = tf.keras.models.clone_model(model)
if isinstance(model.loss, tf.keras.losses.Loss):
loss = model.loss.__class__.from_config(config=model.loss.get_config())
else:
loss = model.loss
clone_model.compile(optimizer=model.optimizer.__class__.from_config(model.optimizer.get_config()), loss=loss, metrics=['accuracy'])
if same_weights:
clone_model.set_weights(w_save[i])
temp_path = (self.monte_carlo_analyzer.path / f'run/iteration{it}_model{i}_{model.name}')
os.makedirs(temp_path, exist_ok=True)
callbacks = []
if self.output_config['tensorboard']:
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=(temp_path / 'tensorboard'), histogram_freq=1)
callbacks.append(tensorboard_callback)
if early_stop:
eas = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
callbacks.append(eas)
run_result = clone_model.fit(x_fit, y, validation_split=validation_split, validation_data=val_data_fit, epochs=epochs, batch_size=batch_size, verbose=(self.verbose == 2), validation_freq=display_freq, callbacks=callbacks, shuffle=shuffle)
test_results = self._inner_callback(clone_model, validation_data, confusion_matrix, real_cast_modes[i], i, run_result, test_results, test_data_fit, temp_path)
self._outer_callback(pbar)
return self._end_callback(x, y, iterations, data_summary, real_cast_modes, epochs, batch_size, confusion_matrix, test_results, pbar, w_save)
| 3,118,653,797,893,880,300
|
This function is used to compare all models added with `self.add_model` method.
Runs the iterations on the dataset (x, y).
1. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.
2. Saves several files into ./log/montecarlo/date/of/run/
2.1. run_summary.txt: Summary of the run models and data
2.2. run_data.csv: Full information of performance of iteration of each model at each epoch
2.3. <model.name>_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch
2.4. (Optional with parameter plot_all)
`plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
:param x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs).
- A tf.data dataset. Should return a tuple (inputs, targets). Preferred data type (less overhead).
:param y: Labels/Target data. Like the input data x, it could be either Numpy array(s) or TensorFlow tensor(s).
If x is a dataset then y will be ignored (default None)
:param data_summary: (String) Dataset name to keep track of it
:param real_cast_modes: mode parameter used by cvnn.utils.transform_to_real to be used when the model to
train is real-valued. One of the following:
- String with the mode listed in cvnn.utils.transform_to_real to be used by all the real-valued models to
cast complex data to real.
- List or Tuple of strings: Same size as self.models. Mode on how to cast complex data to real for each
model in self.model.
real_cast_modes[i] will indicate how to cast data for self.models[i] (ignored when model is complex).
:param validation_split: Float between 0 and 1.
Percentage of the input data to be used as validation set (the rest will be used as the train set)
Default: 0.2.
This input is ignored if validation_data is given.
:param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. This parameter takes precedence over validation_split.
It can be:
- tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
- A tf.data dataset.
:param test_data: Data on which to evaluate the loss and any model metrics at the end of a model training.
The model will not be trained on this data.
If test data is not None (default) it will generate a file called `test_results.csv` with the
statistical results from the test data.
It can be:
- tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
- A tf.data dataset.
:param iterations: Number of iterations to be done for each model
:param epochs: Number of epochs for each iteration
:param batch_size: Batch size at each iteration
:param display_freq: Integer (Default 1). Only relevant if validation data is provided.
Frequency in terms of epochs before running the validation.
:param shuffle: (Boolean) Whether to shuffle the training data before each epoch.
:param verbose: Different modes according to number:
- 0 or 'silent': No output at all
- 1 or False: Progress bar per iteration
- 2 or True or 'debug': Progress bar per epoch
:param early_stop: (Default: False) Whether to implement early stop on training.
:param same_weights: (Default False) If True it will use the same weights at each iteration.
:return: (string) Full path to the run_data.csv generated file.
It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
|
cvnn/montecarlo.py
|
run
|
NEGU93/cvnn
|
python
|
def run(self, x, y, data_summary: str='', real_cast_modes: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, validation_split: float=0.2, validation_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]]=None, test_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]]=None, iterations: int=100, epochs: int=10, batch_size: int=100, early_stop: bool=False, shuffle: bool=True, verbose: Optional[Union[(bool, int, str)]]=1, display_freq: int=1, same_weights: bool=False, process_dataset: bool=True):
    "\n This function is used to compare all models added with `self.add_model` method.\n Runs the iterations on the dataset (x, y).\n 1. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.\n 2. Saves several files into ./log/montecarlo/date/of/run/\n 2.1. run_summary.txt: Summary of the run models and data\n 2.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 2.3. <model.name>_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 2.4. (Optional with parameter plot_all)\n `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n :param x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs).\n - A tf.data dataset. Should return a tuple (inputs, targets). Preferred data type (less overhead).\n :param y: Labels/Target data. Like the input data x, it could be either Numpy array(s) or TensorFlow tensor(s).\n If x is a dataset then y will be ignored (default None)\n :param data_summary: (String) Dataset name to keep track of it\n :param real_cast_modes: mode parameter used by cvnn.utils.transform_to_real to be used when the model to\n train is real-valued. One of the following:\n - String with the mode listed in cvnn.utils.transform_to_real to be used by all the real-valued models to\n cast complex data to real.\n - List or Tuple of strings: Same size as self.models. Mode on how to cast complex data to real for each\n model in self.model.\n real_cast_modes[i] will indicate how to cast data for self.models[i] (ignored when model is complex).\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as validation set (the rest will be used as the train set)\n Default: 0.2.\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param test_data: Data on which to evaluate the loss and any model metrics at the end of a model training.\n The model will not be trained on this data.\n If test data is not None (default) it will generate a file called `test_results.csv` with the\n statistical results from the test data.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Integer (Default 1). Only relevant if validation data is provided.\n Frequency in terms of epochs before running the validation.\n :param shuffle: (Boolean) Whether to shuffle the training data before each epoch.\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param early_stop: (Default: False) Whether to implement early stop on training.\n :param same_weights: (Default False) If True it will use the same weights at each iteration.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n "
if verbose:
self.verbose = self._parse_verbose(verbose)
test_data_cols = None
if (test_data is not None):
test_data_cols = (['network'] + [n.get_config()['name'] for n in self.models[0].metrics])
real_cast_modes = self._check_real_cast_modes(real_cast_modes)
(confusion_matrix, pbar, test_results) = self._beginning_callback(iterations, epochs, batch_size, shuffle, data_summary, test_data_cols)
w_save = []
for model in self.models:
w_save.append(model.get_weights())
for it in range(iterations):
if (self.verbose == 2):
logger.info('Iteration {}/{}'.format((it + 1), iterations))
for (i, model) in enumerate(self.models):
(x_fit, val_data_fit, test_data_fit) = self._get_fit_dataset(model.inputs[0].dtype.is_complex, x, validation_data, test_data, real_cast_modes[i], process_dataset=process_dataset)
clone_model = tf.keras.models.clone_model(model)
if isinstance(model.loss, tf.keras.losses.Loss):
loss = model.loss.__class__.from_config(config=model.loss.get_config())
else:
loss = model.loss
clone_model.compile(optimizer=model.optimizer.__class__.from_config(model.optimizer.get_config()), loss=loss, metrics=['accuracy'])
if same_weights:
clone_model.set_weights(w_save[i])
temp_path = (self.monte_carlo_analyzer.path / f'run/iteration{it}_model{i}_{model.name}')
os.makedirs(temp_path, exist_ok=True)
callbacks = []
if self.output_config['tensorboard']:
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=(temp_path / 'tensorboard'), histogram_freq=1)
callbacks.append(tensorboard_callback)
if early_stop:
eas = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
callbacks.append(eas)
run_result = clone_model.fit(x_fit, y, validation_split=validation_split, validation_data=val_data_fit, epochs=epochs, batch_size=batch_size, verbose=(self.verbose == 2), validation_freq=display_freq, callbacks=callbacks, shuffle=shuffle)
test_results = self._inner_callback(clone_model, validation_data, confusion_matrix, real_cast_modes[i], i, run_result, test_results, test_data_fit, temp_path)
self._outer_callback(pbar)
return self._end_callback(x, y, iterations, data_summary, real_cast_modes, epochs, batch_size, confusion_matrix, test_results, pbar, w_save)
|
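Usage note: a hedged sketch of the generic comparison flow with the MonteCarlo class above: register compiled Keras models with add_model, then call run. The toy models and random data are placeholders, not part of the library.

    import numpy as np
    import tensorflow as tf
    from cvnn.montecarlo import MonteCarlo  # class defined in this file

    def make_model(hidden):
        # Plain real-valued MLP used only as a placeholder.
        model = tf.keras.Sequential([
            tf.keras.layers.Input(shape=(128,)),
            tf.keras.layers.Dense(hidden, activation='relu'),
            tf.keras.layers.Dense(2, activation='softmax')],
            name='mlp_{}'.format(hidden))
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        return model

    x = np.random.randn(1000, 128).astype('float32')
    y = tf.keras.utils.to_categorical(np.random.randint(0, 2, 1000))

    monte_carlo = MonteCarlo()
    monte_carlo.add_model(make_model(64))
    monte_carlo.add_model(make_model(128))
    monte_carlo.run(x, y, iterations=5, epochs=10, batch_size=100, validation_split=0.2)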
def _save_summary_of_run(self, run_summary, data_summary):
'\n Saves 2 files:\n - run_summary.txt: A user-friendly summary of the monte carlo run.\n - models_details.json: A full serialized version of the models.\n Contains info missing from the txt file, like the loss or optimizer.\n '
with open(str((self.monte_carlo_analyzer.path / 'run_summary.txt')), 'w') as file:
file.write(run_summary)
file.write((data_summary + '\n'))
file.write('Models:\n')
for model in self.models:
model.summary(print_fn=(lambda x: file.write((x + '\n'))))
json_dict = {}
for (i, model) in enumerate(self.models):
json_dict[str(i)] = {'name': model.name, 'loss': (model.loss if isinstance(model.loss, str) else model.loss.get_config()), 'optimizer': model.optimizer.get_config(), 'layers': [layer.get_config() for layer in model.layers]}
with open((self.monte_carlo_analyzer.path / 'models_details.json'), 'w') as fp:
json.dump(str(json_dict), fp)
| 2,147,652,564,802,560,800
|
Saves 2 files:
- run_summary.txt: A user-friendly summary of the monte carlo run.
- models_details.json: A full serialized version of the models.
Contains info missing from the txt file, like the loss or optimizer.
|
cvnn/montecarlo.py
|
_save_summary_of_run
|
NEGU93/cvnn
|
python
|
def _save_summary_of_run(self, run_summary, data_summary):
'\n Saves 2 files:\n - run_summary.txt: A user-friendly summary of the monte carlo run.\n - models_details.json: A full serialized version of the models.\n Contains info missing from the txt file, like the loss or optimizer.\n '
with open(str((self.monte_carlo_analyzer.path / 'run_summary.txt')), 'w') as file:
file.write(run_summary)
file.write((data_summary + '\n'))
file.write('Models:\n')
for model in self.models:
model.summary(print_fn=(lambda x: file.write((x + '\n'))))
json_dict = {}
for (i, model) in enumerate(self.models):
json_dict[str(i)] = {'name': model.name, 'loss': (model.loss if isinstance(model.loss, str) else model.loss.get_config()), 'optimizer': model.optimizer.get_config(), 'layers': [layer.get_config() for layer in model.layers]}
with open((self.monte_carlo_analyzer.path / 'models_details.json'), 'w') as fp:
json.dump(str(json_dict), fp)
|
def __init__(self, complex_model: Type[Model], capacity_equivalent: bool=True, equiv_technique: str='ratio'):
"\n :param complex_model: Complex keras model (ex: sequential)\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)\n - True, it creates a capacity-equivalent model in terms of trainable parameters\n - False, it will double all layer size (except the last one if classifier=True)\n :param equiv_technique: Used to define the strategy of the capacity equivalent model.\n This parameter is ignored if capacity_equivalent=False\n - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'\n - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between\n multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.\n "
super().__init__()
self.add_model(complex_model)
self.add_model(get_real_equivalent(complex_model, capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique, name='real_network'))
| -8,439,983,612,307,735,000
|
:param complex_model: Complex keras model (ex: sequential)
:param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or
trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)
- True, it creates a capacity-equivalent model in terms of trainable parameters
- False, it will double all layer size (except the last one if classifier=True)
:param equiv_technique: Used to define the strategy of the capacity equivalent model.
This parameter is ignored if capacity_equivalent=False
- 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'
- 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between
multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.
|
cvnn/montecarlo.py
|
__init__
|
NEGU93/cvnn
|
python
|
def __init__(self, complex_model: Type[Model], capacity_equivalent: bool=True, equiv_technique: str='ratio'):
"\n :param complex_model: Complex keras model (ex: sequential)\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)\n - True, it creates a capacity-equivalent model in terms of trainable parameters\n - False, it will double all layer size (except the last one if classifier=True)\n :param equiv_technique: Used to define the strategy of the capacity equivalent model.\n This parameter is ignored if capacity_equivalent=False\n - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'\n - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between\n multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.\n "
super().__init__()
self.add_model(complex_model)
self.add_model(get_real_equivalent(complex_model, capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique, name='real_network'))
|
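Usage note: a hedged construction sketch for RealVsComplex. The cvnn layer and output-activation names (ComplexInput, ComplexDense, softmax_real_with_abs) are assumptions about the package API; 'cart_relu' appears elsewhere in this file.

    import tensorflow as tf
    import cvnn.layers as complex_layers  # assumed module path
    from cvnn.montecarlo import RealVsComplex

    complex_model = tf.keras.Sequential([
        complex_layers.ComplexInput(input_shape=(128,)),                      # assumed layer name
        complex_layers.ComplexDense(64, activation='cart_relu'),
        complex_layers.ComplexDense(2, activation='softmax_real_with_abs')])  # assumed activation name
    complex_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

    # Registers the complex model plus its capacity-equivalent real twin; both are
    # then available through the inherited add_model/run machinery.
    monte_carlo = RealVsComplex(complex_model, capacity_equivalent=True, equiv_technique='ratio')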
def require_collection_playable(handler):
'Decorator that checks if the user can play the given collection.'
def test_can_play(self, collection_id, **kwargs):
'Check if the current user can play the collection.'
actor = rights_manager.Actor(self.user_id)
can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
can_view = actor.can_view(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
if (can_play and can_view):
return handler(self, collection_id, **kwargs)
else:
raise self.PageNotFoundException
return test_can_play
| 9,035,655,413,190,753,000
|
Decorator that checks if the user can play the given collection.
|
core/controllers/collection_viewer.py
|
require_collection_playable
|
Himanshu1495/oppia
|
python
|
def require_collection_playable(handler):
def test_can_play(self, collection_id, **kwargs):
'Check if the current user can play the collection.'
actor = rights_manager.Actor(self.user_id)
can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
can_view = actor.can_view(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
if (can_play and can_view):
return handler(self, collection_id, **kwargs)
else:
raise self.PageNotFoundException
return test_can_play
|
def test_can_play(self, collection_id, **kwargs):
'Check if the current user can play the collection.'
actor = rights_manager.Actor(self.user_id)
can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
can_view = actor.can_view(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
if (can_play and can_view):
return handler(self, collection_id, **kwargs)
else:
raise self.PageNotFoundException
| 8,139,909,462,468,710,000
|
Check if the current user can play the collection.
|
core/controllers/collection_viewer.py
|
test_can_play
|
Himanshu1495/oppia
|
python
|
def test_can_play(self, collection_id, **kwargs):
actor = rights_manager.Actor(self.user_id)
can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
can_view = actor.can_view(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
if (can_play and can_view):
return handler(self, collection_id, **kwargs)
else:
raise self.PageNotFoundException
|
@require_collection_playable
def get(self, collection_id):
'Handles GET requests.'
try:
collection = collection_services.get_collection_by_id(collection_id)
except Exception as e:
raise self.PageNotFoundException(e)
whitelisted_usernames = config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value
self.values.update({'can_edit': (bool(self.username) and (self.username in whitelisted_usernames) and (self.username not in config_domain.BANNED_USERNAMES.value) and rights_manager.Actor(self.user_id).can_edit(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)), 'is_logged_in': bool(self.user_id), 'collection_id': collection_id, 'collection_title': collection.title, 'collection_skills': collection.skills, 'is_private': rights_manager.is_collection_private(collection_id), 'meta_name': collection.title, 'meta_description': utils.capitalize_string(collection.objective)})
self.render_template('collection_player/collection_player.html')
| -5,600,260,206,156,374,000
|
Handles GET requests.
|
core/controllers/collection_viewer.py
|
get
|
Himanshu1495/oppia
|
python
|
@require_collection_playable
def get(self, collection_id):
try:
collection = collection_services.get_collection_by_id(collection_id)
except Exception as e:
raise self.PageNotFoundException(e)
whitelisted_usernames = config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value
self.values.update({'can_edit': (bool(self.username) and (self.username in whitelisted_usernames) and (self.username not in config_domain.BANNED_USERNAMES.value) and rights_manager.Actor(self.user_id).can_edit(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)), 'is_logged_in': bool(self.user_id), 'collection_id': collection_id, 'collection_title': collection.title, 'collection_skills': collection.skills, 'is_private': rights_manager.is_collection_private(collection_id), 'meta_name': collection.title, 'meta_description': utils.capitalize_string(collection.objective)})
self.render_template('collection_player/collection_player.html')
|
def get(self, collection_id):
'Populates the data on the individual collection page.'
allow_invalid_explorations = bool(self.request.get('allow_invalid_explorations'))
try:
collection_dict = collection_services.get_learner_collection_dict_by_id(collection_id, self.user_id, allow_invalid_explorations=allow_invalid_explorations)
except Exception as e:
raise self.PageNotFoundException(e)
self.values.update({'can_edit': (self.user_id and rights_manager.Actor(self.user_id).can_edit(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)), 'collection': collection_dict, 'info_card_image_url': utils.get_info_card_url_for_category(collection_dict['category']), 'is_logged_in': bool(self.user_id), 'session_id': utils.generate_new_session_id()})
self.render_json(self.values)
| -5,690,707,837,412,497,000
|
Populates the data on the individual collection page.
|
core/controllers/collection_viewer.py
|
get
|
Himanshu1495/oppia
|
python
|
def get(self, collection_id):
allow_invalid_explorations = bool(self.request.get('allow_invalid_explorations'))
try:
collection_dict = collection_services.get_learner_collection_dict_by_id(collection_id, self.user_id, allow_invalid_explorations=allow_invalid_explorations)
except Exception as e:
raise self.PageNotFoundException(e)
self.values.update({'can_edit': (self.user_id and rights_manager.Actor(self.user_id).can_edit(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)), 'collection': collection_dict, 'info_card_image_url': utils.get_info_card_url_for_category(collection_dict['category']), 'is_logged_in': bool(self.user_id), 'session_id': utils.generate_new_session_id()})
self.render_json(self.values)
|
def to_getdist(nested_samples):
'Convert from anesthetic to getdist samples.\n\n Parameters\n ----------\n nested_samples: MCMCSamples or NestedSamples\n anesthetic samples to be converted\n\n Returns\n -------\n getdist_samples: getdist.mcsamples.MCSamples\n getdist equivalent samples\n '
import getdist
samples = nested_samples.to_numpy()
weights = nested_samples.weights
loglikes = ((- 2) * nested_samples.logL.to_numpy())
names = nested_samples.columns
ranges = {name: nested_samples._limits(name) for name in names}
return getdist.mcsamples.MCSamples(samples=samples, weights=weights, loglikes=loglikes, ranges=ranges, names=names)
| 3,120,332,844,846,308,000
|
Convert from anesthetic to getdist samples.
Parameters
----------
nested_samples: MCMCSamples or NestedSamples
anesthetic samples to be converted
Returns
-------
getdist_samples: getdist.mcsamples.MCSamples
getdist equivalent samples
|
anesthetic/convert.py
|
to_getdist
|
Stefan-Heimersheim/anesthetic
|
python
|
def to_getdist(nested_samples):
'Convert from anesthetic to getdist samples.\n\n Parameters\n ----------\n nested_samples: MCMCSamples or NestedSamples\n anesthetic samples to be converted\n\n Returns\n -------\n getdist_samples: getdist.mcsamples.MCSamples\n getdist equivalent samples\n '
import getdist
samples = nested_samples.to_numpy()
weights = nested_samples.weights
loglikes = ((- 2) * nested_samples.logL.to_numpy())
names = nested_samples.columns
ranges = {name: nested_samples._limits(name) for name in names}
return getdist.mcsamples.MCSamples(samples=samples, weights=weights, loglikes=loglikes, ranges=ranges, names=names)
|
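Usage note: a hedged sketch of converting an anesthetic run for use with getdist plotting and statistics; read_chains and the chain root are assumptions about the anesthetic reading API.

    from anesthetic import read_chains        # assumed reader function
    from anesthetic.convert import to_getdist

    nested_samples = read_chains('./chains/gaussian')  # hypothetical chain root
    gd_samples = to_getdist(nested_samples)            # getdist.mcsamples.MCSamples
    print(gd_samples.getMeans())                       # standard getdist methods now apply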
def __init__(self, cfg):
'\n model: torch.nn.Module\n cfg: model-agnostic experiment configs\n '
super().__init__()
self.cfg = cfg
self.image = ('MAGNETOGRAM' in cfg.DATA.FEATURES)
self.model = build_model(cfg)
self.save_hyperparameters()
| 5,289,503,828,256,731,000
|
model: torch.nn.Module
cfg: model-agnostic experiment configs
|
arnet/modeling/learner.py
|
__init__
|
ZeyuSun/flare-prediction-smarp
|
python
|
def __init__(self, cfg):
'\n model: torch.nn.Module\n cfg: model-agnostic experiment configs\n '
super().__init__()
self.cfg = cfg
self.image = ('MAGNETOGRAM' in cfg.DATA.FEATURES)
self.model = build_model(cfg)
self.save_hyperparameters()
|
def grad_norm(self, norm_type: Union[(float, int, str)]) -> Dict[(str, float)]:
"Compute each parameter's gradient's norm and their overall norm.\n\n The overall norm is computed over all gradients together, as if they\n were concatenated into a single vector.\n\n Args:\n norm_type: The type of the used p-norm, cast to float if necessary.\n Can be ``'inf'`` for infinity norm.\n\n Return:\n norms: The dictionary of p-norms of each parameter's gradient and\n a special entry for the total p-norm of the gradients viewed\n as a single vector.\n "
(norms, all_norms) = ({}, [])
for (name, p) in self.named_parameters():
if (name.split('.')[0] == 'model'):
name = name[6:]
if (p.grad is None):
continue
param_norm = float(p.data.norm(norm_type))
grad_norm = float(p.grad.data.norm(norm_type))
norms[f'grad_{norm_type}_norm/{name}'] = {'param': param_norm, 'grad': grad_norm}
        all_norms.append(grad_norm)
total_norm = float(torch.tensor(all_norms).norm(norm_type))
norms[f'grad_{norm_type}_norm/total'] = round(total_norm, 3)
return norms
| 6,898,759,577,125,780,000
|
Compute each parameter's gradient's norm and their overall norm.
The overall norm is computed over all gradients together, as if they
were concatenated into a single vector.
Args:
norm_type: The type of the used p-norm, cast to float if necessary.
Can be ``'inf'`` for infinity norm.
Return:
norms: The dictionary of p-norms of each parameter's gradient and
a special entry for the total p-norm of the gradients viewed
as a single vector.
|
arnet/modeling/learner.py
|
grad_norm
|
ZeyuSun/flare-prediction-smarp
|
python
|
def grad_norm(self, norm_type: Union[(float, int, str)]) -> Dict[(str, float)]:
"Compute each parameter's gradient's norm and their overall norm.\n\n The overall norm is computed over all gradients together, as if they\n were concatenated into a single vector.\n\n Args:\n norm_type: The type of the used p-norm, cast to float if necessary.\n Can be ``'inf'`` for infinity norm.\n\n Return:\n norms: The dictionary of p-norms of each parameter's gradient and\n a special entry for the total p-norm of the gradients viewed\n as a single vector.\n "
(norms, all_norms) = ({}, [])
for (name, p) in self.named_parameters():
if (name.split('.')[0] == 'model'):
name = name[6:]
if (p.grad is None):
continue
param_norm = float(p.data.norm(norm_type))
grad_norm = float(p.grad.data.norm(norm_type))
norms[f'grad_{norm_type}_norm/{name}'] = {'param': param_norm, 'grad': grad_norm}
        all_norms.append(grad_norm)
total_norm = float(torch.tensor(all_norms).norm(norm_type))
norms[f'grad_{norm_type}_norm/total'] = round(total_norm, 3)
return norms
|
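Usage note: a hedged sketch of inspecting gradient norms with the method above. The Learner construction, cfg object, and input shape are placeholders; only the key format follows the code.

    import torch

    learner = Learner(cfg)                                   # hypothetical cfg built elsewhere
    loss = learner.model(torch.randn(4, 1, 64, 64)).sum()    # dummy forward pass
    loss.backward()                                          # populate .grad on parameters
    norms = learner.grad_norm(norm_type=2)
    print(norms['grad_2_norm/total'])                        # overall 2-norm, rounded to 3 decimals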
def _set_by_path(tree, keys, value):
'Set a value in a nested object in tree by sequence of keys.'
keys = keys.split(';')
_get_by_path(tree, keys[:(- 1)])[keys[(- 1)]] = value
| 2,666,638,307,653,452,000
|
Set a value in a nested object in tree by sequence of keys.
|
code/utils/parse_config.py
|
_set_by_path
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
def _set_by_path(tree, keys, value):
keys = keys.split(';')
_get_by_path(tree, keys[:(- 1)])[keys[(- 1)]] = value
|
def _get_by_path(tree, keys):
'Access a nested object in tree by sequence of keys.'
return reduce(getitem, keys, tree)
| 320,196,700,010,566,400
|
Access a nested object in tree by sequence of keys.
|
code/utils/parse_config.py
|
_get_by_path
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
def _get_by_path(tree, keys):
return reduce(getitem, keys, tree)
|
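Usage note: the two helpers above compose into a tiny keychain API over nested dicts. A standalone, runnable demonstration follows (the helper bodies are restated verbatim so the snippet runs on its own):

    from functools import reduce
    from operator import getitem

    def _get_by_path(tree, keys):
        return reduce(getitem, keys, tree)

    def _set_by_path(tree, keys, value):
        keys = keys.split(';')
        _get_by_path(tree, keys[:-1])[keys[-1]] = value

    config = {'trainer': {'save_dir': 'saved/', 'epochs': 10}}
    _set_by_path(config, 'trainer;epochs', 100)   # ';'-separated keychain, as in split(';')
    assert _get_by_path(config, ['trainer', 'epochs']) == 100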
def __init__(self, config, resume=None, modification=None, run_id=None):
'\n Class to parse the configuration json file. Handles hyperparameters for training, initialization of modules, checkpoint saving\n and the logging module.\n :param config: Dict containing configurations, hyperparameters for training. Contents of `config.json` file, for example.\n :param resume: String, path to the checkpoint being loaded.\n :param modification: Dict keychain:value, specifying position values to be replaced from config dict.\n :param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is used as default\n '
self._config = _update_config(config, modification)
self.resume = resume
save_dir = Path(self.config['trainer']['save_dir'])
exper_name = self.config['name']
if ('fold' in self.config['data_loader']['args']):
fold = self.config['data_loader']['args']['fold']
else:
fold = 0
if self.resume:
if os.path.isdir(self.resume):
self.root_dir = self.resume
elif os.path.isfile(self.resume):
self.root_dir = Path(self.resume).parent
else:
if (run_id is None):
run_id = '{}_fold_{}'.format(datetime.now().strftime('%m%d_%H%M%S'), fold)
self.root_dir = ((save_dir / exper_name) / run_id)
exist_ok = self.resume
self.root_dir.mkdir(parents=True, exist_ok=exist_ok)
write_json(self.config, (self.save_dir / 'config_{}_fold_{}.json'.format(exper_name, fold)))
setup_logging(self.log_dir)
self.log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
| -4,933,217,559,418,165,000
|
Class to parse the configuration json file. Handles hyperparameters for training, initialization of modules, checkpoint saving
and the logging module.
:param config: Dict containing configurations, hyperparameters for training. Contents of `config.json` file, for example.
:param resume: String, path to the checkpoint being loaded.
:param modification: Dict keychain:value, specifying position values to be replaced from config dict.
:param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is used as default
|
code/utils/parse_config.py
|
__init__
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
def __init__(self, config, resume=None, modification=None, run_id=None):
'\n Class to parse the configuration json file. Handles hyperparameters for training, initialization of modules, checkpoint saving\n and the logging module.\n :param config: Dict containing configurations, hyperparameters for training. Contents of `config.json` file, for example.\n :param resume: String, path to the checkpoint being loaded.\n :param modification: Dict keychain:value, specifying position values to be replaced from config dict.\n :param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is used as default\n '
self._config = _update_config(config, modification)
self.resume = resume
save_dir = Path(self.config['trainer']['save_dir'])
exper_name = self.config['name']
if ('fold' in self.config['data_loader']['args']):
fold = self.config['data_loader']['args']['fold']
else:
fold = 0
if self.resume:
if os.path.isdir(self.resume):
self.root_dir = self.resume
elif os.path.isfile(self.resume):
self.root_dir = Path(self.resume).parent
else:
if (run_id is None):
run_id = '{}_fold_{}'.format(datetime.now().strftime('%m%d_%H%M%S'), fold)
self.root_dir = ((save_dir / exper_name) / run_id)
exist_ok = self.resume
self.root_dir.mkdir(parents=True, exist_ok=exist_ok)
write_json(self.config, (self.save_dir / 'config_{}_fold_{}.json'.format(exper_name, fold)))
setup_logging(self.log_dir)
self.log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
|
@classmethod
def from_args(cls, args, options='', updates=dict()):
'\n Initialize this class from some cli arguments. Used in train, test.\n '
for opt in options:
args.add_argument(*opt.flags, default=None, type=opt.type)
if (not isinstance(args, tuple)):
args = args.parse_args()
if (args.device is not None):
os.environ['CUDA_VISIBLE_DEVICES'] = args.device
if (args.resume is not None):
resume = Path(args.resume)
if (args.config is None):
cfg_fname = glob.glob(os.path.join(resume, 'config*.json'))[0]
else:
cfg_fname = Path(args.config)
else:
msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
assert (args.config is not None), msg_no_cfg
resume = None
cfg_fname = Path(args.config)
config = read_json(cfg_fname)
if (args.config and resume):
config.update(read_json(args.config))
modification = {opt.target: getattr(args, _get_opt_name(opt.flags)) for opt in options}
modification.update(updates)
return cls(config, resume, modification)
| 1,624,221,919,907,510,500
|
Initialize this class from some cli arguments. Used in train, test.
|
code/utils/parse_config.py
|
from_args
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
@classmethod
def from_args(cls, args, options='', updates=dict()):
'\n \n '
for opt in options:
args.add_argument(*opt.flags, default=None, type=opt.type)
if (not isinstance(args, tuple)):
args = args.parse_args()
if (args.device is not None):
os.environ['CUDA_VISIBLE_DEVICES'] = args.device
if (args.resume is not None):
resume = Path(args.resume)
if (args.config is None):
cfg_fname = glob.glob(os.path.join(resume, 'config*.json'))[0]
else:
cfg_fname = Path(args.config)
else:
msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
assert (args.config is not None), msg_no_cfg
resume = None
cfg_fname = Path(args.config)
config = read_json(cfg_fname)
if (args.config and resume):
config.update(read_json(args.config))
modification = {opt.target: getattr(args, _get_opt_name(opt.flags)) for opt in options}
modification.update(updates)
return cls(config, resume, modification)
|
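Usage note: a hedged sketch of the CLI entry-point pattern from_args supports, following the common pytorch-template layout; the CustomArgs namedtuple and its fields are assumptions, though consistent with opt.flags/opt.type/opt.target used above.

    import argparse
    import collections

    args = argparse.ArgumentParser(description='training entry point')
    args.add_argument('-c', '--config', default=None, type=str)
    args.add_argument('-r', '--resume', default=None, type=str)
    args.add_argument('-d', '--device', default=None, type=str)

    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['--lr'], type=float, target='optimizer;args;lr'),
        CustomArgs(['--bs'], type=int, target='data_loader;args;batch_size')]

    config = ConfigParser.from_args(args, options)  # class defined in this file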
def init_obj(self, name, module, *args, **kwargs):
"\n Finds a function handle with the name given as 'type' in config, and returns the\n instance initialized with corresponding arguments given.\n\n `object = config.init_obj('name', module, a, b=1)`\n is equivalent to\n `object = module.name(a, b=1)`\n "
module_name = self[name]['type']
module_args = dict(self[name]['args'])
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
| 2,826,001,643,421,151,000
|
Finds a function handle with the name given as 'type' in config, and returns the
instance initialized with corresponding arguments given.
`object = config.init_obj('name', module, a, b=1)`
is equivalent to
`object = module.name(a, b=1)`
|
code/utils/parse_config.py
|
init_obj
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
def init_obj(self, name, module, *args, **kwargs):
"\n Finds a function handle with the name given as 'type' in config, and returns the\n instance initialized with corresponding arguments given.\n\n `object = config.init_obj('name', module, a, b=1)`\n is equivalent to\n `object = module.name(a, b=1)`\n "
module_name = self[name]['type']
module_args = dict(self[name]['args'])
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
|
def init_ftn(self, name, module, *args, **kwargs):
"\n Finds a function handle with the name given as 'type' in config, and returns the\n function with given arguments fixed with functools.partial.\n\n `function = config.init_ftn('name', module, a, b=1)`\n is equivalent to\n `function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.\n "
module_name = self[name]['type']
module_args = dict(self[name]['args'])
assert all([(k not in module_args) for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return partial(getattr(module, module_name), *args, **module_args)
| 706,491,489,085,046,500
|
Finds a function handle with the name given as 'type' in config, and returns the
function with given arguments fixed with functools.partial.
`function = config.init_ftn('name', module, a, b=1)`
is equivalent to
`function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.
|
code/utils/parse_config.py
|
init_ftn
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
def init_ftn(self, name, module, *args, **kwargs):
"\n Finds a function handle with the name given as 'type' in config, and returns the\n function with given arguments fixed with functools.partial.\n\n `function = config.init_ftn('name', module, a, b=1)`\n is equivalent to\n `function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.\n "
module_name = self[name]['type']
module_args = dict(self[name]['args'])
assert all([(k not in module_args) for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return partial(getattr(module, module_name), *args, **module_args)
|
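Usage note: hedged examples of the two factory helpers, continuing from the from_args sketch above and assuming a config.json with entries like {"optimizer": {"type": "Adam", "args": {"lr": 0.001}}} and {"lr_scheduler": {"type": "StepLR", "args": {"step_size": 50}}}.

    import torch

    model = torch.nn.Linear(10, 2)  # placeholder model

    # init_obj constructs immediately: getattr(torch.optim, 'Adam')(params, lr=0.001)
    optimizer = config.init_obj('optimizer', torch.optim, model.parameters())

    # init_ftn returns a functools.partial; construction is deferred until called.
    lr_fn = config.init_ftn('lr_scheduler', torch.optim.lr_scheduler)
    scheduler = lr_fn(optimizer)  # equivalent to StepLR(optimizer, step_size=50)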
def __getitem__(self, name):
'Access items like ordinary dict.'
return self.config[name]
| -3,024,819,929,258,913,300
|
Access items like ordinary dict.
|
code/utils/parse_config.py
|
__getitem__
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
python
|
def __getitem__(self, name):
return self.config[name]
|
def _get_sorted_box_lims(boxes, box_init):
'Sort the uncertainties for each box in boxes based on a\n normalization given box_init. Unrestricted dimensions are dropped.\n The sorting is based on the normalization of the first box in boxes.\n\n Parameters\n ----------\n boxes : list of numpy structured arrays\n box_init : numpy structured array\n\n Returns\n -------\n tuple\n with the sorted boxes, and the list of restricted uncertainties\n\n '
uncs = set()
for box in boxes:
us = _determine_restricted_dims(box, box_init)
uncs = uncs.union(us)
uncs = np.asarray(list(uncs))
box_lim = boxes[0]
nbl = _normalize(box_lim, box_init, uncs)
box_size = (nbl[:, 1] - nbl[:, 0])
uncs = uncs[np.argsort(box_size)]
box_lims = [box for box in boxes]
return (box_lims, uncs.tolist())
| -9,012,174,965,740,264,000
|
Sort the uncertainties for each box in boxes based on a
normalization given box_init. Unrestricted dimensions are dropped.
The sorting is based on the normalization of the first box in boxes.
Parameters
----------
boxes : list of numpy structured arrays
box_init : numpy structured array
Returns
-------
tuple
with the sorted boxes, and the list of restricted uncertainties
|
ema_workbench/analysis/scenario_discovery_util.py
|
_get_sorted_box_lims
|
brodderickrodriguez/EMA_lite
|
python
|
def _get_sorted_box_lims(boxes, box_init):
'Sort the uncertainties for each box in boxes based on a\n normalization given box_init. Unrestricted dimensions are dropped.\n The sorting is based on the normalization of the first box in boxes.\n\n Parameters\n ----------\n boxes : list of numpy structured arrays\n box_init : numpy structured array\n\n Returns\n -------\n tuple\n with the sorted boxes, and the list of restricted uncertainties\n\n '
uncs = set()
for box in boxes:
us = _determine_restricted_dims(box, box_init)
uncs = uncs.union(us)
uncs = np.asarray(list(uncs))
box_lim = boxes[0]
nbl = _normalize(box_lim, box_init, uncs)
box_size = (nbl[:, 1] - nbl[:, 0])
uncs = uncs[np.argsort(box_size)]
box_lims = [box for box in boxes]
return (box_lims, uncs.tolist())
|
def _make_box(x):
'\n Make a box that encompasses all the data\n\n Parameters\n ----------\n x : DataFrame\n\n Returns\n -------\n DataFrame\n\n\n '
def limits(x):
if (pd.api.types.is_integer_dtype(x.dtype) or pd.api.types.is_float_dtype(x.dtype)):
return pd.Series([x.min(), x.max()])
else:
return pd.Series([set(x), set(x)])
return x.apply(limits)
| -2,165,776,446,668,643,600
|
Make a box that encompasses all the data
Parameters
----------
x : DataFrame
Returns
-------
DataFrame
|
ema_workbench/analysis/scenario_discovery_util.py
|
_make_box
|
brodderickrodriguez/EMA_lite
|
python
|
def _make_box(x):
'\n Make a box that encompasses all the data\n\n Parameters\n ----------\n x : DataFrame\n\n Returns\n -------\n DataFrame\n\n\n '
def limits(x):
if (pd.api.types.is_integer_dtype(x.dtype) or pd.api.types.is_float_dtype(x.dtype)):
return pd.Series([x.min(), x.max()])
else:
return pd.Series([set(x), set(x)])
return x.apply(limits)
|
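Usage note: a runnable sketch of _make_box on a tiny mixed-dtype DataFrame; the import path is assumed from this record's path field.

    import pandas as pd
    from ema_workbench.analysis.scenario_discovery_util import _make_box  # assumed import

    x = pd.DataFrame({'a': [0.1, 0.5, 0.9],
                      'b': [1, 4, 2],
                      'c': pd.Categorical(['x', 'y', 'x'])})
    box_init = _make_box(x)
    # Row 0 holds lower limits (or the full category set), row 1 the upper limits.
    print(box_init)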
def _normalize(box_lim, box_init, uncertainties):
'Normalize the given box lim to the unit interval derived\n from box init for the specified uncertainties.\n\n Categorical uncertainties are normalized by fraction: the\n value specifies the fraction of categories in the box_lim.\n\n Parameters\n ----------\n box_lim : DataFrame\n box_init : DataFrame\n uncertainties : list of strings\n valid names of columns that exist in both structured\n arrays.\n\n Returns\n -------\n ndarray\n a numpy array of shape (len(uncertainties), 2) with the\n normalized box limits.\n\n\n '
norm_box_lim = np.zeros((len(uncertainties), box_lim.shape[0]))
for (i, u) in enumerate(uncertainties):
dtype = box_lim[u].dtype
if (dtype == np.dtype(object)):
nu = (len(box_lim.loc[(0, u)]) / len(box_init.loc[(0, u)]))
nl = 0
else:
(lower, upper) = box_lim.loc[:, u]
dif = (box_init.loc[(1, u)] - box_init.loc[(0, u)])
a = (1 / dif)
b = (((- 1) * box_init.loc[(0, u)]) / dif)
nl = ((a * lower) + b)
nu = ((a * upper) + b)
norm_box_lim[i, :] = (nl, nu)
return norm_box_lim
| -522,221,167,959,005,700
|
Normalize the given box lim to the unit interval derived
from box init for the specified uncertainties.
Categorical uncertainties are normalized by fraction: the
value specifies the fraction of categories in the box_lim.
Parameters
----------
box_lim : DataFrame
box_init : DataFrame
uncertainties : list of strings
valid names of columns that exist in both structured
arrays.
Returns
-------
ndarray
a numpy array of shape (len(uncertainties), 2) with the
normalized box limits.
|
ema_workbench/analysis/scenario_discovery_util.py
|
_normalize
|
brodderickrodriguez/EMA_lite
|
python
|
def _normalize(box_lim, box_init, uncertainties):
'Normalize the given box lim to the unit interval derived\n from box init for the specified uncertainties.\n\n Categorical uncertainties are normalized by fraction: the\n value specifies the fraction of categories in the box_lim.\n\n Parameters\n ----------\n box_lim : DataFrame\n box_init : DataFrame\n uncertainties : list of strings\n valid names of columns that exist in both structured\n arrays.\n\n Returns\n -------\n ndarray\n a numpy array of shape (len(uncertainties), 2) with the\n normalized box limits.\n\n\n '
norm_box_lim = np.zeros((len(uncertainties), box_lim.shape[0]))
for (i, u) in enumerate(uncertainties):
dtype = box_lim[u].dtype
if (dtype == np.dtype(object)):
nu = (len(box_lim.loc[(0, u)]) / len(box_init.loc[(0, u)]))
nl = 0
else:
(lower, upper) = box_lim.loc[:, u]
dif = (box_init.loc[(1, u)] - box_init.loc[(0, u)])
a = (1 / dif)
b = (((- 1) * box_init.loc[(0, u)]) / dif)
nl = ((a * lower) + b)
nu = ((a * upper) + b)
norm_box_lim[i, :] = (nl, nu)
return norm_box_lim
|
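Worked check: a box spanning the middle half of a [0, 10] dimension should normalize to (0.25, 0.75), since dif = 10 gives a = 0.1 and b = 0, so nl = 0.25 and nu = 0.75. The import path is assumed.

    import pandas as pd
    from ema_workbench.analysis.scenario_discovery_util import _make_box, _normalize  # assumed import

    box_init = _make_box(pd.DataFrame({'u': [0.0, 10.0]}))  # rows 0/1 = lower/upper
    box_lim = pd.DataFrame({'u': [2.5, 7.5]})
    print(_normalize(box_lim, box_init, ['u']))  # -> [[0.25 0.75]]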
def _determine_restricted_dims(box_limits, box_init):
'returns a list of dimensions that are restricted\n\n Parameters\n ----------\n box_limits : pd.DataFrame\n box_init : pd.DataFrame\n\n Returns\n -------\n list of str\n\n '
cols = box_init.columns.values
restricted_dims = cols[(np.all((box_init.values == box_limits.values), axis=0) == False)]
return restricted_dims
| 4,333,435,090,552,701,000
|
returns a list of dimensions that are restricted
Parameters
----------
box_limits : pd.DataFrame
box_init : pd.DataFrame
Returns
-------
list of str
|
ema_workbench/analysis/scenario_discovery_util.py
|
_determine_restricted_dims
|
brodderickrodriguez/EMA_lite
|
python
|
def _determine_restricted_dims(box_limits, box_init):
'returns a list of dimensions that are restricted\n\n Parameters\n ----------\n box_limits : pd.DataFrame\n box_init : pd.DataFrame\n\n Returns\n -------\n list of str\n\n '
cols = box_init.columns.values
restricted_dims = cols[(np.all((box_init.values == box_limits.values), axis=0) == False)]
return restricted_dims
|
def _determine_nr_restricted_dims(box_lims, box_init):
'\n\n determine the number of restricted dimensions of a given box\n compared to the initial box that contains all the data\n\n Parameters\n ----------\n box_lims : structured numpy array\n a specific box limit\n box_init : structured numpy array\n the initial box containing all data points\n\n\n Returns\n -------\n int\n\n '
return _determine_restricted_dims(box_lims, box_init).shape[0]
| -6,357,786,457,148,202,000
|
determine the number of restricted dimensions of a given box
compared to the initial box that contains all the data
Parameters
----------
box_lims : structured numpy array
a specific box limit
box_init : structured numpy array
the initial box containing all data points
Returns
-------
int
|
ema_workbench/analysis/scenario_discovery_util.py
|
_determine_nr_restricted_dims
|
brodderickrodriguez/EMA_lite
|
python
|
def _determine_nr_restricted_dims(box_lims, box_init):
'\n\n determine the number of restricted dimensions of a given box\n compared to the initial box that contains all the data\n\n Parameters\n ----------\n box_lims : structured numpy array\n a specific box limit\n box_init : structured numpy array\n the initial box containing all data points\n\n\n Returns\n -------\n int\n\n '
return _determine_restricted_dims(box_lims, box_init).shape[0]
|
def _compare(a, b):
'compare two boxes, for each dimension return True if the\n same and false otherwise'
dtypesDesc = a.dtype.descr
logical = np.ones(len(dtypesDesc), dtype=bool)
for (i, entry) in enumerate(dtypesDesc):
name = entry[0]
logical[i] = ((logical[i] & (a[name][0] == b[name][0])) & (a[name][1] == b[name][1]))
return logical
| 7,529,231,224,578,261,000
|
compare two boxes, for each dimension return True if the
same and false otherwise
|
ema_workbench/analysis/scenario_discovery_util.py
|
_compare
|
brodderickrodriguez/EMA_lite
|
python
|
def _compare(a, b):
'compare two boxes, for each dimension return True if the\n same and false otherwise'
dtypesDesc = a.dtype.descr
logical = np.ones(len(dtypesDesc), dtype=bool)
for (i, entry) in enumerate(dtypesDesc):
name = entry[0]
logical[i] = ((logical[i] & (a[name][0] == b[name][0])) & (a[name][1] == b[name][1]))
return logical
|
def _in_box(x, boxlim):
'\n\n returns a boolean index indicating which data points are inside\n and which are outside of the given box_lims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxlim : pd.DataFrame\n\n Returns\n -------\n ndarray\n boolean 1D array\n\n Raises\n ------\n AttributeError if non-numeric columns are not pandas\n category dtype\n\n '
x_numbered = x.select_dtypes(np.number)
boxlim_numbered = boxlim.select_dtypes(np.number)
logical = ((boxlim_numbered.loc[0, :].values <= x_numbered.values) & (x_numbered.values <= boxlim_numbered.loc[1, :].values))
logical = logical.all(axis=1)
for (column, values) in x.select_dtypes(exclude=np.number).items():
entries = boxlim.loc[(0, column)]
not_present = (set(values.cat.categories.values) - entries)
if not_present:
l = pd.isnull(x[column].cat.remove_categories(list(entries)))
logical = (l & logical)
return logical
| -4,604,904,357,187,681,000
|
returns a boolean index indicating which data points are inside
and which are outside of the given box_lims
Parameters
----------
x : pd.DataFrame
boxlim : pd.DataFrame
Returns
-------
ndarray
boolean 1D array
Raises
------
AttributeError if non-numeric columns are not pandas
category dtype
|
ema_workbench/analysis/scenario_discovery_util.py
|
_in_box
|
brodderickrodriguez/EMA_lite
|
python
|
def _in_box(x, boxlim):
'\n\n returns a boolean index indicating which data points are inside\n and which are outside of the given box_lims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxlim : pd.DataFrame\n\n Returns\n -------\n ndarray\n boolean 1D array\n\n Raises\n ------\n AttributeError if non-numeric columns are not pandas\n category dtype\n\n '
x_numbered = x.select_dtypes(np.number)
boxlim_numbered = boxlim.select_dtypes(np.number)
logical = ((boxlim_numbered.loc[0, :].values <= x_numbered.values) & (x_numbered.values <= boxlim_numbered.loc[1, :].values))
logical = logical.all(axis=1)
for (column, values) in x.select_dtypes(exclude=np.number).items():
entries = boxlim.loc[(0, column)]
not_present = (set(values.cat.categories.values) - entries)
if not_present:
l = pd.isnull(x[column].cat.remove_categories(list(entries)))
logical = (l & logical)
return logical
|
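A sketch of the same membership test on made-up data: numeric columns must fall inside the [min, max] interval, and categorical columns must take a value from the allowed set stored in row 0 of the box limits (the function above does the categorical part via category-dtype bookkeeping).

import numpy as np
import pandas as pd

x = pd.DataFrame({'u': [0.1, 0.5, 0.9],
                  'v': pd.Categorical(['a', 'b', 'a'])})
boxlim = pd.DataFrame({'u': [0.0, 0.6], 'v': [{'a'}, {'a'}]})

x_num = x.select_dtypes(np.number)
lim_num = boxlim.select_dtypes(np.number)
inside = ((lim_num.loc[0].values <= x_num.values) &
          (x_num.values <= lim_num.loc[1].values)).all(axis=1)
inside &= x['v'].isin(boxlim.loc[0, 'v']).values  # categorical membership
print(inside)  # [ True False False]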
def _setup(results, classify, incl_unc=[]):
'helper function for setting up CART or PRIM\n\n Parameters\n ----------\n results : tuple of DataFrame and dict with numpy arrays\n the return from :meth:`perform_experiments`.\n classify : string, function or callable\n either a string denoting the outcome of interest to\n use or a function.\n incl_unc : list of strings\n\n Notes\n -----\n CART, PRIM, and feature scoring only work for a 1D numpy array\n for the dependent variable\n\n Raises\n ------\n TypeError\n if classify is not a string or a callable.\n\n '
(x, outcomes) = results
if incl_unc:
drop_names = (set(x.columns.values.tolist()) - set(incl_unc))
x = x.drop(drop_names, axis=1)
if isinstance(classify, str):
y = outcomes[classify]
mode = RuleInductionType.REGRESSION
elif callable(classify):
y = classify(outcomes)
mode = RuleInductionType.BINARY
else:
raise TypeError('unknown type for classify')
assert (y.ndim == 1)
return (x, y, mode)
| 6,892,984,485,061,205,000
|
helper function for setting up CART or PRIM
Parameters
----------
results : tuple of DataFrame and dict with numpy arrays
the return from :meth:`perform_experiments`.
classify : string, function or callable
either a string denoting the outcome of interest to
use or a function.
incl_unc : list of strings
Notes
-----
CART, PRIM, and feature scoring only work for a 1D numpy array
for the dependent variable
Raises
------
TypeError
if classify is not a string or a callable.
|
ema_workbench/analysis/scenario_discovery_util.py
|
_setup
|
brodderickrodriguez/EMA_lite
|
python
|
def _setup(results, classify, incl_unc=[]):
'helper function for setting up CART or PRIM\n\n Parameters\n ----------\n results : tuple of DataFrame and dict with numpy arrays\n the return from :meth:`perform_experiments`.\n classify : string, function or callable\n either a string denoting the outcome of interest to\n use or a function.\n incl_unc : list of strings\n\n Notes\n -----\n CART, PRIM, and feature scoring only work for a 1D numpy array\n for the dependent variable\n\n Raises\n ------\n TypeError\n if classify is not a string or a callable.\n\n '
(x, outcomes) = results
if incl_unc:
drop_names = (set(x.columns.values.tolist()) - set(incl_unc))
x = x.drop(drop_names, axis=1)
if isinstance(classify, str):
y = outcomes[classify]
mode = RuleInductionType.REGRESSION
elif callable(classify):
y = classify(outcomes)
mode = RuleInductionType.BINARY
else:
raise TypeError('unknown type for classify')
assert (y.ndim == 1)
return (x, y, mode)
|
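A hypothetical classify callable, to show why _setup asserts a 1D dependent variable; every name below is illustrative.

import numpy as np
import pandas as pd

experiments = pd.DataFrame({'u1': [0.1, 0.9], 'u2': [0.3, 0.7]})
outcomes = {'max_P': np.array([0.50, 0.95])}

def classify(outcomes):
    # mark runs of interest: max_P above an arbitrary threshold
    return outcomes['max_P'] > 0.8

y = classify(outcomes)  # array([False,  True]), ndim == 1
# x, y, mode = _setup((experiments, outcomes), classify)  # mode == RuleInductionType.BINARY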
def _calculate_quasip(x, y, box, Hbox, Tbox):
'\n\n Parameters\n ----------\n x : DataFrame\n y : np.array\n box : DataFrame\n Hbox : int\n Tbox : int\n\n '
logical = _in_box(x, box)
yi = y[logical]
Tj = yi.shape[0]
Hj = np.sum(yi)
p = (Hj / Tj)
Hbox = int(Hbox)
Tbox = int(Tbox)
qp = sp.stats.binom_test(Hbox, Tbox, p, alternative='greater')
return qp
| 5,761,812,687,022,758,000
|
Parameters
----------
x : DataFrame
y : np.array
box : DataFrame
Hbox : int
Tbox : int
|
ema_workbench/analysis/scenario_discovery_util.py
|
_calculate_quasip
|
brodderickrodriguez/EMA_lite
|
python
|
def _calculate_quasip(x, y, box, Hbox, Tbox):
'\n\n Parameters\n ----------\n x : DataFrame\n y : np.array\n box : DataFrame\n Hbox : int\n Tbox : int\n\n '
logical = _in_box(x, box)
yi = y[logical]
Tj = yi.shape[0]
Hj = np.sum(yi)
p = (Hj / Tj)
Hbox = int(Hbox)
Tbox = int(Tbox)
qp = sp.stats.binom_test(Hbox, Tbox, p, alternative='greater')
return qp
|
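The quasi-p value is a one-sided binomial test of the box's hit count against the hit rate p among the points in the candidate box; a standalone sketch with made-up counts (scipy.stats.binom_test used above was later replaced by scipy.stats.binomtest, shown here):

from scipy import stats

Hbox, Tbox = 8, 10  # 8 cases of interest among the 10 points in the box
p = 0.5             # background rate of interest

qp = stats.binomtest(Hbox, Tbox, p, alternative='greater').pvalue  # P(X >= 8), X ~ Binomial(10, 0.5)
print(round(qp, 4))  # 0.0547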
def plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims):
' helper function for pair wise scatter plotting\n\n Parameters\n ----------\n x : DataFrame\n the experiments\n y : numpy array\n the outcome of interest\n box_lim : DataFrame\n a boxlim\n box_init : DataFrame\n restricted_dims : collection of strings\n list of uncertainties that define the boxlims\n\n '
x = x[restricted_dims]
data = x.copy()
categorical_columns = data.select_dtypes('category').columns.values
categorical_mappings = {}
for column in categorical_columns:
categories_inbox = boxlim.at[(0, column)]
categories_all = box_init.at[(0, column)]
missing = (categories_all - categories_inbox)
categories = (list(categories_inbox) + list(missing))
print(column, categories)
data[column] = data[column].cat.set_categories(categories)
categorical_mappings[column] = dict(enumerate(data[column].cat.categories))
data[column] = data[column].cat.codes
data['y'] = y
grid = sns.pairplot(data=data, hue='y', vars=x.columns.values)
cats = set(categorical_columns)
for (row, ylabel) in zip(grid.axes, grid.y_vars):
ylim = boxlim[ylabel]
if (ylabel in cats):
y = (- 0.2)
height = (len(ylim[0]) - 0.6)
else:
y = ylim[0]
height = (ylim[1] - ylim[0])
for (ax, xlabel) in zip(row, grid.x_vars):
if (ylabel == xlabel):
continue
if (xlabel in cats):
xlim = boxlim.at[(0, xlabel)]
x = (- 0.2)
width = (len(xlim) - 0.6)
else:
xlim = boxlim[xlabel]
x = xlim[0]
width = (xlim[1] - xlim[0])
xy = (x, y)
box = patches.Rectangle(xy, width, height, edgecolor='red', facecolor='none', lw=3)
ax.add_patch(box)
for (row, ylabel) in zip(grid.axes, grid.y_vars):
if (ylabel in cats):
ax = row[0]
labels = []
for entry in ax.get_yticklabels():
(_, value) = entry.get_position()
try:
label = categorical_mappings[ylabel][value]
except KeyError:
label = ''
labels.append(label)
ax.set_yticklabels(labels)
for (ax, xlabel) in zip(grid.axes[(- 1)], grid.x_vars):
if (xlabel in cats):
labels = []
locs = []
mapping = categorical_mappings[xlabel]
for i in range((- 1), (len(mapping) + 1)):
locs.append(i)
try:
label = categorical_mappings[xlabel][i]
except KeyError:
label = ''
labels.append(label)
ax.set_xticks(locs)
ax.set_xticklabels(labels, rotation=90)
return grid
| -772,414,657,591,447,600
|
helper function for pair wise scatter plotting
Parameters
----------
x : DataFrame
the experiments
y : numpy array
the outcome of interest
box_lim : DataFrame
a boxlim
box_init : DataFrame
restricted_dims : collection of strings
list of uncertainties that define the boxlims
|
ema_workbench/analysis/scenario_discovery_util.py
|
plot_pair_wise_scatter
|
brodderickrodriguez/EMA_lite
|
python
|
def plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims):
' helper function for pair wise scatter plotting\n\n Parameters\n ----------\n x : DataFrame\n the experiments\n y : numpy array\n the outcome of interest\n box_lim : DataFrame\n a boxlim\n box_init : DataFrame\n restricted_dims : collection of strings\n list of uncertainties that define the boxlims\n\n '
x = x[restricted_dims]
data = x.copy()
categorical_columns = data.select_dtypes('category').columns.values
categorical_mappings = {}
for column in categorical_columns:
categories_inbox = boxlim.at[(0, column)]
categories_all = box_init.at[(0, column)]
missing = (categories_all - categories_inbox)
categories = (list(categories_inbox) + list(missing))
print(column, categories)
data[column] = data[column].cat.set_categories(categories)
categorical_mappings[column] = dict(enumerate(data[column].cat.categories))
data[column] = data[column].cat.codes
data['y'] = y
grid = sns.pairplot(data=data, hue='y', vars=x.columns.values)
cats = set(categorical_columns)
for (row, ylabel) in zip(grid.axes, grid.y_vars):
ylim = boxlim[ylabel]
if (ylabel in cats):
y = (- 0.2)
height = (len(ylim[0]) - 0.6)
else:
y = ylim[0]
height = (ylim[1] - ylim[0])
for (ax, xlabel) in zip(row, grid.x_vars):
if (ylabel == xlabel):
continue
if (xlabel in cats):
xlim = boxlim.at[(0, xlabel)]
x = (- 0.2)
width = (len(xlim) - 0.6)
else:
xlim = boxlim[xlabel]
x = xlim[0]
width = (xlim[1] - xlim[0])
xy = (x, y)
box = patches.Rectangle(xy, width, height, edgecolor='red', facecolor='none', lw=3)
ax.add_patch(box)
for (row, ylabel) in zip(grid.axes, grid.y_vars):
if (ylabel in cats):
ax = row[0]
labels = []
for entry in ax.get_yticklabels():
(_, value) = entry.get_position()
try:
label = categorical_mappings[ylabel][value]
except KeyError:
label = ''
labels.append(label)
ax.set_yticklabels(labels)
for (ax, xlabel) in zip(grid.axes[(- 1)], grid.x_vars):
if (xlabel in cats):
labels = []
locs = []
mapping = categorical_mappings[xlabel]
for i in range((- 1), (len(mapping) + 1)):
locs.append(i)
try:
label = categorical_mappings[xlabel][i]
except KeyError:
label = ''
labels.append(label)
ax.set_xticks(locs)
ax.set_xticklabels(labels, rotation=90)
return grid
|
def _setup_figure(uncs):
'\n\n helper function for creating the basic layout for the figures that\n show the box lims.\n\n '
nr_unc = len(uncs)
fig = plt.figure()
ax = fig.add_subplot(111)
rect = mpl.patches.Rectangle((0, (- 0.5)), 1, (nr_unc + 1.5), alpha=0.25, facecolor='#C0C0C0', edgecolor='#C0C0C0')
ax.add_patch(rect)
ax.set_xlim(left=(- 0.2), right=1.2)
ax.set_ylim(top=(- 0.5), bottom=(nr_unc - 0.5))
ax.yaxis.set_ticks([y for y in range(nr_unc)])
ax.xaxis.set_ticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels(uncs[::(- 1)])
return (fig, ax)
| 6,302,861,723,883,544,000
|
helper function for creating the basic layout for the figures that
show the box lims.
|
ema_workbench/analysis/scenario_discovery_util.py
|
_setup_figure
|
brodderickrodriguez/EMA_lite
|
python
|
def _setup_figure(uncs):
'\n\n helper function for creating the basic layout for the figures that\n show the box lims.\n\n '
nr_unc = len(uncs)
fig = plt.figure()
ax = fig.add_subplot(111)
rect = mpl.patches.Rectangle((0, (- 0.5)), 1, (nr_unc + 1.5), alpha=0.25, facecolor='#C0C0C0', edgecolor='#C0C0C0')
ax.add_patch(rect)
ax.set_xlim(left=(- 0.2), right=1.2)
ax.set_ylim(top=(- 0.5), bottom=(nr_unc - 0.5))
ax.yaxis.set_ticks([y for y in range(nr_unc)])
ax.xaxis.set_ticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels(uncs[::(- 1)])
return (fig, ax)
|
def plot_box(boxlim, qp_values, box_init, uncs, coverage, density, ticklabel_formatter='{} ({})', boxlim_formatter='{: .2g}', table_formatter='{:.3g}'):
'Helper function for parallel coordinate style visualization\n of a box\n\n Parameters\n ----------\n boxlim : DataFrame\n qp_values : dict\n box_init : DataFrame\n uncs : list\n coverage : float\n density : float\n ticklabel_formatter : str\n boxlim_formatter : str\n table_formatter : str\n\n Returns\n -------\n a Figure instance\n\n\n '
norm_box_lim = _normalize(boxlim, box_init, uncs)
(fig, ax) = _setup_figure(uncs)
for (j, u) in enumerate(uncs):
xj = ((len(uncs) - j) - 1)
plot_unc(box_init, xj, j, 0, norm_box_lim, boxlim, u, ax)
dtype = box_init[u].dtype
props = {'facecolor': 'white', 'edgecolor': 'white', 'alpha': 0.25}
y = xj
if (dtype == object):
elements = sorted(list(box_init[u][0]))
max_value = (len(elements) - 1)
values = boxlim.loc[(0, u)]
x = [elements.index(entry) for entry in values]
x = [(entry / max_value) for entry in x]
for (xi, label) in zip(x, values):
ax.text(xi, (y - 0.2), label, ha='center', va='center', bbox=props, color='blue', fontweight='normal')
else:
props = {'facecolor': 'white', 'edgecolor': 'white', 'alpha': 0.25}
x = norm_box_lim[(j, 0)]
if (not np.allclose(x, 0)):
label = boxlim_formatter.format(boxlim.loc[(0, u)])
ax.text(x, (y - 0.2), label, ha='center', va='center', bbox=props, color='blue', fontweight='normal')
x = norm_box_lim[j][1]
if (not np.allclose(x, 1)):
label = boxlim_formatter.format(boxlim.loc[(1, u)])
ax.text(x, (y - 0.2), label, ha='center', va='center', bbox=props, color='blue', fontweight='normal')
x = 0
label = boxlim_formatter.format(box_init.loc[(0, u)])
ax.text((x - 0.01), y, label, ha='right', va='center', bbox=props, color='black', fontweight='normal')
x = 1
label = boxlim_formatter.format(box_init.loc[(1, u)])
ax.text((x + 0.01), y, label, ha='left', va='center', bbox=props, color='black', fontweight='normal')
qp_formatted = {}
for (key, values) in qp_values.items():
values = [vi for vi in values if (vi != (- 1))]
if (len(values) == 1):
value = '{:.2g}'.format(values[0])
else:
value = '{:.2g}, {:.2g}'.format(*values)
qp_formatted[key] = value
labels = [ticklabel_formatter.format(u, qp_formatted[u]) for u in uncs]
labels = labels[::(- 1)]
ax.set_yticklabels(labels)
ax.set_xticklabels([])
coverage = table_formatter.format(coverage)
density = table_formatter.format(density)
ax.table(cellText=[[coverage], [density]], colWidths=([0.1] * 2), rowLabels=['coverage', 'density'], colLabels=None, loc='right', bbox=[1.2, 0.9, 0.1, 0.1])
plt.subplots_adjust(left=0.1, right=0.75)
return fig
| 3,736,344,928,917,715,000
|
Helper function for parallel coordinate style visualization
of a box
Parameters
----------
boxlim : DataFrame
qp_values : dict
box_init : DataFrame
uncs : list
coverage : float
density : float
ticklabel_formatter : str
boxlim_formatter : str
table_formatter : str
Returns
-------
a Figure instance
|
ema_workbench/analysis/scenario_discovery_util.py
|
plot_box
|
brodderickrodriguez/EMA_lite
|
python
|
def plot_box(boxlim, qp_values, box_init, uncs, coverage, density, ticklabel_formatter='{} ({})', boxlim_formatter='{: .2g}', table_formatter='{:.3g}'):
'Helper function for parallel coordinate style visualization\n of a box\n\n Parameters\n ----------\n boxlim : DataFrame\n qp_values : dict\n box_init : DataFrame\n uncs : list\n coverage : float\n density : float\n ticklabel_formatter : str\n boxlim_formatter : str\n table_formatter : str\n\n Returns\n -------\n a Figure instance\n\n\n '
norm_box_lim = _normalize(boxlim, box_init, uncs)
(fig, ax) = _setup_figure(uncs)
for (j, u) in enumerate(uncs):
xj = ((len(uncs) - j) - 1)
plot_unc(box_init, xj, j, 0, norm_box_lim, boxlim, u, ax)
dtype = box_init[u].dtype
props = {'facecolor': 'white', 'edgecolor': 'white', 'alpha': 0.25}
y = xj
if (dtype == object):
elements = sorted(list(box_init[u][0]))
max_value = (len(elements) - 1)
values = boxlim.loc[(0, u)]
x = [elements.index(entry) for entry in values]
x = [(entry / max_value) for entry in x]
for (xi, label) in zip(x, values):
ax.text(xi, (y - 0.2), label, ha='center', va='center', bbox=props, color='blue', fontweight='normal')
else:
props = {'facecolor': 'white', 'edgecolor': 'white', 'alpha': 0.25}
x = norm_box_lim[(j, 0)]
if (not np.allclose(x, 0)):
label = boxlim_formatter.format(boxlim.loc[(0, u)])
ax.text(x, (y - 0.2), label, ha='center', va='center', bbox=props, color='blue', fontweight='normal')
x = norm_box_lim[j][1]
if (not np.allclose(x, 1)):
label = boxlim_formatter.format(boxlim.loc[(1, u)])
ax.text(x, (y - 0.2), label, ha='center', va='center', bbox=props, color='blue', fontweight='normal')
x = 0
label = boxlim_formatter.format(box_init.loc[(0, u)])
ax.text((x - 0.01), y, label, ha='right', va='center', bbox=props, color='black', fontweight='normal')
x = 1
label = boxlim_formatter.format(box_init.loc[(1, u)])
ax.text((x + 0.01), y, label, ha='left', va='center', bbox=props, color='black', fontweight='normal')
qp_formatted = {}
for (key, values) in qp_values.items():
values = [vi for vi in values if (vi != (- 1))]
if (len(values) == 1):
value = '{:.2g}'.format(values[0])
else:
value = '{:.2g}, {:.2g}'.format(*values)
qp_formatted[key] = value
labels = [ticklabel_formatter.format(u, qp_formatted[u]) for u in uncs]
labels = labels[::(- 1)]
ax.set_yticklabels(labels)
ax.set_xticklabels([])
coverage = table_formatter.format(coverage)
density = table_formatter.format(density)
ax.table(cellText=[[coverage], [density]], colWidths=([0.1] * 2), rowLabels=['coverage', 'density'], colLabels=None, loc='right', bbox=[1.2, 0.9, 0.1, 0.1])
plt.subplots_adjust(left=0.1, right=0.75)
return fig
|
def plot_ppt(peeling_trajectory):
'show the peeling and pasting trajectory in a figure'
ax = host_subplot(111)
ax.set_xlabel('peeling and pasting trajectory')
par = ax.twinx()
par.set_ylabel('nr. restricted dimensions')
ax.plot(peeling_trajectory['mean'], label='mean')
ax.plot(peeling_trajectory['mass'], label='mass')
ax.plot(peeling_trajectory['coverage'], label='coverage')
ax.plot(peeling_trajectory['density'], label='density')
par.plot(peeling_trajectory['res_dim'], label='restricted dims')
ax.grid(True, which='both')
ax.set_ylim(bottom=0, top=1)
fig = plt.gcf()
make_legend(['mean', 'mass', 'coverage', 'density', 'restricted_dim'], ax, ncol=5, alpha=1)
return fig
| -7,503,041,594,958,456,000
|
show the peeling and pasting trajectory in a figure
|
ema_workbench/analysis/scenario_discovery_util.py
|
plot_ppt
|
brodderickrodriguez/EMA_lite
|
python
|
def plot_ppt(peeling_trajectory):
ax = host_subplot(111)
ax.set_xlabel('peeling and pasting trajectory')
par = ax.twinx()
par.set_ylabel('nr. restricted dimensions')
ax.plot(peeling_trajectory['mean'], label='mean')
ax.plot(peeling_trajectory['mass'], label='mass')
ax.plot(peeling_trajectory['coverage'], label='coverage')
ax.plot(peeling_trajectory['density'], label='density')
par.plot(peeling_trajectory['res_dim'], label='restricted dims')
ax.grid(True, which='both')
ax.set_ylim(bottom=0, top=1)
fig = plt.gcf()
make_legend(['mean', 'mass', 'coverage', 'density', 'restricted_dim'], ax, ncol=5, alpha=1)
return fig
|
def plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis):
'Visualize the trade off between coverage and density. Color\n is used to denote the number of restricted dimensions.\n\n Parameters\n ----------\n cmap : valid matplotlib colormap\n\n Returns\n -------\n a Figure instance\n\n '
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
boundaries = np.arange((- 0.5), (max(peeling_trajectory['res_dim']) + 1.5), step=1)
ncolors = cmap.N
norm = mpl.colors.BoundaryNorm(boundaries, ncolors)
p = ax.scatter(peeling_trajectory['coverage'], peeling_trajectory['density'], c=peeling_trajectory['res_dim'], norm=norm, cmap=cmap)
ax.set_ylabel('density')
ax.set_xlabel('coverage')
ax.set_ylim(bottom=0, top=1.2)
ax.set_xlim(left=0, right=1.2)
ticklocs = np.arange(0, (max(peeling_trajectory['res_dim']) + 1), step=1)
cb = fig.colorbar(p, spacing='uniform', ticks=ticklocs, drawedges=True)
cb.set_label('nr. of restricted dimensions')
return fig
| -840,138,350,086,765,300
|
Visualize the trade off between coverage and density. Color
is used to denote the number of restricted dimensions.
Parameters
----------
cmap : valid matplotlib colormap
Returns
-------
a Figure instance
|
ema_workbench/analysis/scenario_discovery_util.py
|
plot_tradeoff
|
brodderickrodriguez/EMA_lite
|
python
|
def plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis):
'Visualize the trade off between coverage and density. Color\n is used to denote the number of restricted dimensions.\n\n Parameters\n ----------\n cmap : valid matplotlib colormap\n\n Returns\n -------\n a Figure instance\n\n '
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
boundaries = np.arange((- 0.5), (max(peeling_trajectory['res_dim']) + 1.5), step=1)
ncolors = cmap.N
norm = mpl.colors.BoundaryNorm(boundaries, ncolors)
p = ax.scatter(peeling_trajectory['coverage'], peeling_trajectory['density'], c=peeling_trajectory['res_dim'], norm=norm, cmap=cmap)
ax.set_ylabel('density')
ax.set_xlabel('coverage')
ax.set_ylim(bottom=0, top=1.2)
ax.set_xlim(left=0, right=1.2)
ticklocs = np.arange(0, (max(peeling_trajectory['res_dim']) + 1), step=1)
cb = fig.colorbar(p, spacing='uniform', ticks=ticklocs, drawedges=True)
cb.set_label('nr. of restricted dimensions')
return fig
|
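A hypothetical usage with a toy peeling trajectory; plot_tradeoff only needs the 'coverage', 'density', and 'res_dim' columns.

import pandas as pd
import matplotlib as mpl

peeling_trajectory = pd.DataFrame({'coverage': [1.0, 0.8, 0.6],
                                   'density': [0.3, 0.6, 0.9],
                                   'res_dim': [0, 1, 2]})
fig = plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis)
fig.savefig('tradeoff.png')  # illustrative output path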
def plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax, color=sns.color_palette()[0]):
'\n\n Parameters\n ----------\n xi : int\n the row at which to plot\n i : int\n the index of the uncertainty being plotted\n j : int\n the index of the box being plotted\n u : string\n the uncertainty being plotted\n ax : axes instance\n the ax on which to plot\n\n '
dtype = box_init[u].dtype
y = (xi - (j * 0.1))
if (dtype == object):
elements = sorted(list(box_init[u][0]))
max_value = (len(elements) - 1)
box_lim = box_lim[u][0]
x = [elements.index(entry) for entry in box_lim]
x = [(entry / max_value) for entry in x]
y = ([y] * len(x))
ax.scatter(x, y, edgecolor=color, facecolor=color)
else:
ax.plot(norm_box_lim[i], (y, y), c=color)
| -4,350,063,930,766,987,000
|
Parameters
----------
xi : int
the row at which to plot
i : int
the index of the uncertainty being plotted
j : int
the index of the box being plotted
u : string
the uncertainty being plotted
ax : axes instance
the ax on which to plot
|
ema_workbench/analysis/scenario_discovery_util.py
|
plot_unc
|
brodderickrodriguez/EMA_lite
|
python
|
def plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax, color=sns.color_palette()[0]):
'\n\n Parameters\n ----------\n xi : int\n the row at which to plot\n i : int\n the index of the uncertainty being plotted\n j : int\n the index of the box being plotted\n u : string\n the uncertainty being plotted\n ax : axes instance\n the ax on which to plot\n\n '
dtype = box_init[u].dtype
y = (xi - (j * 0.1))
if (dtype == object):
elements = sorted(list(box_init[u][0]))
max_value = (len(elements) - 1)
box_lim = box_lim[u][0]
x = [elements.index(entry) for entry in box_lim]
x = [(entry / max_value) for entry in x]
y = ([y] * len(x))
ax.scatter(x, y, edgecolor=color, facecolor=color)
else:
ax.plot(norm_box_lim[i], (y, y), c=color)
|
def plot_boxes(x, boxes, together):
'Helper function for plotting multiple boxlims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxes : list of pd.DataFrame\n together : bool\n\n '
box_init = _make_box(x)
(box_lims, uncs) = _get_sorted_box_lims(boxes, box_init)
norm_box_lims = [_normalize(box_lim, box_init, uncs) for box_lim in boxes]
if together:
(fig, ax) = _setup_figure(uncs)
for (i, u) in enumerate(uncs):
colors = itertools.cycle(COLOR_LIST)
xi = ((len(uncs) - i) - 1)
for (j, norm_box_lim) in enumerate(norm_box_lims):
color = next(colors)
plot_unc(box_init, xi, i, j, norm_box_lim, box_lims[j], u, ax, color)
plt.tight_layout()
return fig
else:
figs = []
colors = itertools.cycle(COLOR_LIST)
for (j, norm_box_lim) in enumerate(norm_box_lims):
(fig, ax) = _setup_figure(uncs)
ax.set_title('box {}'.format(j))
color = next(colors)
figs.append(fig)
for (i, u) in enumerate(uncs):
xi = ((len(uncs) - i) - 1)
plot_unc(box_init, xi, i, 0, norm_box_lim, box_lims[j], u, ax, color)
plt.tight_layout()
return figs
| 6,861,939,631,656,800,000
|
Helper function for plotting multiple boxlims
Parameters
----------
x : pd.DataFrame
boxes : list of pd.DataFrame
together : bool
|
ema_workbench/analysis/scenario_discovery_util.py
|
plot_boxes
|
brodderickrodriguez/EMA_lite
|
python
|
def plot_boxes(x, boxes, together):
'Helper function for plotting multiple boxlims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxes : list of pd.DataFrame\n together : bool\n\n '
box_init = _make_box(x)
(box_lims, uncs) = _get_sorted_box_lims(boxes, box_init)
norm_box_lims = [_normalize(box_lim, box_init, uncs) for box_lim in boxes]
if together:
(fig, ax) = _setup_figure(uncs)
for (i, u) in enumerate(uncs):
colors = itertools.cycle(COLOR_LIST)
xi = ((len(uncs) - i) - 1)
for (j, norm_box_lim) in enumerate(norm_box_lims):
color = next(colors)
plot_unc(box_init, xi, i, j, norm_box_lim, box_lims[j], u, ax, color)
plt.tight_layout()
return fig
else:
figs = []
colors = itertools.cycle(COLOR_LIST)
for (j, norm_box_lim) in enumerate(norm_box_lims):
(fig, ax) = _setup_figure(uncs)
ax.set_title('box {}'.format(j))
color = next(colors)
figs.append(fig)
for (i, u) in enumerate(uncs):
xi = ((len(uncs) - i) - 1)
plot_unc(box_init, xi, i, 0, norm_box_lim, box_lims[j], u, ax, color)
plt.tight_layout()
return figs
|
@abc.abstractproperty
def boxes(self):
'Property for getting a list of box limits'
raise NotImplementedError
| -4,880,963,140,910,533,000
|
Property for getting a list of box limits
|
ema_workbench/analysis/scenario_discovery_util.py
|
boxes
|
brodderickrodriguez/EMA_lite
|
python
|
@abc.abstractproperty
def boxes(self):
raise NotImplementedError
|
@abc.abstractproperty
def stats(self):
'property for getting a list of dicts containing the statistics\n for each box'
raise NotImplementedError
| 5,963,327,774,905,103,000
|
property for getting a list of dicts containing the statistics
for each box
|
ema_workbench/analysis/scenario_discovery_util.py
|
stats
|
brodderickrodriguez/EMA_lite
|
python
|
@abc.abstractproperty
def stats(self):
'property for getting a list of dicts containing the statistics\n for each box'
raise NotImplementedError
|
def boxes_to_dataframe(self):
'convert boxes to pandas dataframe'
boxes = self.boxes
(box_lims, uncs) = _get_sorted_box_lims(boxes, _make_box(self.x))
nr_boxes = len(boxes)
dtype = float
index = ['box {}'.format((i + 1)) for i in range(nr_boxes)]
for value in box_lims[0].dtypes:
if (value == object):
dtype = object
break
columns = pd.MultiIndex.from_product([index, ['min', 'max']])
df_boxes = pd.DataFrame(np.zeros((len(uncs), (nr_boxes * 2))), index=uncs, dtype=dtype, columns=columns)
for (i, box) in enumerate(box_lims):
for unc in uncs:
values = box.loc[:, unc]
values = values.rename({0: 'min', 1: 'max'})
df_boxes.loc[unc][index[i]] = values
return df_boxes
| 2,196,040,324,089,710,600
|
convert boxes to pandas dataframe
|
ema_workbench/analysis/scenario_discovery_util.py
|
boxes_to_dataframe
|
brodderickrodriguez/EMA_lite
|
python
|
def boxes_to_dataframe(self):
boxes = self.boxes
(box_lims, uncs) = _get_sorted_box_lims(boxes, _make_box(self.x))
nr_boxes = len(boxes)
dtype = float
index = ['box {}'.format((i + 1)) for i in range(nr_boxes)]
for value in box_lims[0].dtypes:
if (value == object):
dtype = object
break
columns = pd.MultiIndex.from_product([index, ['min', 'max']])
df_boxes = pd.DataFrame(np.zeros((len(uncs), (nr_boxes * 2))), index=uncs, dtype=dtype, columns=columns)
for (i, box) in enumerate(box_lims):
for unc in uncs:
values = box.loc[:, unc]
values = values.rename({0: 'min', 1: 'max'})
df_boxes.loc[unc][index[i]] = values
return df_boxes
|
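The returned frame is indexed by uncertainty name with a two-level column index of (box label, 'min'/'max'); a sketch of reading it back, assuming df_boxes came from the method above and 'u1' is one of the uncertainties:

lo = df_boxes.loc['u1', ('box 1', 'min')]  # lower limit of 'u1' in the first box
hi = df_boxes.loc['u1', ('box 1', 'max')]  # upper limit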
def stats_to_dataframe(self):
'convert stats to pandas dataframe'
stats = self.stats
index = pd.Index(['box {}'.format((i + 1)) for i in range(len(stats))])
return pd.DataFrame(stats, index=index)
| 1,509,923,910,514,162,200
|
convert stats to pandas dataframe
|
ema_workbench/analysis/scenario_discovery_util.py
|
stats_to_dataframe
|
brodderickrodriguez/EMA_lite
|
python
|
def stats_to_dataframe(self):
stats = self.stats
index = pd.Index(['box {}'.format((i + 1)) for i in range(len(stats))])
return pd.DataFrame(stats, index=index)
|
def show_boxes(self, together=False):
'display boxes\n\n Parameters\n ----------\n together : bool, optional\n\n '
plot_boxes(self.x, self.boxes, together=together)
| -1,717,583,655,820,089,600
|
display boxes
Parameters
----------
together : bool, optional
|
ema_workbench/analysis/scenario_discovery_util.py
|
show_boxes
|
brodderickrodriguez/EMA_lite
|
python
|
def show_boxes(self, together=False):
'display boxes\n\n Parameters\n ----------\n together : bool, optional\n\n '
plot_boxes(self.x, self.boxes, together=together)
|
def read_code(filename):
'Read a code file and return it as a dict\n 1. supports the JSON format\n 2. supports the xlsx format defined by this package\n see also to_code\n '
file_type = os.path.splitext(filename)[1][1:]
if (file_type == 'json'):
import json
code = json.load(open(filename, encoding='utf-8'))  # json.load needs a file object, not a path
return code
d = pd.read_excel(filename, header=None)
d = d[d.any(axis=1)]
d.fillna('NULL', inplace=True)
d = d.values
code = {}
for i in range(len(d)):
tmp = d[(i, 0)].strip()
if (tmp == 'key'):
code[d[(i, 1)]] = {}
key = d[(i, 1)]
elif (tmp in ['qlist', 'code_order']):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
tmp2 = list(d[i:j, 1])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = tmp2
elif (tmp in ['code', 'code_r']):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
tmp1 = list(d[i:j, 1])
tmp2 = list(d[i:j, 2])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = dict(zip(tmp1, tmp2))
elif ((tmp != 'NULL') and (d[(i, 2)] == 'NULL') and ((i == (len(d) - 1)) or (d[((i + 1), 0)] == 'NULL'))):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
if (i == (len(d) - 1)):
code[key][tmp] = d[(i, 1)]
else:
tmp2 = list(d[i:j, 1])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = tmp2
elif ((tmp != 'NULL') and (d[(i, 2)] != 'NULL') and ((i == (len(d) - 1)) or (d[((i + 1), 0)] == 'NULL'))):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
tmp1 = list(d[i:j, 1])
tmp2 = list(d[i:j, 2])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = dict(zip(tmp1, tmp2))
elif (tmp == 'NULL'):
continue
else:
code[key][tmp] = d[(i, 1)]
return code
| 2,747,203,315,166,959,600
|
Read a code file and return it as a dict
1. supports the JSON format
2. supports the xlsx format defined by this package
see also to_code
|
reportgen/questionnaire/questionnaire.py
|
read_code
|
brightgeng/reportgen
|
python
|
def read_code(filename):
'Read a code file and return it as a dict\n 1. supports the JSON format\n 2. supports the xlsx format defined by this package\n see also to_code\n '
file_type = os.path.splitext(filename)[1][1:]
if (file_type == 'json'):
import json
code = json.load(open(filename, encoding='utf-8'))  # json.load needs a file object, not a path
return code
d = pd.read_excel(filename, header=None)
d = d[d.any(axis=1)]
d.fillna('NULL', inplace=True)
d = d.values
code = {}
for i in range(len(d)):
tmp = d[(i, 0)].strip()
if (tmp == 'key'):
code[d[(i, 1)]] = {}
key = d[(i, 1)]
elif (tmp in ['qlist', 'code_order']):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
tmp2 = list(d[i:j, 1])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = tmp2
elif (tmp in ['code', 'code_r']):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
tmp1 = list(d[i:j, 1])
tmp2 = list(d[i:j, 2])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = dict(zip(tmp1, tmp2))
elif ((tmp != 'NULL') and (d[(i, 2)] == 'NULL') and ((i == (len(d) - 1)) or (d[((i + 1), 0)] == 'NULL'))):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
if (i == (len(d) - 1)):
code[key][tmp] = d[(i, 1)]
else:
tmp2 = list(d[i:j, 1])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = tmp2
elif ((tmp != 'NULL') and (d[(i, 2)] != 'NULL') and ((i == (len(d) - 1)) or (d[((i + 1), 0)] == 'NULL'))):
ind = np.argwhere((d[(i + 1):, 0] != 'NULL'))
if (len(ind) > 0):
j = ((i + 1) + ind[0][0])
else:
j = len(d)
tmp1 = list(d[i:j, 1])
tmp2 = list(d[i:j, 2])
for i in range(len(tmp2)):
if isinstance(tmp2[i], str):
tmp2[i] = tmp2[i].strip()
code[key][tmp] = dict(zip(tmp1, tmp2))
elif (tmp == 'NULL'):
continue
else:
code[key][tmp] = d[(i, 1)]
return code
|
def save_code(code, filename='code.xlsx'):
'Write the code dict to a local file\n 1. JSON format, detected automatically from the file name\n 2. Excel format\n see also read_code\n '
save_type = os.path.splitext(filename)[1][1:]
if (save_type == 'json'):
code = pd.DataFrame(code)
code.to_json(filename, force_ascii=False)
return
tmp = pd.DataFrame(columns=['name', 'value1', 'value2'])
i = 0
if all([('Q' in c[0]) for c in code.keys()]):
key_qlist = sorted(code, key=(lambda c: int(re.findall('\\d+', c)[0])))
else:
key_qlist = code.keys()
for key in key_qlist:
code0 = code[key]
tmp.loc[i] = ['key', key, '']
i += 1
for key0 in code0:
tmp2 = code0[key0]
if ((type(tmp2) == list) and tmp2):
tmp.loc[i] = [key0, tmp2[0], '']
i += 1
for ll in tmp2[1:]:
tmp.loc[i] = ['', ll, '']
i += 1
elif ((type(tmp2) == dict) and tmp2):
try:
tmp2_key = sorted(tmp2, key=(lambda c: float(re.findall('[\\d\\.]+', ('%s' % c))[(- 1)])))
except:
tmp2_key = list(tmp2.keys())
j = 0
for key1 in tmp2_key:
if (j == 0):
tmp.loc[i] = [key0, key1, tmp2[key1]]
else:
tmp.loc[i] = ['', key1, tmp2[key1]]
i += 1
j += 1
elif tmp2:
tmp.loc[i] = [key0, tmp2, '']
i += 1
if (sys.version > '3'):
tmp.to_excel(filename, index=False, header=False)
else:
tmp.to_csv(filename, index=False, header=False, encoding='utf-8')
| -741,657,556,108,953,100
|
Write the code dict to a local file
1. JSON format, detected automatically from the file name
2. Excel format
see also read_code
|
reportgen/questionnaire/questionnaire.py
|
save_code
|
brightgeng/reportgen
|
python
|
def save_code(code, filename='code.xlsx'):
'Write the code dict to a local file\n 1. JSON format, detected automatically from the file name\n 2. Excel format\n see also read_code\n '
save_type = os.path.splitext(filename)[1][1:]
if (save_type == 'json'):
code = pd.DataFrame(code)
code.to_json(filename, force_ascii=False)
return
tmp = pd.DataFrame(columns=['name', 'value1', 'value2'])
i = 0
if all([('Q' in c[0]) for c in code.keys()]):
key_qlist = sorted(code, key=(lambda c: int(re.findall('\\d+', c)[0])))
else:
key_qlist = code.keys()
for key in key_qlist:
code0 = code[key]
tmp.loc[i] = ['key', key, '']
i += 1
for key0 in code0:
tmp2 = code0[key0]
if ((type(tmp2) == list) and tmp2):
tmp.loc[i] = [key0, tmp2[0], '']
i += 1
for ll in tmp2[1:]:
tmp.loc[i] = ['', ll, '']
i += 1
elif ((type(tmp2) == dict) and tmp2):
try:
tmp2_key = sorted(tmp2, key=(lambda c: float(re.findall('[\\d\\.]+', ('%s' % c))[(- 1)])))
except:
tmp2_key = list(tmp2.keys())
j = 0
for key1 in tmp2_key:
if (j == 0):
tmp.loc[i] = [key0, key1, tmp2[key1]]
else:
tmp.loc[i] = ['', key1, tmp2[key1]]
i += 1
j += 1
elif tmp2:
tmp.loc[i] = [key0, tmp2, '']
i += 1
if (sys.version > '3'):
tmp.to_excel(filename, index=False, header=False)
else:
tmp.to_csv(filename, index=False, header=False, encoding='utf-8')
|
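A round-trip sketch with read_code, using a made-up single-choice coding; the xlsx layout is the one these two functions define between themselves.

code = {'Q1': {'content': 'gender',
               'qtype': '单选题',  # single-choice question
               'qlist': ['Q1'],
               'code': {1: 'Male', 2: 'Female'}}}
save_code(code, 'code.xlsx')
code2 = read_code('code.xlsx')  # should reproduce the dict above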
def dataText_to_code(df, sep, qqlist=None):
'Encode free-text multi-select or ranking answers into coded columns\n\n '
if (sep in [';', '┋']):
qtype = '多选题'
elif (sep in ['-->', '→']):
qtype = '排序题'
if (not qqlist):
qqlist = df.columns
code = {}
for qq in qqlist:
tmp = df[qq].map((lambda x: (x.split(sep) if isinstance(x, str) else [])))
item_list = sorted(set(tmp.sum()))
if (qtype == '多选题'):
tmp = tmp.map((lambda x: [int((t in x)) for t in item_list]))
code_tmp = {'code': {}, 'qtype': u'多选题', 'qlist': [], 'content': qq}
elif (qtype == '排序题'):
tmp = tmp.map((lambda x: [((x.index(t) + 1) if (t in x) else np.nan) for t in item_list]))
code_tmp = {'code': {}, 'qtype': u'排序题', 'qlist': [], 'content': qq}
for (i, t) in enumerate(item_list):
column_name = '{}_A{:.0f}'.format(qq, (i + 1))
df[column_name] = tmp.map((lambda x: x[i]))
code_tmp['code'][column_name] = item_list[i]
code_tmp['qlist'] = (code_tmp['qlist'] + [column_name])
code[qq] = code_tmp
df.drop(qq, axis=1, inplace=True)
return (df, code)
| -3,276,474,069,112,958,000
|
Encode free-text multi-select or ranking answers into coded columns
|
reportgen/questionnaire/questionnaire.py
|
dataText_to_code
|
brightgeng/reportgen
|
python
|
def dataText_to_code(df, sep, qqlist=None):
'\n\n '
if (sep in [';', '┋']):
qtype = '多选题'
elif (sep in ['-->', '→']):
qtype = '排序题'
if (not qqlist):
qqlist = df.columns
code = {}
for qq in qqlist:
tmp = df[qq].map((lambda x: (x.split(sep) if isinstance(x, str) else [])))
item_list = sorted(set(tmp.sum()))
if (qtype == '多选题'):
tmp = tmp.map((lambda x: [int((t in x)) for t in item_list]))
code_tmp = {'code': {}, 'qtype': u'多选题', 'qlist': [], 'content': qq}
elif (qtype == '排序题'):
tmp = tmp.map((lambda x: [((x.index(t) + 1) if (t in x) else np.nan) for t in item_list]))
code_tmp = {'code': {}, 'qtype': u'排序题', 'qlist': [], 'content': qq}
for (i, t) in enumerate(item_list):
column_name = '{}_A{:.0f}'.format(qq, (i + 1))
df[column_name] = tmp.map((lambda x: x[i]))
code_tmp['code'][column_name] = item_list[i]
code_tmp['qlist'] = (code_tmp['qlist'] + [column_name])
code[qq] = code_tmp
df.drop(qq, axis=1, inplace=True)
return (df, code)
|
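A minimal sketch with a made-up multi-select column: the '┋' separator selects the multi-select branch, each distinct item becomes a dummy column Q5_A1, Q5_A2, ..., and code['Q5']['code'] maps the new columns back to the item labels.

import pandas as pd

df = pd.DataFrame({'Q5': ['A┋B', 'B', 'A┋C']})
df, code = dataText_to_code(df, sep='┋', qqlist=['Q5'])
print(code['Q5']['qlist'])  # ['Q5_A1', 'Q5_A2', 'Q5_A3'], one dummy per item 'A', 'B', 'C'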
def dataCode_to_text(df, code=None):
'Convert coded columns back into joined text\n\n '
if (df.max().max() > 1):
sep = '→'
else:
sep = '┋'
if code:
df = df.rename(columns=code)
qlist = list(df.columns)
df['text'] = np.nan
if (sep in ['┋']):
for i in df.index:
w = (df.loc[i, :] == 1)
df.loc[(i, 'text')] = sep.join(list(w.index[w]))
elif (sep in ['→']):
for i in df.index:
w = df.loc[i, :]
w = w[(w >= 1)].sort_values()
df.loc[(i, 'text')] = sep.join(list(w.index))
df.drop(qlist, axis=1, inplace=True)
return df
| 2,083,806,469,545,990,400
|
Convert coded columns back into joined text
|
reportgen/questionnaire/questionnaire.py
|
dataCode_to_text
|
brightgeng/reportgen
|
python
|
def dataCode_to_text(df, code=None):
'\n\n '
if (df.max().max() > 1):
sep = '→'
else:
sep = '┋'
if code:
df = df.rename(columns=code)
qlist = list(df.columns)
df['text'] = np.nan
if (sep in ['┋']):
for i in df.index:
w = (df.loc[i, :] == 1)
df.loc[(i, 'text')] = sep.join(list(w.index[w]))
elif (sep in ['→']):
for i in df.index:
w = df.loc[i, :]
w = w[(w >= 1)].sort_values()
df.loc[(i, 'text')] = sep.join(list(w.index))
df.drop(qlist, axis=1, inplace=True)
return df
|
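The inverse sketch on made-up dummy columns; the column-to-label mapping is illustrative and relies on the columns= rename above.

import pandas as pd

df = pd.DataFrame({'Q5_A1': [1, 0], 'Q5_A2': [1, 1]})
out = dataCode_to_text(df, code={'Q5_A1': 'A', 'Q5_A2': 'B'})
print(list(out['text']))  # ['A┋B', 'B']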
def var_combine(data, code, qq1, qq2, sep=',', qnum_new=None, qname_new=None):
"将两个变量组合成一个变量\n 例如:\n Q1:'性别',Q2: 年龄\n 组合后生成:\n 1、男_16~19岁\n 2、男_20岁~40岁\n 3、女_16~19岁\n 4、女_20~40岁\n "
if (qnum_new is None):
if ('Q' == qq2[0]):
qnum_new = ((qq1 + '_') + qq2[1:])
else:
qnum_new = ((qq1 + '_') + qq2)
if (qname_new is None):
qname_new = ((code[qq1]['content'] + '_') + code[qq2]['content'])
if ((code[qq1]['qtype'] != '单选题') or (code[qq2]['qtype'] != '单选题')):
print('只支持组合两个单选题,请检查.')
raise
d1 = data[code[qq1]['qlist'][0]]
d2 = data[code[qq2]['qlist'][0]]
sm = max(code[qq1]['code'].keys())
sn = max(code[qq2]['code'].keys())
if (isinstance(sm, str) or isinstance(sn, str)):
print('所选择的两个变量不符合函数要求.')
raise
data[qnum_new] = (((d1 - 1) * sn) + d2)
code[qnum_new] = {'qtype': '单选题', 'qlist': [qnum_new], 'content': qname_new}
code_tmp = {}
for c1 in code[qq1]['code']:
for c2 in code[qq2]['code']:
cc = (((c1 - 1) * sn) + c2)
value = '{}{}{}'.format(code[qq1]['code'][c1], sep, code[qq2]['code'][c2])
code_tmp[cc] = value
code[qnum_new]['code'] = code_tmp
print('变量已合并,新变量题号为:{}'.format(qnum_new))
return (data, code)
| -7,023,054,160,902,175,000
|
Combine two variables into one
for example:
Q1: 'gender', Q2: age
the combination yields:
1. Male_16~19 years
2. Male_20~40 years
3. Female_16~19 years
4. Female_20~40 years
|
reportgen/questionnaire/questionnaire.py
|
var_combine
|
brightgeng/reportgen
|
python
|
def var_combine(data, code, qq1, qq2, sep=',', qnum_new=None, qname_new=None):
"将两个变量组合成一个变量\n 例如:\n Q1:'性别',Q2: 年龄\n 组合后生成:\n 1、男_16~19岁\n 2、男_20岁~40岁\n 3、女_16~19岁\n 4、女_20~40岁\n "
if (qnum_new is None):
if ('Q' == qq2[0]):
qnum_new = ((qq1 + '_') + qq2[1:])
else:
qnum_new = ((qq1 + '_') + qq2)
if (qname_new is None):
qname_new = ((code[qq1]['content'] + '_') + code[qq2]['content'])
if ((code[qq1]['qtype'] != '单选题') or (code[qq2]['qtype'] != '单选题')):
print('只支持组合两个单选题,请检查.')
raise
d1 = data[code[qq1]['qlist'][0]]
d2 = data[code[qq2]['qlist'][0]]
sm = max(code[qq1]['code'].keys())
sn = max(code[qq2]['code'].keys())
if (isinstance(sm, str) or isinstance(sn, str)):
print('所选择的两个变量不符合函数要求.')
raise
data[qnum_new] = (((d1 - 1) * sn) + d2)
code[qnum_new] = {'qtype': '单选题', 'qlist': [qnum_new], 'content': qname_new}
code_tmp = {}
for c1 in code[qq1]['code']:
for c2 in code[qq2]['code']:
cc = (((c1 - 1) * sn) + c2)
value = '{}{}{}'.format(code[qq1]['code'][c1], sep, code[qq2]['code'][c2])
code_tmp[cc] = value
code[qnum_new]['code'] = code_tmp
print('变量已合并,新变量题号为:{}'.format(qnum_new))
return (data, code)
|
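The combined category index is (c1 - 1) * sn + c2, where sn is the number of categories of the second question; a quick check with made-up codes:

sn = 4                   # the second question has 4 categories, e.g. age bands
c1, c2 = 2, 3            # category 2 of Q1 combined with category 3 of Q2
cc = (c1 - 1) * sn + c2  # 7: a distinct code for every (c1, c2) pair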
def wenjuanwang(filepath='.\\data', encoding='gbk'):
'Import and encode wenjuanwang survey data\n Input:\n filepath:\n a list: [0] the text-labelled data file, [1] the numerically coded data file, [2] the coding file\n or a folder path; the function searches the folder for the relevant files automatically\n Output:\n (data, code):\n data is the numerically coded data, with question titles replaced by Q_n\n code is the coding dict, exportable to JSON or Excel with to_code()\n '
if isinstance(filepath, list):
filename1 = filepath[0]
filename2 = filepath[1]
filename3 = filepath[2]
elif os.path.isdir(filepath):
filename1 = os.path.join(filepath, 'All_Data_Readable.csv')
filename2 = os.path.join(filepath, 'All_Data_Original.csv')
filename3 = os.path.join(filepath, 'code.csv')
else:
print('cannot detect the filepath!')
d1 = pd.read_csv(filename1, encoding=encoding)
d1.drop([u'答题时长'], axis=1, inplace=True)
d2 = pd.read_csv(filename2, encoding=encoding)
d3 = pd.read_csv(filename3, encoding=encoding, header=None, na_filter=False)
d3 = d3.values
code = {}
for i in range(len(d3)):
if d3[(i, 0)]:
key = d3[(i, 0)]
code[key] = {}
code[key]['content'] = d3[(i, 1)]
code[key]['qtype'] = d3[(i, 2)]
code[key]['code'] = {}
code[key]['qlist'] = []
elif d3[(i, 2)]:
tmp = d3[(i, 1)]
if (code[key]['qtype'] in [u'多选题', u'排序题']):
tmp = ((key + '_A') + ('%s' % tmp))
code[key]['code'][tmp] = ('%s' % d3[(i, 2)])
code[key]['qlist'].append(tmp)
elif (code[key]['qtype'] in [u'单选题']):
try:
tmp = int(tmp)
except:
tmp = ('%s' % tmp)
code[key]['code'][tmp] = ('%s' % d3[(i, 2)])
code[key]['qlist'] = [key]
elif (code[key]['qtype'] in [u'填空题']):
code[key]['qlist'] = [key]
else:
try:
tmp = int(tmp)
except:
tmp = ('%s' % tmp)
code[key]['code'][tmp] = ('%s' % d3[(i, 2)])
qnames_Readable = list(d1.columns)
qnames = list(d2.columns)
for key in code.keys():
qlist = []
for name in qnames:
if (re.match((key + '_'), name) or (key == name)):
qlist.append(name)
if (('qlist' not in code[key]) or (not code[key]['qlist'])):
code[key]['qlist'] = qlist
if (code[key]['qtype'] in [u'矩阵单选题']):
tmp = [qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]
code_r = [re.findall('_([^_]*?)$', t)[0] for t in tmp]
code[key]['code_r'] = dict(zip(code[key]['qlist'], code_r))
d2['start'] = pd.to_datetime(d2['start'])
d2['finish'] = pd.to_datetime(d2['finish'])
tmp = (d2['finish'] - d2['start'])
tmp = tmp.astype(str).map((lambda x: ((60 * int(re.findall(':(\\d+):', x)[0])) + int(re.findall(':(\\d+)\\.', x)[0]))))
ind = np.where((d2.columns == 'finish'))[0][0]
d2.insert((int(ind) + 1), u'答题时长(秒)', tmp)
return (d2, code)
| 575,949,123,954,226,600
|
Import and encode wenjuanwang survey data
Input:
filepath:
a list: [0] the text-labelled data file, [1] the numerically coded data file, [2] the coding file
or a folder path; the function searches the folder for the relevant files automatically
Output:
(data, code):
data is the numerically coded data, with question titles replaced by Q_n
code is the coding dict, exportable to JSON or Excel with to_code()
|
reportgen/questionnaire/questionnaire.py
|
wenjuanwang
|
brightgeng/reportgen
|
python
|
def wenjuanwang(filepath='.\\data', encoding='gbk'):
'Import and encode wenjuanwang survey data\n Input:\n filepath:\n a list: [0] the text-labelled data file, [1] the numerically coded data file, [2] the coding file\n or a folder path; the function searches the folder for the relevant files automatically\n Output:\n (data, code):\n data is the numerically coded data, with question titles replaced by Q_n\n code is the coding dict, exportable to JSON or Excel with to_code()\n '
if isinstance(filepath, list):
filename1 = filepath[0]
filename2 = filepath[1]
filename3 = filepath[2]
elif os.path.isdir(filepath):
filename1 = os.path.join(filepath, 'All_Data_Readable.csv')
filename2 = os.path.join(filepath, 'All_Data_Original.csv')
filename3 = os.path.join(filepath, 'code.csv')
else:
print('cannot detect the filepath!')
d1 = pd.read_csv(filename1, encoding=encoding)
d1.drop([u'答题时长'], axis=1, inplace=True)
d2 = pd.read_csv(filename2, encoding=encoding)
d3 = pd.read_csv(filename3, encoding=encoding, header=None, na_filter=False)
d3 = d3.values
code = {}
for i in range(len(d3)):
if d3[(i, 0)]:
key = d3[(i, 0)]
code[key] = {}
code[key]['content'] = d3[(i, 1)]
code[key]['qtype'] = d3[(i, 2)]
code[key]['code'] = {}
code[key]['qlist'] = []
elif d3[(i, 2)]:
tmp = d3[(i, 1)]
if (code[key]['qtype'] in [u'多选题', u'排序题']):
tmp = ((key + '_A') + ('%s' % tmp))
code[key]['code'][tmp] = ('%s' % d3[(i, 2)])
code[key]['qlist'].append(tmp)
elif (code[key]['qtype'] in [u'单选题']):
try:
tmp = int(tmp)
except:
tmp = ('%s' % tmp)
code[key]['code'][tmp] = ('%s' % d3[(i, 2)])
code[key]['qlist'] = [key]
elif (code[key]['qtype'] in [u'填空题']):
code[key]['qlist'] = [key]
else:
try:
tmp = int(tmp)
except:
tmp = ('%s' % tmp)
code[key]['code'][tmp] = ('%s' % d3[(i, 2)])
qnames_Readable = list(d1.columns)
qnames = list(d2.columns)
for key in code.keys():
qlist = []
for name in qnames:
if (re.match((key + '_'), name) or (key == name)):
qlist.append(name)
if (('qlist' not in code[key]) or (not code[key]['qlist'])):
code[key]['qlist'] = qlist
if (code[key]['qtype'] in [u'矩阵单选题']):
tmp = [qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]
code_r = [re.findall('_([^_]*?)$', t)[0] for t in tmp]
code[key]['code_r'] = dict(zip(code[key]['qlist'], code_r))
d2['start'] = pd.to_datetime(d2['start'])
d2['finish'] = pd.to_datetime(d2['finish'])
tmp = (d2['finish'] - d2['start'])
tmp = tmp.astype(str).map((lambda x: ((60 * int(re.findall(':(\\d+):', x)[0])) + int(re.findall(':(\\d+)\\.', x)[0]))))
ind = np.where((d2.columns == 'finish'))[0][0]
d2.insert((int(ind) + 1), u'答题时长(秒)', tmp)
return (d2, code)
|
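A hypothetical usage, assuming the three wenjuanwang export files sit under ./data; the returned coding dict can then be saved with save_code defined above.

data, code = wenjuanwang(filepath='./data', encoding='gbk')
save_code(code, 'code.xlsx')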
def wenjuanxing(filepath='.\\data', headlen=6):
'Import and encode wenjuanxing survey data\n Input:\n filepath:\n a list: filepath[0] (23_22_0.xls) is the text-labelled data file, filepath[1] (23_22_2.xls) the numerically coded file\n or a folder path; the function searches the folder for the relevant files, preferring \\d+_\\d+_0.xls and \\d+_\\d+_2.xls\n headlen: number of leading metadata columns in the wenjuanxing export\n Output:\n (data, code):\n data is the numerically coded data, with question titles replaced by Q_n\n code is the coding dict, exportable to JSON or Excel with to_code()\n '
if isinstance(filepath, list):
filename1 = filepath[0]
filename2 = filepath[1]
elif os.path.isdir(filepath):
filelist = os.listdir(filepath)
n1 = n2 = 0
for f in filelist:
s1 = re.findall('\\d+_\\d+_0.xls', f)
s2 = re.findall('\\d+_\\d+_2.xls', f)
if s1:
filename1 = s1[0]
n1 += 1
if s2:
filename2 = s2[0]
n2 += 1
if ((n1 + n2) == 0):
print(u'在文件夹下没有找到问卷星按序号和按文本数据,请检查目录或者工作目录.')
return
elif ((n1 + n2) > 2):
print(u'存在多组问卷星数据,请检查.')
return
filename1 = os.path.join(filepath, filename1)
filename2 = os.path.join(filepath, filename2)
else:
print('cannot detect the filepath!')
d1 = pd.read_excel(filename1)
d2 = pd.read_excel(filename2)
d2.replace({(- 2): np.nan, (- 3): np.nan}, inplace=True)
code = {}
'\n walk the text-labelled data once to collect question numbers and each question type\n '
for name in d1.columns[headlen:]:
tmp = re.findall(u'^(\\d{1,3})[、::]', name)
if tmp:
new_name = ('Q' + tmp[0])
current_name = ('Q' + tmp[0])
code[new_name] = {}
content = re.findall(u'\\d{1,3}[、::](.*)', name)
code[new_name]['content'] = content[0]
d1.rename(columns={name: new_name}, inplace=True)
code[new_name]['qlist'] = []
code[new_name]['code'] = {}
code[new_name]['qtype'] = ''
code[new_name]['name'] = ''
qcontent = str(list(d1[new_name]))
if (('〖' in qcontent) and ('〗' in qcontent)):
code[new_name]['qlist_open'] = []
if ('┋' in qcontent):
code[new_name]['qtype'] = u'多选题'
elif ('→' in qcontent):
code[new_name]['qtype'] = u'排序题'
else:
tmp2 = re.findall(u'^第(\\d{1,3})题\\(.*?\\)', name)
if tmp2:
new_name = ('Q' + tmp2[0])
else:
pass
if (new_name not in code.keys()):
j = 1
current_name = new_name
new_name = (new_name + ('_R%s' % j))
code[current_name] = {}
code[current_name]['content'] = (current_name + '(问卷星数据中未找到题目具体内容)')
code[current_name]['qlist'] = []
code[current_name]['code'] = {}
code[current_name]['code_r'] = {}
code[current_name]['qtype'] = u'矩阵单选题'
code[current_name]['name'] = ''
d1.rename(columns={name: new_name}, inplace=True)
else:
j += 1
new_name = (new_name + ('_R%s' % j))
d1.rename(columns={name: new_name}, inplace=True)
d2qlist = d2.columns[6:].tolist()
for name in d2qlist:
tmp1 = re.findall(u'^(\\d{1,3})[、::]', name)
tmp2 = re.findall(u'^第(.*?)题', name)
if tmp1:
current_name = ('Q' + tmp1[0])
d2.rename(columns={name: current_name}, inplace=True)
code[current_name]['qlist'].append(current_name)
ind = d2[current_name].copy()
ind = ind.notnull()
c1 = d1.loc[(ind, current_name)].unique()
c2 = d2.loc[(ind, current_name)].unique()
if ((c2.dtype == object) or ((list(c1) == list(c2)) and (len(c2) >= min(15, len(d2[ind])))) or (len(c2) > 50)):
code[current_name]['qtype'] = u'填空题'
else:
code[current_name]['qtype'] = u'单选题'
if ('qlist_open' in code[current_name].keys()):
tmp = d1[current_name].map((lambda x: (re.findall('〖(.*?)〗', x)[0] if re.findall('〖(.*?)〗', x) else '')))
ind_open = np.argwhere((d2.columns.values == current_name)).tolist()[0][0]
d2.insert((ind_open + 1), (current_name + '_open'), tmp)
d1[current_name] = d1[current_name].map((lambda x: re.sub('〖.*?〗', '', x)))
code[current_name]['qlist_open'] = [(current_name + '_open')]
code[current_name]['code'] = dict(zip(d2.loc[(ind, current_name)], d1.loc[(ind, current_name)]))
elif tmp2:
name0 = ('Q' + tmp2[0])
if (name0 != current_name):
j = 1
current_name = name0
c2 = list(d2[name].unique())
if (code[current_name]['qtype'] == u'矩阵单选题'):
name1 = (('Q' + tmp2[0]) + ('_R%s' % j))
c1 = list(d1[name1].unique())
code[current_name]['code'] = dict(zip(c2, c1))
else:
name1 = (('Q' + tmp2[0]) + ('_A%s' % j))
else:
j += 1
c2 = list(d2[name].unique())
if (code[current_name]['qtype'] == u'矩阵单选题'):
name1 = (('Q' + tmp2[0]) + ('_R%s' % j))
c1 = list(d1[name1].unique())
old_dict = code[current_name]['code'].copy()
new_dict = dict(zip(c2, c1))
old_dict.update(new_dict)
code[current_name]['code'] = old_dict.copy()
else:
name1 = (('Q' + tmp2[0]) + ('_A%s' % j))
code[current_name]['qlist'].append(name1)
d2.rename(columns={name: name1}, inplace=True)
tmp3 = re.findall(u'第.*?题\\((.*)\\)', name)[0]
if (code[current_name]['qtype'] == u'矩阵单选题'):
code[current_name]['code_r'][name1] = tmp3
else:
code[current_name]['code'][name1] = tmp3
if (code[current_name]['qtype'] == u'多选题'):
openq = (tmp3 + '〖.*?〗')
openq = re.sub('\\)', '\\)', openq)
openq = re.sub('\\(', '\\(', openq)
openq = re.compile(openq)
qcontent = str(list(d1[current_name]))
if re.findall(openq, qcontent):
tmp = d1[current_name].map((lambda x: (re.findall(openq, x)[0] if re.findall(openq, x) else '')))
ind = np.argwhere((d2.columns.values == name1)).tolist()[0][0]
d2.insert((ind + 1), (name1 + '_open'), tmp)
code[current_name]['qlist_open'].append((name1 + '_open'))
keys = list(code[current_name]['code'].keys())
for key in keys:
if (('%s' % key) == 'nan'):
del code[current_name]['code'][key]
for k in code.keys():
content = code[k]['content']
qtype = code[k]['qtype']
if (('code' in code[k]) and (code[k]['code'] != {})):
tmp1 = code[k]['code'].keys()
tmp2 = code[k]['code'].values()
tmp3 = [(len(re.findall('\\d+', ('%s' % v))) > 0) for v in tmp2]
tmp4 = [(len(re.findall('-|~', ('%s' % v))) > 0) for v in tmp2]
if ((np.array(tmp3).sum() >= (len(tmp2) - 2)) or (np.array(tmp4).sum() >= ((len(tmp2) * 0.8) - 1e-17))):
try:
tmp_key = sorted(code[k]['code'], key=(lambda c: float(re.findall('[\\d\\.]+', ('%s' % c))[(- 1)])))
except:
tmp_key = list(tmp1)
code_order = [code[k]['code'][v] for v in tmp_key]
code[k]['code_order'] = code_order
if (qtype == '矩阵单选题'):
tmp3 = [int(re.findall('\\d+', ('%s' % v))[0]) for v in tmp2 if re.findall('\\d+', ('%s' % v))]
if ((set(tmp3) <= set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) and (len(tmp3) == len(tmp2))):
code[k]['weight'] = dict(zip(tmp1, tmp3))
continue
if (('性别' in content) and ('男' in tmp2) and ('女' in tmp2)):
code[k]['name'] = '性别'
if (('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2)):
code[k]['name'] = '性别'
if ((('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum() >= (len(tmp2) - 1))):
code[k]['name'] = '年龄'
if (('满意度' in content) and ('整体' in content)):
tmp3 = [int(re.findall('\\d+', ('%s' % v))[0]) for v in tmp2 if re.findall('\\d+', ('%s' % v))]
if (set(tmp3) <= set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])):
code[k]['name'] = '满意度'
if (len(tmp3) == len(tmp2)):
code[k]['weight'] = dict(zip(tmp1, tmp3))
if (('意愿' in content) and ('推荐' in content)):
tmp3 = [int(re.findall('\\d+', ('%s' % v))[0]) for v in tmp2 if re.findall('\\d+', ('%s' % v))]
if (set(tmp3) <= set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])):
code[k]['name'] = 'NPS'
if (len(tmp3) == len(tmp2)):
weight = pd.Series(dict(zip(tmp1, tmp3)))
weight = weight.replace(dict(zip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [(- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), 0, 0, 100, 100])))
code[k]['weight'] = weight.to_dict()
try:
d2[u'所用时间'] = d2[u'所用时间'].map((lambda s: int(s[:(- 1)])))
except:
pass
return (d2, code)
| -4,022,388,977,035,539,000
|
Import and encode wenjuanxing survey data
Input:
filepath:
a list: filepath[0] (23_22_0.xls) is the text-labelled data file, filepath[1] (23_22_2.xls) the numerically coded file
or a folder path; the function searches the folder for the relevant files, preferring \d+_\d+_0.xls and \d+_\d+_2.xls
headlen: number of leading metadata columns in the wenjuanxing export
Output:
(data, code):
data is the numerically coded data, with question titles replaced by Q_n
code is the coding dict, exportable to JSON or Excel with to_code()
|
reportgen/questionnaire/questionnaire.py
|
wenjuanxing
|
brightgeng/reportgen
|
python
|
def wenjuanxing(filepath='.\\data', headlen=6):
'Import and encode wenjuanxing survey data\n Input:\n filepath:\n a list: filepath[0] (23_22_0.xls) is the text-labelled data file, filepath[1] (23_22_2.xls) the numerically coded file\n or a folder path; the function searches the folder for the relevant files, preferring \\d+_\\d+_0.xls and \\d+_\\d+_2.xls\n headlen: number of leading metadata columns in the wenjuanxing export\n Output:\n (data, code):\n data is the numerically coded data, with question titles replaced by Q_n\n code is the coding dict, exportable to JSON or Excel with to_code()\n '
if isinstance(filepath, list):
filename1 = filepath[0]
filename2 = filepath[1]
elif os.path.isdir(filepath):
filelist = os.listdir(filepath)
n1 = n2 = 0
for f in filelist:
s1 = re.findall('\\d+_\\d+_0.xls', f)
s2 = re.findall('\\d+_\\d+_2.xls', f)
if s1:
filename1 = s1[0]
n1 += 1
if s2:
filename2 = s2[0]
n2 += 1
if ((n1 + n2) == 0):
print(u'在文件夹下没有找到问卷星按序号和按文本数据,请检查目录或者工作目录.')
return
elif ((n1 + n2) > 2):
print(u'存在多组问卷星数据,请检查.')
return
filename1 = os.path.join(filepath, filename1)
filename2 = os.path.join(filepath, filename2)
else:
print('cannot detect the filepath!')
d1 = pd.read_excel(filename1)
d2 = pd.read_excel(filename2)
d2.replace({(- 2): np.nan, (- 3): np.nan}, inplace=True)
code = {}
'\n walk the text-labelled data once to collect question numbers and each question type\n '
for name in d1.columns[headlen:]:
tmp = re.findall(u'^(\\d{1,3})[、::]', name)
if tmp:
new_name = ('Q' + tmp[0])
current_name = ('Q' + tmp[0])
code[new_name] = {}
content = re.findall(u'\\d{1,3}[、::](.*)', name)
code[new_name]['content'] = content[0]
d1.rename(columns={name: new_name}, inplace=True)
code[new_name]['qlist'] = []
code[new_name]['code'] = {}
code[new_name]['qtype'] = ''
code[new_name]['name'] = ''
qcontent = str(list(d1[new_name]))
if (('〖' in qcontent) and ('〗' in qcontent)):
code[new_name]['qlist_open'] = []
if ('┋' in qcontent):
code[new_name]['qtype'] = u'多选题'
elif ('→' in qcontent):
code[new_name]['qtype'] = u'排序题'
else:
tmp2 = re.findall(u'^第(\\d{1,3})题\\(.*?\\)', name)
if tmp2:
new_name = ('Q' + tmp2[0])
else:
pass
if (new_name not in code.keys()):
j = 1
current_name = new_name
new_name = (new_name + ('_R%s' % j))
code[current_name] = {}
code[current_name]['content'] = (current_name + '(问卷星数据中未找到题目具体内容)')
code[current_name]['qlist'] = []
code[current_name]['code'] = {}
code[current_name]['code_r'] = {}
code[current_name]['qtype'] = u'矩阵单选题'
code[current_name]['name'] = ''
d1.rename(columns={name: new_name}, inplace=True)
else:
j += 1
new_name = (new_name + ('_R%s' % j))
d1.rename(columns={name: new_name}, inplace=True)
d2qlist = d2.columns[6:].tolist()
for name in d2qlist:
tmp1 = re.findall(u'^(\\d{1,3})[、::]', name)
tmp2 = re.findall(u'^第(.*?)题', name)
if tmp1:
current_name = ('Q' + tmp1[0])
d2.rename(columns={name: current_name}, inplace=True)
code[current_name]['qlist'].append(current_name)
ind = d2[current_name].copy()
ind = ind.notnull()
c1 = d1.loc[(ind, current_name)].unique()
c2 = d2.loc[(ind, current_name)].unique()
if ((c2.dtype == object) or ((list(c1) == list(c2)) and (len(c2) >= min(15, len(d2[ind])))) or (len(c2) > 50)):
code[current_name]['qtype'] = u'填空题'
else:
code[current_name]['qtype'] = u'单选题'
if ('qlist_open' in code[current_name].keys()):
tmp = d1[current_name].map((lambda x: (re.findall('〖(.*?)〗', x)[0] if re.findall('〖(.*?)〗', x) else '')))
ind_open = np.argwhere((d2.columns.values == current_name)).tolist()[0][0]
d2.insert((ind_open + 1), (current_name + '_open'), tmp)
d1[current_name] = d1[current_name].map((lambda x: re.sub('〖.*?〗', '', x)))
code[current_name]['qlist_open'] = [(current_name + '_open')]
code[current_name]['code'] = dict(zip(d2.loc[(ind, current_name)], d1.loc[(ind, current_name)]))
elif tmp2:
name0 = ('Q' + tmp2[0])
if (name0 != current_name):
j = 1
current_name = name0
c2 = list(d2[name].unique())
if (code[current_name]['qtype'] == u'矩阵单选题'):
name1 = (('Q' + tmp2[0]) + ('_R%s' % j))
c1 = list(d1[name1].unique())
code[current_name]['code'] = dict(zip(c2, c1))
else:
name1 = (('Q' + tmp2[0]) + ('_A%s' % j))
else:
j += 1
c2 = list(d2[name].unique())
if (code[current_name]['qtype'] == u'矩阵单选题'):
name1 = (('Q' + tmp2[0]) + ('_R%s' % j))
c1 = list(d1[name1].unique())
old_dict = code[current_name]['code'].copy()
new_dict = dict(zip(c2, c1))
old_dict.update(new_dict)
code[current_name]['code'] = old_dict.copy()
else:
name1 = (('Q' + tmp2[0]) + ('_A%s' % j))
code[current_name]['qlist'].append(name1)
d2.rename(columns={name: name1}, inplace=True)
tmp3 = re.findall(u'第.*?题\\((.*)\\)', name)[0]
if (code[current_name]['qtype'] == u'矩阵单选题'):
code[current_name]['code_r'][name1] = tmp3
else:
code[current_name]['code'][name1] = tmp3
if (code[current_name]['qtype'] == u'多选题'):
openq = (tmp3 + '〖.*?〗')
openq = re.sub('\\)', '\\)', openq)
openq = re.sub('\\(', '\\(', openq)
openq = re.compile(openq)
qcontent = str(list(d1[current_name]))
if re.findall(openq, qcontent):
                    tmp = d1[current_name].map((lambda x: (re.findall(openq, x)[0] if re.findall(openq, x) else '')))
ind = np.argwhere((d2.columns.values == name1)).tolist()[0][0]
d2.insert((ind + 1), (name1 + '_open'), tmp)
code[current_name]['qlist_open'].append((name1 + '_open'))
keys = list(code[current_name]['code'].keys())
for key in keys:
if (('%s' % key) == 'nan'):
del code[current_name]['code'][key]
for k in code.keys():
content = code[k]['content']
qtype = code[k]['qtype']
if (('code' in code[k]) and (code[k]['code'] != {})):
tmp1 = code[k]['code'].keys()
tmp2 = code[k]['code'].values()
tmp3 = [(len(re.findall('\\d+', ('%s' % v))) > 0) for v in tmp2]
tmp4 = [(len(re.findall('-|~', ('%s' % v))) > 0) for v in tmp2]
if ((np.array(tmp3).sum() >= (len(tmp2) - 2)) or (np.array(tmp4).sum() >= ((len(tmp2) * 0.8) - 1e-17))):
try:
tmp_key = sorted(code[k]['code'], key=(lambda c: float(re.findall('[\\d\\.]+', ('%s' % c))[(- 1)])))
except:
tmp_key = list(tmp1)
code_order = [code[k]['code'][v] for v in tmp_key]
code[k]['code_order'] = code_order
if (qtype == '矩阵单选题'):
tmp3 = [int(re.findall('\\d+', ('%s' % v))[0]) for v in tmp2 if re.findall('\\d+', ('%s' % v))]
if ((set(tmp3) <= set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) and (len(tmp3) == len(tmp2))):
code[k]['weight'] = dict(zip(tmp1, tmp3))
continue
if (('性别' in content) and ('男' in tmp2) and ('女' in tmp2)):
code[k]['name'] = '性别'
if (('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2)):
code[k]['name'] = '性别'
if ((('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum() >= (len(tmp2) - 1))):
code[k]['name'] = '年龄'
if (('满意度' in content) and ('整体' in content)):
tmp3 = [int(re.findall('\\d+', ('%s' % v))[0]) for v in tmp2 if re.findall('\\d+', ('%s' % v))]
if (set(tmp3) <= set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])):
code[k]['name'] = '满意度'
if (len(tmp3) == len(tmp2)):
code[k]['weight'] = dict(zip(tmp1, tmp3))
if (('意愿' in content) and ('推荐' in content)):
tmp3 = [int(re.findall('\\d+', ('%s' % v))[0]) for v in tmp2 if re.findall('\\d+', ('%s' % v))]
if (set(tmp3) <= set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])):
code[k]['name'] = 'NPS'
if (len(tmp3) == len(tmp2)):
weight = pd.Series(dict(zip(tmp1, tmp3)))
weight = weight.replace(dict(zip([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [(- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), 0, 0, 100, 100])))
code[k]['weight'] = weight.to_dict()
try:
d2[u'所用时间'] = d2[u'所用时间'].map((lambda s: int(s[:(- 1)])))
except:
pass
return (d2, code)
|
def load_data(method='filedialog', **kwargs):
'导入问卷数据\n # 暂时只支持已编码的和问卷星数据\n 1、支持路径搜寻\n 2、支持自由选择文件\n method:\n -filedialog: 打开文件窗口选择\n -pathsearch:自带搜索路径,需提供filepath\n '
if (method == 'filedialog'):
import tkinter as tk
from tkinter.filedialog import askopenfilenames
tk.Tk().withdraw()
if ('initialdir' in kwargs):
initialdir = kwargs['initialdir']
elif os.path.isdir('.\\data'):
initialdir = '.\\data'
else:
initialdir = '.'
title = u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)'
filetypes = (('Excel files', '*.xls;*.xlsx'), ('CSV files', '*.csv'), ('all files', '*.*'))
filenames = []
while (len(filenames) < 1):
filenames = askopenfilenames(initialdir=initialdir, title=title, filetypes=filetypes)
if (len(filenames) < 1):
print('请至少选择一个文件.')
filenames = list(filenames)
elif (method == 'pathsearch'):
if ('filepath' in kwargs):
filepath = kwargs['filepath']
else:
filepath = '.\\data\\'
if os.path.isdir(filepath):
filenames = os.listdir(filepath)
filenames = [os.path.join(filepath, s) for s in filenames]
else:
print('搜索路径错误')
raise
info = []
for filename in filenames:
filename_nopath = os.path.split(filename)[1]
data = read_data(filename)
field_c1 = set(data.iloc[:, 0].dropna().unique())
field_r1 = set(data.columns)
hqlen = [(len(re.findall('^[qQ]\\d+', c)) > 0) for c in field_r1]
hqrate = ((hqlen.count(True) / len(field_r1)) if (len(field_r1) > 0) else 0)
(rowlens, collens) = data.shape
rate_real = ((data.applymap((lambda x: isinstance(x, (int, float)))).sum().sum() / rowlens) / collens)
tmp = {'filename': filename_nopath, 'filenametype': '', 'rowlens': rowlens, 'collens': collens, 'field_c1': field_c1, 'field_r1': field_r1, 'type': '', 'rate_real': rate_real}
if (len(re.findall('^data.*\\.xls', filename_nopath)) > 0):
tmp['filenametype'] = 'data'
elif (len(re.findall('^code.*\\.xls', filename_nopath)) > 0):
tmp['filenametype'] = 'code'
elif (len(re.findall('\\d+_\\d+_\\d.xls', filename_nopath)) > 0):
tmp['filenametype'] = 'wenjuanxing'
if ((tmp['filenametype'] == 'code') or (set(['key', 'code', 'qlist', 'qtype']) < field_c1)):
tmp['type'] = 'code'
if ((tmp['filenametype'] == 'wenjuanxing') or (len((set(['序号', '提交答卷时间', '所用时间', '来自IP', '来源', '来源详情', '总分']) & field_r1)) >= 5)):
tmp['type'] = 'wenjuanxing'
if ((tmp['filenametype'] == 'data') or (hqrate >= 0.5)):
tmp['type'] = 'data'
info.append(tmp)
questype = [k['type'] for k in info]
if ((questype.count('data') * questype.count('code')) == 1):
data = read_data(filenames[questype.index('data')])
code = read_code(filenames[questype.index('code')])
elif (questype.count('wenjuanxing') >= 2):
filenames = [(f, info[i]['rate_real']) for (i, f) in enumerate(filenames) if (questype[i] == 'wenjuanxing')]
tmp = []
for (f, rate_real) in filenames:
t2 = (0 if (rate_real < 0.5) else 2)
d = pd.read_excel(f)
d = d.iloc[:, 0]
tmp.append((t2, d))
tmp_equal = 0
for (t, d0) in tmp[:(- 1)]:
if ((len(d) == len(d0)) and all((d == d0))):
tmp_equal += 1
tmp[(- 1)] = ((t2 + (int((t / 10)) * 10)), tmp[(- 1)][1])
max_quesnum = max([int((t / 10)) for (t, d) in tmp])
if (tmp_equal == 0):
tmp[(- 1)] = (((tmp[(- 1)][0] + (max_quesnum * 10)) + 10), tmp[(- 1)][1])
questype = [t for (t, d) in tmp]
filenames = [f for (f, r) in filenames]
quesnums = max([int((t / 10)) for t in questype])
filename_wjx = []
for i in range(1, (quesnums + 1)):
if ((questype.count((i * 10)) == 1) and (questype.count(((i * 10) + 2)) == 1)):
filename_wjx.append([filenames[questype.index((i * 10))], filenames[questype.index(((i * 10) + 2))]])
if (len(filename_wjx) == 1):
(data, code) = wenjuanxing(filename_wjx[0])
elif (len(filename_wjx) > 1):
print('脚本识别出多组问卷星数据,请选择需要编码的数据:')
for (i, f) in enumerate(filename_wjx):
print('{}: {}'.format((i + 1), '/'.join([os.path.split(f[0])[1], os.path.split(f[1])[1]])))
ii = input('您选择的数据是(数据前的编码,如:1):')
ii = re.sub('\\s', '', ii)
if ii.isnumeric():
(data, code) = wenjuanxing(filename_wjx[(int(ii) - 1)])
else:
                print('请输入正确的编码.')
else:
print('没有找到任何问卷数据..')
raise
else:
print('没有找到任何数据')
raise
return (data, code)
| 4,276,380,133,315,128,000
|
导入问卷数据
# 暂时只支持已编码的和问卷星数据
1、支持路径搜寻
2、支持自由选择文件
method:
-filedialog: 打开文件窗口选择
-pathsearch:自带搜索路径,需提供filepath
|
reportgen/questionnaire/questionnaire.py
|
load_data
|
brightgeng/reportgen
|
python
|
def load_data(method='filedialog', **kwargs):
'导入问卷数据\n # 暂时只支持已编码的和问卷星数据\n 1、支持路径搜寻\n 2、支持自由选择文件\n method:\n -filedialog: 打开文件窗口选择\n -pathsearch:自带搜索路径,需提供filepath\n '
if (method == 'filedialog'):
import tkinter as tk
from tkinter.filedialog import askopenfilenames
tk.Tk().withdraw()
if ('initialdir' in kwargs):
initialdir = kwargs['initialdir']
elif os.path.isdir('.\\data'):
initialdir = '.\\data'
else:
initialdir = '.'
title = u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)'
filetypes = (('Excel files', '*.xls;*.xlsx'), ('CSV files', '*.csv'), ('all files', '*.*'))
filenames = []
while (len(filenames) < 1):
filenames = askopenfilenames(initialdir=initialdir, title=title, filetypes=filetypes)
if (len(filenames) < 1):
print('请至少选择一个文件.')
filenames = list(filenames)
elif (method == 'pathsearch'):
if ('filepath' in kwargs):
filepath = kwargs['filepath']
else:
filepath = '.\\data\\'
if os.path.isdir(filepath):
filenames = os.listdir(filepath)
filenames = [os.path.join(filepath, s) for s in filenames]
else:
print('搜索路径错误')
raise
info = []
for filename in filenames:
filename_nopath = os.path.split(filename)[1]
data = read_data(filename)
field_c1 = set(data.iloc[:, 0].dropna().unique())
field_r1 = set(data.columns)
hqlen = [(len(re.findall('^[qQ]\\d+', c)) > 0) for c in field_r1]
hqrate = ((hqlen.count(True) / len(field_r1)) if (len(field_r1) > 0) else 0)
(rowlens, collens) = data.shape
rate_real = ((data.applymap((lambda x: isinstance(x, (int, float)))).sum().sum() / rowlens) / collens)
        tmp = {'filename': filename_nopath, 'filenametype': '', 'rowlens': rowlens, 'collens': collens, 'field_c1': field_c1, 'field_r1': field_r1, 'type': '', 'rate_real': rate_real}
if (len(re.findall('^data.*\\.xls', filename_nopath)) > 0):
tmp['filenametype'] = 'data'
elif (len(re.findall('^code.*\\.xls', filename_nopath)) > 0):
tmp['filenametype'] = 'code'
elif (len(re.findall('\\d+_\\d+_\\d.xls', filename_nopath)) > 0):
tmp['filenametype'] = 'wenjuanxing'
if ((tmp['filenametype'] == 'code') or (set(['key', 'code', 'qlist', 'qtype']) < field_c1)):
tmp['type'] = 'code'
if ((tmp['filenametype'] == 'wenjuanxing') or (len((set(['序号', '提交答卷时间', '所用时间', '来自IP', '来源', '来源详情', '总分']) & field_r1)) >= 5)):
tmp['type'] = 'wenjuanxing'
if ((tmp['filenametype'] == 'data') or (hqrate >= 0.5)):
tmp['type'] = 'data'
info.append(tmp)
questype = [k['type'] for k in info]
if ((questype.count('data') * questype.count('code')) == 1):
data = read_data(filenames[questype.index('data')])
code = read_code(filenames[questype.index('code')])
elif (questype.count('wenjuanxing') >= 2):
filenames = [(f, info[i]['rate_real']) for (i, f) in enumerate(filenames) if (questype[i] == 'wenjuanxing')]
tmp = []
for (f, rate_real) in filenames:
t2 = (0 if (rate_real < 0.5) else 2)
d = pd.read_excel(f)
d = d.iloc[:, 0]
tmp.append((t2, d))
tmp_equal = 0
for (t, d0) in tmp[:(- 1)]:
if ((len(d) == len(d0)) and all((d == d0))):
tmp_equal += 1
tmp[(- 1)] = ((t2 + (int((t / 10)) * 10)), tmp[(- 1)][1])
max_quesnum = max([int((t / 10)) for (t, d) in tmp])
if (tmp_equal == 0):
tmp[(- 1)] = (((tmp[(- 1)][0] + (max_quesnum * 10)) + 10), tmp[(- 1)][1])
questype = [t for (t, d) in tmp]
filenames = [f for (f, r) in filenames]
quesnums = max([int((t / 10)) for t in questype])
filename_wjx = []
for i in range(1, (quesnums + 1)):
if ((questype.count((i * 10)) == 1) and (questype.count(((i * 10) + 2)) == 1)):
filename_wjx.append([filenames[questype.index((i * 10))], filenames[questype.index(((i * 10) + 2))]])
if (len(filename_wjx) == 1):
(data, code) = wenjuanxing(filename_wjx[0])
elif (len(filename_wjx) > 1):
print('脚本识别出多组问卷星数据,请选择需要编码的数据:')
for (i, f) in enumerate(filename_wjx):
print('{}: {}'.format((i + 1), '/'.join([os.path.split(f[0])[1], os.path.split(f[1])[1]])))
ii = input('您选择的数据是(数据前的编码,如:1):')
            ii = re.sub('\\s', '', ii)
if ii.isnumeric():
(data, code) = wenjuanxing(filename_wjx[(int(ii) - 1)])
else:
                print('请输入正确的编码.')
else:
print('没有找到任何问卷数据..')
raise
else:
print('没有找到任何数据')
raise
return (data, code)
|
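Usage note: a minimal non-interactive sketch for load_data above. The folder .\data matches the function's own default search path, but the files assumed inside it (a wenjuanxing export pair such as 23_22_0.xls / 23_22_2.xls) are hypothetical, so this only runs against a real export.

# Sketch only: requires a real wenjuanxing export pair under .\data to run.
data, code = load_data(method='pathsearch', filepath='.\\data\\')
print(len(data), 'answers,', len(code), 'coded questions')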
def levenshtein(s, t):
"'' From Wikipedia article; Iterative with two matrix rows. "
if (s == t):
return 0
elif (len(s) == 0):
return len(t)
elif (len(t) == 0):
return len(s)
v0 = ([None] * (len(t) + 1))
v1 = ([None] * (len(t) + 1))
for i in range(len(v0)):
v0[i] = i
for i in range(len(s)):
v1[0] = (i + 1)
for j in range(len(t)):
cost = (0 if (s[i] == t[j]) else 1)
v1[(j + 1)] = min((v1[j] + 1), (v0[(j + 1)] + 1), (v0[j] + cost))
for j in range(len(v0)):
v0[j] = v1[j]
return v1[len(t)]
| -6,058,263,141,906,959,000
|
From Wikipedia article; iterative with two matrix rows.
|
reportgen/questionnaire/questionnaire.py
|
levenshtein
|
brightgeng/reportgen
|
python
|
def levenshtein(s, t):
" "
if (s == t):
return 0
elif (len(s) == 0):
return len(t)
elif (len(t) == 0):
return len(s)
v0 = ([None] * (len(t) + 1))
v1 = ([None] * (len(t) + 1))
for i in range(len(v0)):
v0[i] = i
for i in range(len(s)):
v1[0] = (i + 1)
for j in range(len(t)):
cost = (0 if (s[i] == t[j]) else 1)
v1[(j + 1)] = min((v1[j] + 1), (v0[(j + 1)] + 1), (v0[j] + cost))
for j in range(len(v0)):
v0[j] = v1[j]
return v1[len(t)]
|
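Usage note: a quick sanity check for levenshtein above; the expected distances are small enough to verify by hand.

# Usage sketch for levenshtein(): classic edit-distance examples.
print(levenshtein('kitten', 'sitting'))  # 3: substitute k->s, e->i, append g
print(levenshtein('满意度', '满意'))       # 1: one deletion
print(levenshtein('', 'abc'))            # 3: three insertions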
def code_similar(code1, code2):
'\n 题目内容相似度用最小编辑距离来度量\n 选项相似度分为几种\n 1、完全相同:1\n 2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2\n 2、多选题/排序题:不考虑序号,共同变量超过一半即可:3\n 3、矩阵单选题:code_r 暂时只考虑完全匹配\n 4、其他情况为0\n\n '
code_distance_min = pd.DataFrame(index=code1.keys(), columns=['qnum', 'similar_content', 'similar_code'])
for c1 in code1:
disstance_str = pd.Series(index=code2.keys())
for c2 in code2:
if (code1[c1]['qtype'] == code2[c2]['qtype']):
disstance_str[c2] = levenshtein(code1[c1]['content'], code2[c2]['content'])
c2 = disstance_str.idxmin()
if (('%s' % c2) == 'nan'):
continue
min_len = ((len(code1[c1]['content']) + len(code2[c2]['content'])) / 2)
similar_content = ((100 - ((100 * disstance_str[c2]) / min_len)) if (min_len > 0) else 0)
qtype = code2[c2]['qtype']
if (qtype == '单选题'):
t1 = code1[c1]['code']
t2 = code2[c2]['code']
inner_key = list((set(t1.keys()) & set(t2.keys())))
tmp = all([(t1[c] == t2[c]) for c in inner_key])
if (t1 == t2):
similar_code = 1
elif ((len(inner_key) >= (0.5 * len((set(t1.keys()) | set(t2.keys()))))) and tmp):
similar_code = 2
else:
similar_code = 0
elif (qtype in ['多选题', '排序题']):
t1 = code1[c1]['code']
t2 = code2[c2]['code']
t1 = [t1[c] for c in code1[c1]['qlist']]
t2 = [t2[c] for c in code2[c2]['qlist']]
inner_key = (set(t1) & set(t2))
if (t1 == t2):
similar_code = 1
elif (len((set(t1) & set(t2))) >= (0.5 * len((set(t1) | set(t2))))):
similar_code = 3
else:
similar_code = 0
elif (qtype in ['矩阵多选题']):
t1 = code1[c1]['code_r']
t2 = code2[c2]['code_r']
t1 = [t1[c] for c in code1[c1]['qlist']]
t2 = [t2[c] for c in code2[c2]['qlist']]
inner_key = (set(t1) & set(t2))
if (t1 == t2):
similar_code = 1
elif (len((set(t1) & set(t2))) >= (0.5 * len((set(t1) | set(t2))))):
similar_code = 3
else:
similar_code = 0
elif (qtype in ['填空题']):
similar_code = 1
else:
similar_code = 0
code_distance_min.loc[(c1, 'qnum')] = c2
code_distance_min.loc[(c1, 'similar_content')] = similar_content
code_distance_min.loc[(c1, 'similar_code')] = similar_code
code_distance_min = code_distance_min.sort_values(['qnum', 'similar_content', 'similar_code'], ascending=[False, False, True])
code_distance_min.loc[code_distance_min.duplicated(['qnum']), :] = np.nan
code_distance_min = pd.DataFrame(code_distance_min, index=code1.keys())
return code_distance_min
| 2,684,696,960,743,818,000
|
题目内容相似度用最小编辑距离来度量
选项相似度分为几种
1、完全相同:1
2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2
2、多选题/排序题:不考虑序号,共同变量超过一半即可:3
3、矩阵单选题:code_r 暂时只考虑完全匹配
4、其他情况为0
|
reportgen/questionnaire/questionnaire.py
|
code_similar
|
brightgeng/reportgen
|
python
|
def code_similar(code1, code2):
'\n 题目内容相似度用最小编辑距离来度量\n 选项相似度分为几种\n 1、完全相同:1\n 2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2\n 2、多选题/排序题:不考虑序号,共同变量超过一半即可:3\n 3、矩阵单选题:code_r 暂时只考虑完全匹配\n 4、其他情况为0\n\n '
code_distance_min = pd.DataFrame(index=code1.keys(), columns=['qnum', 'similar_content', 'similar_code'])
for c1 in code1:
disstance_str = pd.Series(index=code2.keys())
for c2 in code2:
if (code1[c1]['qtype'] == code2[c2]['qtype']):
disstance_str[c2] = levenshtein(code1[c1]['content'], code2[c2]['content'])
c2 = disstance_str.idxmin()
if (('%s' % c2) == 'nan'):
continue
min_len = ((len(code1[c1]['content']) + len(code2[c2]['content'])) / 2)
similar_content = ((100 - ((100 * disstance_str[c2]) / min_len)) if (min_len > 0) else 0)
qtype = code2[c2]['qtype']
if (qtype == '单选题'):
t1 = code1[c1]['code']
t2 = code2[c2]['code']
inner_key = list((set(t1.keys()) & set(t2.keys())))
tmp = all([(t1[c] == t2[c]) for c in inner_key])
if (t1 == t2):
similar_code = 1
elif ((len(inner_key) >= (0.5 * len((set(t1.keys()) | set(t2.keys()))))) and tmp):
similar_code = 2
else:
similar_code = 0
elif (qtype in ['多选题', '排序题']):
t1 = code1[c1]['code']
t2 = code2[c2]['code']
t1 = [t1[c] for c in code1[c1]['qlist']]
t2 = [t2[c] for c in code2[c2]['qlist']]
inner_key = (set(t1) & set(t2))
if (t1 == t2):
similar_code = 1
elif (len((set(t1) & set(t2))) >= (0.5 * len((set(t1) | set(t2))))):
similar_code = 3
else:
similar_code = 0
elif (qtype in ['矩阵多选题']):
t1 = code1[c1]['code_r']
t2 = code2[c2]['code_r']
t1 = [t1[c] for c in code1[c1]['qlist']]
t2 = [t2[c] for c in code2[c2]['qlist']]
inner_key = (set(t1) & set(t2))
if (t1 == t2):
similar_code = 1
elif (len((set(t1) & set(t2))) >= (0.5 * len((set(t1) | set(t2))))):
similar_code = 3
else:
similar_code = 0
elif (qtype in ['填空题']):
similar_code = 1
else:
similar_code = 0
code_distance_min.loc[(c1, 'qnum')] = c2
code_distance_min.loc[(c1, 'similar_content')] = similar_content
code_distance_min.loc[(c1, 'similar_code')] = similar_code
code_distance_min = code_distance_min.sort_values(['qnum', 'similar_content', 'similar_code'], ascending=[False, False, True])
code_distance_min.loc[code_distance_min.duplicated(['qnum']), :] = np.nan
code_distance_min = pd.DataFrame(code_distance_min, index=code1.keys())
return code_distance_min
|
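Usage note: a minimal sketch for code_similar, using two hand-built (hypothetical) code dicts in the same shape the loaders above produce. The question numbers, contents, and option codes are invented for illustration.

# Two single-choice questions with slightly different wording but
# identical option codes; expect a high similar_content and similar_code == 1.
code1 = {'Q1': {'content': '您的性别', 'qtype': '单选题',
                'qlist': ['Q1'], 'code': {1: '男', 2: '女'}}}
code2 = {'Q3': {'content': '您的性别是', 'qtype': '单选题',
                'qlist': ['Q3'], 'code': {1: '男', 2: '女'}}}
print(code_similar(code1, code2))
# Expected row for Q1: qnum=Q3, similar_content ≈ 78 (one-char edit),
# similar_code = 1 (identical option codes).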
def data_merge(ques1, ques2, qlist1=None, qlist2=None, name1='ques1', name2='ques2', mergeqnum='Q0', similar_threshold=70):
'合并两份数据\n ques1: 列表,[data1,code1]\n ques2: 列表,[data2,code2]\n '
(data1, code1) = ques1
(data2, code2) = ques2
if ((qlist1 is None) or (qlist2 is None)):
qlist1 = []
qlist2 = []
qqlist1 = []
qqlist2 = []
code_distance_min = code_similar(code1, code2)
code1_key = sorted(code1, key=(lambda x: int(re.findall('\\d+', x)[0])))
for c1 in code1_key:
qtype1 = code1[c1]['qtype']
rs_qq = code_distance_min.loc[(c1, 'qnum')]
similar_content = code_distance_min.loc[(c1, 'similar_content')]
similar_code = code_distance_min.loc[(c1, 'similar_code')]
if ((similar_content >= similar_threshold) and (similar_code in [1, 2])):
print('将自动合并: {} 和 {}'.format(c1, rs_qq))
user_qq = rs_qq
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(rs_qq)
elif ((similar_content >= similar_threshold) and (similar_code == 3)):
t1 = (code1[c1]['code_r'] if (qtype1 == '矩阵单选题') else code1[c1]['code'])
t1_qlist = code1[c1]['qlist']
t1_value = [t1[k] for k in t1_qlist]
t2 = (code2[rs_qq]['code_r'] if (qtype1 == '矩阵单选题') else code2[rs_qq]['code'])
t2_qlist = code2[rs_qq]['qlist']
t2_value = [t2[k] for k in t2_qlist]
t1_qlist_new = [q for q in t1_qlist if (t1[q] in list((set(t1_value) & set(t2_value))))]
t2_r = dict(zip([s[1] for s in t2.items()], [s[0] for s in t2.items()]))
t2_qlist_new = [t2_r[s] for s in [t1[q] for q in t1_qlist_new]]
code1[c1]['qlist'] = t1_qlist_new
code1[c1]['code'] = {k: t1[k] for k in t1_qlist_new}
qqlist1 += t1_qlist_new
qqlist2 += t2_qlist_new
qlist1.append(c1)
qlist2.append(rs_qq)
print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1, rs_qq))
elif (similar_code in [1, 2]):
print(('-' * 40))
print('为【 {}:{} 】自动匹配到: '.format(c1, code1[c1]['content']))
print(' 【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq, code2[rs_qq]['content'], similar_content))
tmp = input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')
tmp = re.sub('\\s', '', tmp)
tmp = tmp.lower()
if (tmp in ['yes', 'y']):
user_qq = rs_qq
elif (tmp in ['no', 'n']):
user_qq = None
else:
tmp = re.sub('^q', 'Q', tmp)
if (tmp not in code2):
user_qq = None
elif ((tmp in code2) and (tmp != rs_qq)):
print('您输入的是{}:{}'.format(tmp, code2[tmp]['content']))
user_qq = tmp
if (user_qq == rs_qq):
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1, rs_qq))
elif (user_qq is not None):
if (('code' in code1[c1]) and (len(code1[c1]['code']) > 0)):
t1 = (code1[c1]['code_r'] if (qtype1 == '矩阵单选题') else code1[c1]['code'])
t2 = (code2[user_qq]['code_r'] if (code2[user_qq]['qtype'] == '矩阵单选题') else code2[user_qq]['code'])
if (set(t1.values()) == set(t2.values())):
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1, user_qq))
else:
print('两个题目的选项不匹配,将自动跳过.')
else:
qqlist1 += [code1[c1]['qlist'][0]]
qqlist2 += [code2[user_qq]['qlist'][0]]
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1, user_qq))
else:
print('将自动跳过: {}'.format(c1))
print(('-' * 40))
else:
print('将自动跳过: {}'.format(c1))
tmp = input('请问您需要的题目是否都已经合并? 请输入(yes / no): ')
tmp = re.sub('\\s', '', tmp)
tmp = tmp.lower()
if (tmp in ['no', 'n']):
print('请确保接下来您要合并的题目类型和选项完全一样.')
while 1:
tmp = input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')
tmp = re.sub('\\s', '', tmp)
tmp = re.sub(',', ',', tmp)
tmp = tmp.split(',')
tmp = [re.sub('^q', 'Q', qq) for qq in tmp]
if (len(tmp) < 2):
break
if ((tmp[0] in qlist1) or (tmp[1] in qlist2)):
print('该题已经被合并,请重新输入')
continue
if ((tmp[0] not in code1) or (tmp[1] not in code2)):
print('输入错误, 请重新输入')
continue
c1 = tmp[0]
c2 = tmp[1]
print('您输入的是:')
print('第一份数据中的【 {}:{} 】'.format(c1, code1[c1]['content']))
print('第二份数据中的【 {}:{} 】'.format(c2, code2[c2]['content']))
w = code_similar({c1: code1[c1]}, {c2: code2[c2]})
similar_code = w.loc[(c1, 'similar_code')]
if ((similar_code in [1, 2]) and (len(code1[c1]['qlist']) == len(code2[c2]['qlist']))):
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[c2]['qlist']
qlist1.append(c1)
qlist2.append(c2)
print('将自动合并: {} 和 {}'.format(c1, c2))
else:
print('选项不匹配,请重新输入')
else:
qqlist1 = []
for qq in qlist1:
qqlist1 = (qqlist1 + code1[qq]['qlist'])
qqlist2 = []
for qq in qlist2:
qqlist2 = (qqlist2 + code2[qq]['qlist'])
if (mergeqnum in qqlist1):
mergeqnum = (mergeqnum + 'merge')
data1 = data1.loc[:, qqlist1]
data1.loc[:, mergeqnum] = 1
data2 = data2.loc[:, qqlist2]
data2.loc[:, mergeqnum] = 2
if (len(qqlist1) != len(qqlist2)):
print('两份数据选项不完全匹配,请检查....')
raise
data2 = data2.rename(columns=dict(zip(qqlist2, qqlist1)))
data12 = data1.append(data2, ignore_index=True)
code12 = {}
for (i, cc) in enumerate(qlist1):
code12[cc] = code1[cc]
if (('code' in code1[cc]) and ('code' in code2[qlist2[i]])):
code12[cc]['code'].update(code2[qlist2[i]]['code'])
code12[mergeqnum] = {'content': u'来源', 'code': {1: name1, 2: name2}, 'qtype': u'单选题', 'qlist': [mergeqnum]}
return (data12, code12)
| 6,126,437,658,393,545,000
|
合并两份数据
ques1: 列表,[data1,code1]
ques2: 列表,[data2,code2]
|
reportgen/questionnaire/questionnaire.py
|
data_merge
|
brightgeng/reportgen
|
python
|
def data_merge(ques1, ques2, qlist1=None, qlist2=None, name1='ques1', name2='ques2', mergeqnum='Q0', similar_threshold=70):
'合并两份数据\n ques1: 列表,[data1,code1]\n ques2: 列表,[data2,code2]\n '
(data1, code1) = ques1
(data2, code2) = ques2
if ((qlist1 is None) or (qlist2 is None)):
qlist1 = []
qlist2 = []
qqlist1 = []
qqlist2 = []
code_distance_min = code_similar(code1, code2)
code1_key = sorted(code1, key=(lambda x: int(re.findall('\\d+', x)[0])))
for c1 in code1_key:
qtype1 = code1[c1]['qtype']
rs_qq = code_distance_min.loc[(c1, 'qnum')]
similar_content = code_distance_min.loc[(c1, 'similar_content')]
similar_code = code_distance_min.loc[(c1, 'similar_code')]
if ((similar_content >= similar_threshold) and (similar_code in [1, 2])):
print('将自动合并: {} 和 {}'.format(c1, rs_qq))
user_qq = rs_qq
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(rs_qq)
elif ((similar_content >= similar_threshold) and (similar_code == 3)):
t1 = (code1[c1]['code_r'] if (qtype1 == '矩阵单选题') else code1[c1]['code'])
t1_qlist = code1[c1]['qlist']
t1_value = [t1[k] for k in t1_qlist]
t2 = (code2[rs_qq]['code_r'] if (qtype1 == '矩阵单选题') else code2[rs_qq]['code'])
t2_qlist = code2[rs_qq]['qlist']
t2_value = [t2[k] for k in t2_qlist]
t1_qlist_new = [q for q in t1_qlist if (t1[q] in list((set(t1_value) & set(t2_value))))]
t2_r = dict(zip([s[1] for s in t2.items()], [s[0] for s in t2.items()]))
t2_qlist_new = [t2_r[s] for s in [t1[q] for q in t1_qlist_new]]
code1[c1]['qlist'] = t1_qlist_new
code1[c1]['code'] = {k: t1[k] for k in t1_qlist_new}
qqlist1 += t1_qlist_new
qqlist2 += t2_qlist_new
qlist1.append(c1)
qlist2.append(rs_qq)
print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1, rs_qq))
elif (similar_code in [1, 2]):
print(('-' * 40))
print('为【 {}:{} 】自动匹配到: '.format(c1, code1[c1]['content']))
print(' 【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq, code2[rs_qq]['content'], similar_content))
tmp = input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')
            tmp = re.sub('\\s', '', tmp)
tmp = tmp.lower()
if (tmp in ['yes', 'y']):
user_qq = rs_qq
elif (tmp in ['no', 'n']):
user_qq = None
else:
tmp = re.sub('^q', 'Q', tmp)
if (tmp not in code2):
user_qq = None
elif ((tmp in code2) and (tmp != rs_qq)):
print('您输入的是{}:{}'.format(tmp, code2[tmp]['content']))
user_qq = tmp
if (user_qq == rs_qq):
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1, rs_qq))
elif (user_qq is not None):
if (('code' in code1[c1]) and (len(code1[c1]['code']) > 0)):
t1 = (code1[c1]['code_r'] if (qtype1 == '矩阵单选题') else code1[c1]['code'])
t2 = (code2[user_qq]['code_r'] if (code2[user_qq]['qtype'] == '矩阵单选题') else code2[user_qq]['code'])
if (set(t1.values()) == set(t2.values())):
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1, user_qq))
else:
print('两个题目的选项不匹配,将自动跳过.')
else:
qqlist1 += [code1[c1]['qlist'][0]]
qqlist2 += [code2[user_qq]['qlist'][0]]
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1, user_qq))
else:
print('将自动跳过: {}'.format(c1))
print(('-' * 40))
else:
print('将自动跳过: {}'.format(c1))
tmp = input('请问您需要的题目是否都已经合并? 请输入(yes / no): ')
    tmp = re.sub('\\s', '', tmp)
tmp = tmp.lower()
if (tmp in ['no', 'n']):
print('请确保接下来您要合并的题目类型和选项完全一样.')
while 1:
tmp = input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')
            tmp = re.sub('\\s', '', tmp)
tmp = re.sub(',', ',', tmp)
tmp = tmp.split(',')
tmp = [re.sub('^q', 'Q', qq) for qq in tmp]
if (len(tmp) < 2):
break
if ((tmp[0] in qlist1) or (tmp[1] in qlist2)):
print('该题已经被合并,请重新输入')
continue
if ((tmp[0] not in code1) or (tmp[1] not in code2)):
print('输入错误, 请重新输入')
continue
c1 = tmp[0]
c2 = tmp[1]
print('您输入的是:')
print('第一份数据中的【 {}:{} 】'.format(c1, code1[c1]['content']))
print('第二份数据中的【 {}:{} 】'.format(c2, code2[c2]['content']))
w = code_similar({c1: code1[c1]}, {c2: code2[c2]})
similar_code = w.loc[(c1, 'similar_code')]
if ((similar_code in [1, 2]) and (len(code1[c1]['qlist']) == len(code2[c2]['qlist']))):
qqlist1 += code1[c1]['qlist']
qqlist2 += code2[c2]['qlist']
qlist1.append(c1)
qlist2.append(c2)
print('将自动合并: {} 和 {}'.format(c1, c2))
else:
print('选项不匹配,请重新输入')
else:
qqlist1 = []
for qq in qlist1:
qqlist1 = (qqlist1 + code1[qq]['qlist'])
qqlist2 = []
for qq in qlist2:
qqlist2 = (qqlist2 + code2[qq]['qlist'])
if (mergeqnum in qqlist1):
mergeqnum = (mergeqnum + 'merge')
data1 = data1.loc[:, qqlist1]
data1.loc[:, mergeqnum] = 1
data2 = data2.loc[:, qqlist2]
data2.loc[:, mergeqnum] = 2
if (len(qqlist1) != len(qqlist2)):
print('两份数据选项不完全匹配,请检查....')
raise
data2 = data2.rename(columns=dict(zip(qqlist2, qqlist1)))
data12 = data1.append(data2, ignore_index=True)
code12 = {}
for (i, cc) in enumerate(qlist1):
code12[cc] = code1[cc]
if (('code' in code1[cc]) and ('code' in code2[qlist2[i]])):
code12[cc]['code'].update(code2[qlist2[i]]['code'])
code12[mergeqnum] = {'content': u'来源', 'code': {1: name1, 2: name2}, 'qtype': u'单选题', 'qlist': [mergeqnum]}
return (data12, code12)
|
def clean_ftime(ftime, cut_percent=0.25):
'\n ftime 是完成问卷的秒数\n 思路:\n 1、只考虑截断问卷完成时间较小的样本\n 2、找到完成时间变化的拐点,即需要截断的时间点\n 返回:r\n 建议截断<r的样本\n '
t_min = int(ftime.min())
t_cut = int(ftime.quantile(cut_percent))
x = np.array(range(t_min, t_cut))
y = np.array([len(ftime[(ftime <= i)]) for i in range(t_min, t_cut)])
z1 = np.polyfit(x, y, 4)
z2 = np.polyder(z1, 2)
r = np.roots(np.polyder(z2, 1))
r = int(r[0])
return r
| -6,333,484,139,126,520,000
|
ftime 是完成问卷的秒数
思路:
1、只考虑截断问卷完成时间较小的样本
2、找到完成时间变化的拐点,即需要截断的时间点
返回:r
建议截断<r的样本
|
reportgen/questionnaire/questionnaire.py
|
clean_ftime
|
brightgeng/reportgen
|
python
|
def clean_ftime(ftime, cut_percent=0.25):
'\n ftime 是完成问卷的秒数\n 思路:\n 1、只考虑截断问卷完成时间较小的样本\n 2、找到完成时间变化的拐点,即需要截断的时间点\n 返回:r\n 建议截断<r的样本\n '
t_min = int(ftime.min())
t_cut = int(ftime.quantile(cut_percent))
x = np.array(range(t_min, t_cut))
y = np.array([len(ftime[(ftime <= i)]) for i in range(t_min, t_cut)])
z1 = np.polyfit(x, y, 4)
z2 = np.polyder(z1, 2)
r = np.roots(np.polyder(z2, 1))
r = int(r[0])
return r
|
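Usage note: a synthetic sketch for clean_ftime. The mix of rushed and normal completion times is invented purely to create the inflection point that the degree-4 polynomial fit looks for; it assumes numpy/pandas are imported as in the module.

# Sketch: suggest a cut point for suspiciously fast questionnaires.
np.random.seed(0)
rushed = np.random.randint(5, 30, 50)        # invented: too-fast samples
normal = np.random.randint(120, 600, 950)    # invented: plausible samples
ftime = pd.Series(np.concatenate([rushed, normal]))
r = clean_ftime(ftime, cut_percent=0.25)
print('suggested cut: drop samples with ftime < {}s'.format(r))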
def data_auto_code(data):
'智能判断问卷数据\n 输入\n data: 数据框,列名需要满足Qi或者Qi_\n 输出:\n code: 自动编码\n '
data = pd.DataFrame(data)
columns = data.columns
columns = [c for c in columns if re.match('Q\\d+', c)]
code = {}
for cc in columns:
if ('_' not in cc):
key = cc
else:
key = cc.split('_')[0]
if (key not in code):
code[key] = {}
code[key]['qlist'] = []
code[key]['code'] = {}
code[key]['content'] = key
code[key]['qtype'] = ''
if (key == cc):
code[key]['qlist'] = [key]
elif re.findall((('^' + key) + '_[a-zA-Z]{0,}\\d+$'), cc):
code[key]['qlist'].append(cc)
elif ('qlist_open' in code[key]):
code[key]['qlist_open'].append(cc)
else:
code[key]['qlist_open'] = [cc]
for kk in code.keys():
dd = data[code[kk]['qlist']]
if (len(dd.columns) == 1):
tmp = dd[dd.notnull()].iloc[:, 0].unique()
if (dd.iloc[:, 0].value_counts().mean() >= 2):
code[kk]['qtype'] = u'单选题'
code[kk]['code'] = dict(zip(tmp, tmp))
else:
code[kk]['qtype'] = u'填空题'
del code[kk]['code']
else:
tmp = set(dd[dd.notnull()].as_matrix().flatten())
if (set(tmp) == set([0, 1])):
code[kk]['qtype'] = u'多选题'
code[kk]['code'] = dict(zip(code[kk]['qlist'], code[kk]['qlist']))
elif ('R' in code[kk]['qlist'][0]):
code[kk]['qtype'] = u'矩阵单选题'
code[kk]['code_r'] = dict(zip(code[kk]['qlist'], code[kk]['qlist']))
code[kk]['code'] = dict(zip(list(tmp), list(tmp)))
else:
code[kk]['qtype'] = u'排序题'
code[kk]['code'] = dict(zip(code[kk]['qlist'], code[kk]['qlist']))
return code
| 7,994,973,602,825,367,000
|
智能判断问卷数据
输入
data: 数据框,列名需要满足Qi或者Qi_
输出:
code: 自动编码
|
reportgen/questionnaire/questionnaire.py
|
data_auto_code
|
brightgeng/reportgen
|
python
|
def data_auto_code(data):
'智能判断问卷数据\n 输入\n data: 数据框,列名需要满足Qi或者Qi_\n 输出:\n code: 自动编码\n '
data = pd.DataFrame(data)
columns = data.columns
columns = [c for c in columns if re.match('Q\\d+', c)]
code = {}
for cc in columns:
if ('_' not in cc):
key = cc
else:
key = cc.split('_')[0]
if (key not in code):
code[key] = {}
code[key]['qlist'] = []
code[key]['code'] = {}
code[key]['content'] = key
            code[key]['qtype'] = ''
if (key == cc):
code[key]['qlist'] = [key]
elif re.findall((('^' + key) + '_[a-zA-Z]{0,}\\d+$'), cc):
code[key]['qlist'].append(cc)
elif ('qlist_open' in code[key]):
code[key]['qlist_open'].append(cc)
else:
code[key]['qlist_open'] = [cc]
for kk in code.keys():
dd = data[code[kk]['qlist']]
if (len(dd.columns) == 1):
tmp = dd[dd.notnull()].iloc[:, 0].unique()
if (dd.iloc[:, 0].value_counts().mean() >= 2):
code[kk]['qtype'] = u'单选题'
code[kk]['code'] = dict(zip(tmp, tmp))
else:
code[kk]['qtype'] = u'填空题'
del code[kk]['code']
else:
tmp = set(dd[dd.notnull()].as_matrix().flatten())
if (set(tmp) == set([0, 1])):
code[kk]['qtype'] = u'多选题'
code[kk]['code'] = dict(zip(code[kk]['qlist'], code[kk]['qlist']))
elif ('R' in code[kk]['qlist'][0]):
code[kk]['qtype'] = u'矩阵单选题'
code[kk]['code_r'] = dict(zip(code[kk]['qlist'], code[kk]['qlist']))
code[kk]['code'] = dict(zip(list(tmp), list(tmp)))
else:
code[kk]['qtype'] = u'排序题'
code[kk]['code'] = dict(zip(code[kk]['qlist'], code[kk]['qlist']))
return code
|
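Usage note: a tiny hand-built frame for data_auto_code. Mind that the multi-select branch calls DataFrame.as_matrix(), which was removed in pandas 1.0, so this sketch assumes an older pandas (or patching that call to .values).

# Sketch: infer a code dict from column names and values alone.
df = pd.DataFrame({'Q1': ['男', '女', '男', '女', '男', '男'],
                   'Q2_A1': [1, 0, 1, 1, 0, 1],
                   'Q2_A2': [0, 1, 1, 0, 1, 0]})
code = data_auto_code(df)
print(code['Q1']['qtype'])   # 单选题: repeated values, value_counts mean >= 2
print(code['Q2']['qtype'])   # 多选题: multi-column block holding only 0/1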
def save_data(data, filename=u'data.xlsx', code=None):
'保存问卷数据到本地\n 根据filename后缀选择相应的格式保存\n 如果有code,则保存按文本数据\n '
savetype = os.path.splitext(filename)[1][1:]
data1 = data.copy()
if code:
for qq in code.keys():
qtype = code[qq]['qtype']
qlist = code[qq]['qlist']
if (qtype == u'单选题'):
data1[qlist[0]].replace(code[qq]['code'], inplace=True)
data1.rename(columns={qq: '{}({})'.format(qq, code[qq]['content'])}, inplace=True)
elif (qtype == u'矩阵单选题'):
                data1[code[qq]['qlist']] = data1[code[qq]['qlist']].replace(code[qq]['code'])
tmp1 = code[qq]['qlist']
tmp2 = ['{}({})'.format(q, code[qq]['code_r'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1, tmp2)), inplace=True)
elif (qtype in [u'排序题']):
tmp = data[qlist]
tmp = tmp.rename(columns=code[qq]['code'])
tmp = dataCode_to_text(tmp)
ind = list(data1.columns).index(qlist[0])
qqname = '{}({})'.format(qq, code[qq]['content'])
data1.insert(ind, qqname, tmp)
tmp1 = code[qq]['qlist']
tmp2 = ['{}_{}'.format(qq, code[qq]['code'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1, tmp2)), inplace=True)
elif (qtype in [u'多选题']):
tmp = data[qlist]
tmp = tmp.rename(columns=code[qq]['code'])
tmp = dataCode_to_text(tmp)
ind = list(data1.columns).index(qlist[0])
qqname = '{}({})'.format(qq, code[qq]['content'])
data1.insert(ind, qqname, tmp)
for q in qlist:
data1[q].replace({0: '', 1: code[qq]['code'][q]}, inplace=True)
tmp2 = ['{}_{}'.format(qq, code[qq]['code'][q]) for q in qlist]
data1.rename(columns=dict(zip(qlist, tmp2)), inplace=True)
else:
data1.rename(columns={qq: '{}({})'.format(qq, code[qq]['content'])}, inplace=True)
if ((savetype == u'xlsx') or (savetype == u'xls')):
data1.to_excel(filename, index=False)
elif (savetype == u'csv'):
data1.to_csv(filename, index=False)
| 5,844,349,184,456,264,000
|
保存问卷数据到本地
根据filename后缀选择相应的格式保存
如果有code,则保存按文本数据
|
reportgen/questionnaire/questionnaire.py
|
save_data
|
brightgeng/reportgen
|
python
|
def save_data(data, filename=u'data.xlsx', code=None):
'保存问卷数据到本地\n 根据filename后缀选择相应的格式保存\n 如果有code,则保存按文本数据\n '
savetype = os.path.splitext(filename)[1][1:]
data1 = data.copy()
if code:
for qq in code.keys():
qtype = code[qq]['qtype']
qlist = code[qq]['qlist']
if (qtype == u'单选题'):
data1[qlist[0]].replace(code[qq]['code'], inplace=True)
data1.rename(columns={qq: '{}({})'.format(qq, code[qq]['content'])}, inplace=True)
elif (qtype == u'矩阵单选题'):
                data1[code[qq]['qlist']] = data1[code[qq]['qlist']].replace(code[qq]['code'])
tmp1 = code[qq]['qlist']
tmp2 = ['{}({})'.format(q, code[qq]['code_r'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1, tmp2)), inplace=True)
elif (qtype in [u'排序题']):
tmp = data[qlist]
tmp = tmp.rename(columns=code[qq]['code'])
tmp = dataCode_to_text(tmp)
ind = list(data1.columns).index(qlist[0])
qqname = '{}({})'.format(qq, code[qq]['content'])
data1.insert(ind, qqname, tmp)
tmp1 = code[qq]['qlist']
tmp2 = ['{}_{}'.format(qq, code[qq]['code'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1, tmp2)), inplace=True)
elif (qtype in [u'多选题']):
tmp = data[qlist]
tmp = tmp.rename(columns=code[qq]['code'])
tmp = dataCode_to_text(tmp)
ind = list(data1.columns).index(qlist[0])
qqname = '{}({})'.format(qq, code[qq]['content'])
data1.insert(ind, qqname, tmp)
for q in qlist:
                    data1[q].replace({0: '', 1: code[qq]['code'][q]}, inplace=True)
tmp2 = ['{}_{}'.format(qq, code[qq]['code'][q]) for q in qlist]
data1.rename(columns=dict(zip(qlist, tmp2)), inplace=True)
else:
data1.rename(columns={qq: '{}({})'.format(qq, code[qq]['content'])}, inplace=True)
if ((savetype == u'xlsx') or (savetype == u'xls')):
data1.to_excel(filename, index=False)
elif (savetype == u'csv'):
data1.to_csv(filename, index=False)
|
def sa_to_ma(data):
'单选题数据转换成多选题数据\n data是单选题数据, 要求非有效列别为nan\n 可以使用内置函数pd.get_dummies()代替\n '
if isinstance(data, pd.core.frame.DataFrame):
data = data[data.columns[0]]
categorys = data[data.notnull()].unique()
try:
categorys = sorted(categorys)
except:
pass
data_ma = pd.DataFrame(index=data.index, columns=categorys)
for c in categorys:
data_ma[c] = data.map((lambda x: int((x == c))))
data_ma.loc[data.isnull(), :] = np.nan
return data_ma
| -8,025,656,272,193,248,000
|
单选题数据转换成多选题数据
data是单选题数据, 要求非有效列别为nan
可以使用内置函数pd.get_dummies()代替
|
reportgen/questionnaire/questionnaire.py
|
sa_to_ma
|
brightgeng/reportgen
|
python
|
def sa_to_ma(data):
'单选题数据转换成多选题数据\n data是单选题数据, 要求非有效列别为nan\n 可以使用内置函数pd.get_dummies()代替\n '
if isinstance(data, pd.core.frame.DataFrame):
data = data[data.columns[0]]
categorys = data[data.notnull()].unique()
try:
categorys = sorted(categorys)
except:
pass
data_ma = pd.DataFrame(index=data.index, columns=categorys)
for c in categorys:
data_ma[c] = data.map((lambda x: int((x == c))))
data_ma.loc[data.isnull(), :] = np.nan
return data_ma
|
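Usage note: a sketch for sa_to_ma; the key behavioral difference from pd.get_dummies (which the docstring itself suggests as an alternative) is how missing answers propagate.

# Sketch: expand a single-choice column into 0/1 indicator columns.
s = pd.Series(['A', 'B', np.nan, 'A', 'C'])
print(sa_to_ma(s))
# The NaN respondent becomes an all-NaN row, whereas pd.get_dummies(s)
# would encode that respondent as all zeros by default.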
def to_dummpy(data, code, qqlist=None, qtype_new='多选题', ignore_open=True):
'转化成哑变量\n 将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题\n 返回一个很大的只有0和1的数据\n '
if (qqlist is None):
qqlist = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0])))
bdata = pd.DataFrame()
bcode = {}
for qq in qqlist:
qtype = code[qq]['qtype']
data0 = data[code[qq]['qlist']]
if (qtype == '单选题'):
data0 = data0.iloc[:, 0]
categorys = data0[data0.notnull()].unique()
try:
categorys = sorted(categorys)
except:
pass
categorys = [t for t in categorys if (t in code[qq]['code'])]
cname = [code[qq]['code'][k] for k in categorys]
columns_name = ['{}_A{}'.format(qq, (i + 1)) for i in range(len(categorys))]
tmp = pd.DataFrame(index=data0.index, columns=columns_name)
for (i, c) in enumerate(categorys):
tmp[columns_name[i]] = data0.map((lambda x: int((x == c))))
code_tmp = {'content': code[qq]['content'], 'qtype': qtype_new}
code_tmp['code'] = dict(zip(columns_name, cname))
code_tmp['qlist'] = columns_name
bcode.update({qq: code_tmp})
bdata = pd.concat([bdata, tmp], axis=1)
elif (qtype in ['多选题', '排序题', '矩阵单选题']):
bdata = pd.concat([bdata, data0], axis=1)
bcode.update({qq: code[qq]})
bdata = bdata.fillna(0)
try:
bdata = bdata.astype(np.int64, raise_on_error=False)
except:
pass
return (bdata, bcode)
| 840,683,149,439,387,100
|
转化成哑变量
将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题
返回一个很大的只有0和1的数据
|
reportgen/questionnaire/questionnaire.py
|
to_dummpy
|
brightgeng/reportgen
|
python
|
def to_dummpy(data, code, qqlist=None, qtype_new='多选题', ignore_open=True):
'转化成哑变量\n 将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题\n 返回一个很大的只有0和1的数据\n '
if (qqlist is None):
qqlist = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0])))
bdata = pd.DataFrame()
bcode = {}
for qq in qqlist:
qtype = code[qq]['qtype']
data0 = data[code[qq]['qlist']]
if (qtype == '单选题'):
data0 = data0.iloc[:, 0]
categorys = data0[data0.notnull()].unique()
try:
categorys = sorted(categorys)
except:
pass
categorys = [t for t in categorys if (t in code[qq]['code'])]
cname = [code[qq]['code'][k] for k in categorys]
columns_name = ['{}_A{}'.format(qq, (i + 1)) for i in range(len(categorys))]
tmp = pd.DataFrame(index=data0.index, columns=columns_name)
for (i, c) in enumerate(categorys):
tmp[columns_name[i]] = data0.map((lambda x: int((x == c))))
code_tmp = {'content': code[qq]['content'], 'qtype': qtype_new}
code_tmp['code'] = dict(zip(columns_name, cname))
code_tmp['qlist'] = columns_name
bcode.update({qq: code_tmp})
bdata = pd.concat([bdata, tmp], axis=1)
elif (qtype in ['多选题', '排序题', '矩阵单选题']):
bdata = pd.concat([bdata, data0], axis=1)
bcode.update({qq: code[qq]})
bdata = bdata.fillna(0)
try:
bdata = bdata.astype(np.int64, raise_on_error=False)
except:
pass
return (bdata, bcode)
|
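Usage note: a minimal sketch for to_dummpy (sic) with one single-choice question; the question content and option codes are invented.

# Sketch: dummy-encode a single-choice question into Q1_A1/Q1_A2.
data = pd.DataFrame({'Q1': [1, 2, 1, 2, 2]})
code = {'Q1': {'content': '性别', 'qtype': '单选题',
               'qlist': ['Q1'], 'code': {1: '男', 2: '女'}}}
bdata, bcode = to_dummpy(data, code)
print(bdata)                  # 0/1 columns Q1_A1 and Q1_A2
print(bcode['Q1']['code'])    # {'Q1_A1': '男', 'Q1_A2': '女'}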
def qdata_flatten(data, code, quesid=None, userid_begin=None):
'将问卷数据展平,字段如下\n userid: 用户ID\n quesid: 问卷ID\n qnum: 题号\n qname: 题目内容\n qtype: 题目类型\n samplelen:题目的样本数\n itemnum: 选项序号\n itemname: 选项内容\n code: 用户的选择\n codename: 用户选择的具体值\n count: 计数\n percent(%): 计数占比(百分比)\n '
if (not userid_begin):
userid_begin = 1000000
data.index = [((userid_begin + i) + 1) for i in range(len(data))]
if ('提交答卷时间' in data.columns):
begin_date = pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')
end_date = pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')
else:
begin_date = ''
end_date = ''
(data, code) = to_dummpy(data, code, qtype_new='单选题')
code_item = {}
for qq in code:
if (code[qq]['qtype'] == '矩阵单选题'):
code_item.update(code[qq]['code_r'])
else:
code_item.update(code[qq]['code'])
qdata = data.stack().reset_index()
qdata.columns = ['userid', 'qn_an', 'code']
qdata['qnum'] = qdata['qn_an'].map((lambda x: x.split('_')[0]))
qdata['itemnum'] = qdata['qn_an'].map((lambda x: '_'.join(x.split('_')[1:])))
if quesid:
qdata['quesid'] = quesid
qdata = qdata[['userid', 'quesid', 'qnum', 'itemnum', 'code']]
else:
qdata = qdata[['userid', 'qnum', 'itemnum', 'code']]
samplelen = qdata.groupby(['userid', 'qnum'])['code'].sum().map((lambda x: int((x > 0)))).unstack().sum()
quesinfo = qdata.groupby(['qnum', 'itemnum', 'code'])['code'].count()
quesinfo.name = 'count'
quesinfo = quesinfo.reset_index()
quesinfo = quesinfo[(quesinfo['code'] != 0)]
quesinfo['samplelen'] = quesinfo['qnum'].replace(samplelen.to_dict())
quesinfo['percent(%)'] = 0
quesinfo.loc[((quesinfo['samplelen'] > 0), 'percent(%)')] = ((100 * quesinfo.loc[((quesinfo['samplelen'] > 0), 'count')]) / quesinfo.loc[((quesinfo['samplelen'] > 0), 'samplelen')])
quesinfo['qname'] = quesinfo['qnum'].map((lambda x: code[x]['content']))
quesinfo['qtype'] = quesinfo['qnum'].map((lambda x: code[x]['qtype']))
quesinfo['itemname'] = (quesinfo['qnum'] + quesinfo['itemnum'].map((lambda x: ('_%s' % x))))
quesinfo['itemname'] = quesinfo['itemname'].replace(code_item)
quesinfo['codename'] = ''
quesinfo.loc[((quesinfo['code'] == 0), 'codename')] = '否'
quesinfo.loc[((quesinfo['code'] == 1), 'codename')] = '是'
quesinfo['tmp'] = (quesinfo['qnum'] + quesinfo['code'].map((lambda x: ('_%s' % int(x)))))
quesinfo['codename'].update(quesinfo.loc[(((quesinfo['code'] > 0) & (quesinfo['qtype'] == '矩阵单选题')), 'tmp')].map((lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])])))
quesinfo['codename'].update(quesinfo.loc[(((quesinfo['code'] > 0) & (quesinfo['qtype'] == '排序题')), 'tmp')].map((lambda x: 'Top{}'.format(x.split('_')[1]))))
quesinfo['begin_date'] = begin_date
quesinfo['end_date'] = end_date
if quesid:
quesinfo['quesid'] = quesid
quesinfo = quesinfo[['quesid', 'begin_date', 'end_date', 'qnum', 'qname', 'qtype', 'samplelen', 'itemnum', 'itemname', 'code', 'codename', 'count', 'percent(%)']]
else:
quesinfo = quesinfo[['qnum', 'qname', 'qtype', 'samplelen', 'itemnum', 'itemname', 'code', 'codename', 'count', 'percent(%)']]
quesinfo['qnum'] = quesinfo['qnum'].astype('category')
quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()), key=(lambda x: int(re.findall('\\d+', x)[0]))), inplace=True)
quesinfo['itemnum'] = quesinfo['itemnum'].astype('category')
quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()), key=(lambda x: int(re.findall('\\d+', x)[0]))), inplace=True)
quesinfo = quesinfo.sort_values(['qnum', 'itemnum', 'code'])
return (qdata, quesinfo)
| 8,205,630,172,292,489,000
|
将问卷数据展平,字段如下
userid: 用户ID
quesid: 问卷ID
qnum: 题号
qname: 题目内容
qtype: 题目类型
samplelen:题目的样本数
itemnum: 选项序号
itemname: 选项内容
code: 用户的选择
codename: 用户选择的具体值
count: 计数
percent(%): 计数占比(百分比)
|
reportgen/questionnaire/questionnaire.py
|
qdata_flatten
|
brightgeng/reportgen
|
python
|
def qdata_flatten(data, code, quesid=None, userid_begin=None):
'将问卷数据展平,字段如下\n userid: 用户ID\n quesid: 问卷ID\n qnum: 题号\n qname: 题目内容\n qtype: 题目类型\n samplelen:题目的样本数\n itemnum: 选项序号\n itemname: 选项内容\n code: 用户的选择\n codename: 用户选择的具体值\n count: 计数\n percent(%): 计数占比(百分比)\n '
if (not userid_begin):
userid_begin = 1000000
data.index = [((userid_begin + i) + 1) for i in range(len(data))]
if ('提交答卷时间' in data.columns):
begin_date = pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')
end_date = pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')
else:
        begin_date = ''
        end_date = ''
(data, code) = to_dummpy(data, code, qtype_new='单选题')
code_item = {}
for qq in code:
if (code[qq]['qtype'] == '矩阵单选题'):
code_item.update(code[qq]['code_r'])
else:
code_item.update(code[qq]['code'])
qdata = data.stack().reset_index()
qdata.columns = ['userid', 'qn_an', 'code']
qdata['qnum'] = qdata['qn_an'].map((lambda x: x.split('_')[0]))
qdata['itemnum'] = qdata['qn_an'].map((lambda x: '_'.join(x.split('_')[1:])))
if quesid:
qdata['quesid'] = quesid
qdata = qdata[['userid', 'quesid', 'qnum', 'itemnum', 'code']]
else:
qdata = qdata[['userid', 'qnum', 'itemnum', 'code']]
samplelen = qdata.groupby(['userid', 'qnum'])['code'].sum().map((lambda x: int((x > 0)))).unstack().sum()
quesinfo = qdata.groupby(['qnum', 'itemnum', 'code'])['code'].count()
quesinfo.name = 'count'
quesinfo = quesinfo.reset_index()
quesinfo = quesinfo[(quesinfo['code'] != 0)]
quesinfo['samplelen'] = quesinfo['qnum'].replace(samplelen.to_dict())
quesinfo['percent(%)'] = 0
quesinfo.loc[((quesinfo['samplelen'] > 0), 'percent(%)')] = ((100 * quesinfo.loc[((quesinfo['samplelen'] > 0), 'count')]) / quesinfo.loc[((quesinfo['samplelen'] > 0), 'samplelen')])
quesinfo['qname'] = quesinfo['qnum'].map((lambda x: code[x]['content']))
quesinfo['qtype'] = quesinfo['qnum'].map((lambda x: code[x]['qtype']))
quesinfo['itemname'] = (quesinfo['qnum'] + quesinfo['itemnum'].map((lambda x: ('_%s' % x))))
quesinfo['itemname'] = quesinfo['itemname'].replace(code_item)
    quesinfo['codename'] = ''
quesinfo.loc[((quesinfo['code'] == 0), 'codename')] = '否'
quesinfo.loc[((quesinfo['code'] == 1), 'codename')] = '是'
quesinfo['tmp'] = (quesinfo['qnum'] + quesinfo['code'].map((lambda x: ('_%s' % int(x)))))
quesinfo['codename'].update(quesinfo.loc[(((quesinfo['code'] > 0) & (quesinfo['qtype'] == '矩阵单选题')), 'tmp')].map((lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])])))
quesinfo['codename'].update(quesinfo.loc[(((quesinfo['code'] > 0) & (quesinfo['qtype'] == '排序题')), 'tmp')].map((lambda x: 'Top{}'.format(x.split('_')[1]))))
quesinfo['begin_date'] = begin_date
quesinfo['end_date'] = end_date
if quesid:
quesinfo['quesid'] = quesid
quesinfo = quesinfo[['quesid', 'begin_date', 'end_date', 'qnum', 'qname', 'qtype', 'samplelen', 'itemnum', 'itemname', 'code', 'codename', 'count', 'percent(%)']]
else:
quesinfo = quesinfo[['qnum', 'qname', 'qtype', 'samplelen', 'itemnum', 'itemname', 'code', 'codename', 'count', 'percent(%)']]
quesinfo['qnum'] = quesinfo['qnum'].astype('category')
quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()), key=(lambda x: int(re.findall('\\d+', x)[0]))), inplace=True)
quesinfo['itemnum'] = quesinfo['itemnum'].astype('category')
quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()), key=(lambda x: int(re.findall('\\d+', x)[0]))), inplace=True)
quesinfo = quesinfo.sort_values(['qnum', 'itemnum', 'code'])
return (qdata, quesinfo)
|
def sample_size_cal(interval, N, alpha=0.05):
'调研样本量的计算\n 参考:https://www.surveysystem.com/sscalc.htm\n sample_size_cal(interval,N,alpha=0.05)\n 输入:\n interval: 误差范围,例如0.03\n N: 总体的大小,一般1万以上就没啥差别啦\n alpha:置信水平,默认95%\n '
import scipy.stats as stats
p = stats.norm.ppf((1 - (alpha / 2)))
if (interval > 1):
interval = (interval / 100)
samplesize = (((p ** 2) / 4) / (interval ** 2))
if N:
samplesize = ((samplesize * N) / (samplesize + N))
samplesize = int(round(samplesize))
return samplesize
| 906,193,507,839,740,700
|
调研样本量的计算
参考:https://www.surveysystem.com/sscalc.htm
sample_size_cal(interval,N,alpha=0.05)
输入:
interval: 误差范围,例如0.03
N: 总体的大小,一般1万以上就没啥差别啦
alpha:置信水平,默认95%
|
reportgen/questionnaire/questionnaire.py
|
sample_size_cal
|
brightgeng/reportgen
|
python
|
def sample_size_cal(interval, N, alpha=0.05):
'调研样本量的计算\n 参考:https://www.surveysystem.com/sscalc.htm\n sample_size_cal(interval,N,alpha=0.05)\n 输入:\n interval: 误差范围,例如0.03\n N: 总体的大小,一般1万以上就没啥差别啦\n alpha:置信水平,默认95%\n '
import scipy.stats as stats
p = stats.norm.ppf((1 - (alpha / 2)))
if (interval > 1):
interval = (interval / 100)
samplesize = (((p ** 2) / 4) / (interval ** 2))
if N:
samplesize = ((samplesize * N) / (samplesize + N))
samplesize = int(round(samplesize))
return samplesize
|
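Usage note: a worked example for sample_size_cal; the intermediate numbers follow directly from the normal quantile and the finite-population correction in the code above.

# Worked example: 95% confidence, ±3% margin, population 100,000.
# z  = stats.norm.ppf(0.975) ≈ 1.96
# n0 = z**2 / 4 / 0.03**2    ≈ 1067.1   (infinite-population size)
# n  = n0*N / (n0 + N)       ≈ 1056     (finite-population correction)
print(sample_size_cal(0.03, 100000))   # -> 1056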
def gof_test(fo, fe=None, alpha=0.05):
'拟合优度检验\n 输入:\n fo:观察频数\n fe:期望频数,缺省为平均数\n 返回:\n 1: 样本与总体有差异\n 0:样本与总体无差异\n 例子:\n gof_test(np.array([0.3,0.4,0.3])*222)\n '
import scipy.stats as stats
fo = np.array(fo).flatten()
C = len(fo)
if (not fe):
N = fo.sum()
fe = np.array(([(N / C)] * C))
else:
fe = np.array(fe).flatten()
chi_value = (((fo - fe) ** 2) / fe)
chi_value = chi_value.sum()
chi_value_fit = stats.chi2.ppf(q=(1 - alpha), df=(C - 1))
if (chi_value > chi_value_fit):
result = 1
else:
result = 0
return result
| -1,421,774,208,672,722,700
|
拟合优度检验
输入:
fo:观察频数
fe:期望频数,缺省为平均数
返回:
1: 样本与总体有差异
0:样本与总体无差异
例子:
gof_test(np.array([0.3,0.4,0.3])*222)
|
reportgen/questionnaire/questionnaire.py
|
gof_test
|
brightgeng/reportgen
|
python
|
def gof_test(fo, fe=None, alpha=0.05):
'拟合优度检验\n 输入:\n fo:观察频数\n fe:期望频数,缺省为平均数\n 返回:\n 1: 样本与总体有差异\n 0:样本与总体无差异\n 例子:\n gof_test(np.array([0.3,0.4,0.3])*222)\n '
import scipy.stats as stats
fo = np.array(fo).flatten()
C = len(fo)
if (not fe):
N = fo.sum()
fe = np.array(([(N / C)] * C))
else:
fe = np.array(fe).flatten()
chi_value = (((fo - fe) ** 2) / fe)
chi_value = chi_value.sum()
chi_value_fit = stats.chi2.ppf(q=(1 - alpha), df=(C - 1))
if (chi_value > chi_value_fit):
result = 1
else:
result = 0
return result
|
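Usage note: working through the docstring's own example for gof_test, with the chi-square arithmetic spelled out.

# Observed frequencies 66.6 / 88.8 / 66.6; fe defaults to the mean (74 each).
fo = np.array([0.3, 0.4, 0.3]) * 222
# chi2 = 2*(66.6-74)**2/74 + (88.8-74)**2/74 ≈ 4.44
# critical value chi2(0.95, df=2) ≈ 5.99; 4.44 < 5.99 -> no difference
print(gof_test(fo))   # -> 0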
def fisher_exact(fo, alpha=0.05):
'fisher_exact 显著性检验函数\n 此处采用的是调用R的解决方案,需要安装包 pyper\n python解决方案参见\n https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/\n 但还有些问题,所以没用.\n '
import pyper as pr
r = pr.R(use_pandas=True, use_numpy=True)
r.assign('fo', fo)
r('b<-fisher.test(fo)')
pdata = r['b']
p_value = pdata['p.value']
if (p_value < alpha):
result = 1
else:
result = 0
return (result, p_value)
| 8,313,948,059,721,519,000
|
fisher_exact 显著性检验函数
此处采用的是调用R的解决方案,需要安装包 pyper
python解决方案参见
https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/
但还有些问题,所以没用.
|
reportgen/questionnaire/questionnaire.py
|
fisher_exact
|
brightgeng/reportgen
|
python
|
def fisher_exact(fo, alpha=0.05):
'fisher_exact 显著性检验函数\n 此处采用的是调用R的解决方案,需要安装包 pyper\n python解决方案参见\n https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/\n 但还有些问题,所以没用.\n '
import pyper as pr
r = pr.R(use_pandas=True, use_numpy=True)
r.assign('fo', fo)
r('b<-fisher.test(fo)')
pdata = r['b']
p_value = pdata['p.value']
if (p_value < alpha):
result = 1
else:
result = 0
return (result, p_value)
|
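Usage note: for 2x2 tables, scipy offers a pure-Python route that avoids the pyper/R bridge used above; note scipy's version handles 2x2 tables only, whereas R's fisher.test accepts m x n.

# Alternative for 2x2 tables only, no R installation required.
import scipy.stats as stats
fo = [[8, 2], [1, 5]]
oddsratio, p_value = stats.fisher_exact(fo)
print(p_value, int(p_value < 0.05))   # same 0/1 convention as above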
def anova(data, formula):
'方差分析\n 输入\n --data: DataFrame格式,包含数值型变量和分类型变量\n --formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]\n\n 返回[方差分析表]\n [总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]\n --df:自由度\n --sum_sq:误差平方和\n --mean_sq:误差平方和/对应的自由度\n --F:mean_sq之比\n --PR(>F):p值,比如<0.05则代表有显著性差异\n '
import statsmodels.api as sm
from statsmodels.formula.api import ols
cw_lm = ols(formula, data=data).fit()
r = sm.stats.anova_lm(cw_lm)
return r
| 3,996,825,466,765,998,600
|
方差分析
输入
--data: DataFrame格式,包含数值型变量和分类型变量
--formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]
返回[方差分析表]
[总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]
--df:自由度
--sum_sq:误差平方和
--mean_sq:误差平方和/对应的自由度
--F:mean_sq之比
--PR(>F):p值,比如<0.05则代表有显著性差异
|
reportgen/questionnaire/questionnaire.py
|
anova
|
brightgeng/reportgen
|
python
|
def anova(data, formula):
'方差分析\n 输入\n --data: DataFrame格式,包含数值型变量和分类型变量\n --formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]\n\n 返回[方差分析表]\n [总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]\n --df:自由度\n --sum_sq:误差平方和\n --mean_sq:误差平方和/对应的自由度\n --F:mean_sq之比\n --PR(>F):p值,比如<0.05则代表有显著性差异\n '
import statsmodels.api as sm
from statsmodels.formula.api import ols
cw_lm = ols(formula, data=data).fit()
r = sm.stats.anova_lm(cw_lm)
return r
|
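Usage note: a minimal sketch for anova, following the formula convention from the docstring; the scores and groups are invented.

# Sketch: one-way ANOVA of a numeric score across three groups.
df = pd.DataFrame({'score': [5, 6, 5, 8, 9, 8, 4, 5, 4],
                   'group': ['A']*3 + ['B']*3 + ['C']*3})
print(anova(df, 'score ~ C(group)'))
# PR(>F) is the p-value; < 0.05 suggests the group means differ.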
def mca(X, N=2):
"对应分析函数,暂时支持双因素\n X:观察频数表\n N:返回的维数,默认2维\n 可以通过scatter函数绘制:\n fig=scatter([pr,pc])\n fig.savefig('mca.png')\n "
from scipy.linalg import diagsvd
S = X.sum().sum()
Z = (X / S)
r = Z.sum(axis=1)
c = Z.sum()
D_r = np.diag((1 / np.sqrt(r)))
Z_c = (Z - np.outer(r, c))
D_c = np.diag((1 / np.sqrt(c)))
(P, s, Q) = np.linalg.svd(np.dot(np.dot(D_r, Z_c), D_c))
pr = np.dot(np.dot(D_r, P), diagsvd(s[:N], P.shape[0], N))
pc = np.dot(np.dot(D_c, Q.T), diagsvd(s[:N], Q.shape[0], N))
inertia = (np.cumsum((s ** 2)) / np.sum((s ** 2)))
inertia = inertia.tolist()
if isinstance(X, pd.DataFrame):
pr = pd.DataFrame(pr, index=X.index, columns=list('XYZUVW')[:N])
pc = pd.DataFrame(pc, index=X.columns, columns=list('XYZUVW')[:N])
return (pr, pc, inertia)
"\n w=pd.ExcelWriter(u'mca_.xlsx')\n pr.to_excel(w,startrow=0,index_label=True)\n pc.to_excel(w,startrow=len(pr)+2,index_label=True)\n w.save()\n "
| 8,680,992,238,391,971,000
|
对应分析函数,暂时支持双因素
X:观察频数表
N:返回的维数,默认2维
可以通过scatter函数绘制:
fig=scatter([pr,pc])
fig.savefig('mca.png')
|
reportgen/questionnaire/questionnaire.py
|
mca
|
brightgeng/reportgen
|
python
|
def mca(X, N=2):
"对应分析函数,暂时支持双因素\n X:观察频数表\n N:返回的维数,默认2维\n 可以通过scatter函数绘制:\n fig=scatter([pr,pc])\n fig.savefig('mca.png')\n "
from scipy.linalg import diagsvd
S = X.sum().sum()
Z = (X / S)
r = Z.sum(axis=1)
c = Z.sum()
D_r = np.diag((1 / np.sqrt(r)))
Z_c = (Z - np.outer(r, c))
D_c = np.diag((1 / np.sqrt(c)))
(P, s, Q) = np.linalg.svd(np.dot(np.dot(D_r, Z_c), D_c))
pr = np.dot(np.dot(D_r, P), diagsvd(s[:N], P.shape[0], N))
pc = np.dot(np.dot(D_c, Q.T), diagsvd(s[:N], Q.shape[0], N))
inertia = (np.cumsum((s ** 2)) / np.sum((s ** 2)))
inertia = inertia.tolist()
if isinstance(X, pd.DataFrame):
pr = pd.DataFrame(pr, index=X.index, columns=list('XYZUVW')[:N])
pc = pd.DataFrame(pc, index=X.columns, columns=list('XYZUVW')[:N])
return (pr, pc, inertia)
"\n w=pd.ExcelWriter(u'mca_.xlsx')\n pr.to_excel(w,startrow=0,index_label=True)\n pc.to_excel(w,startrow=len(pr)+2,index_label=True)\n w.save()\n "
|
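Usage note: a small sketch for mca on an invented brand-by-attribute frequency table; brand and attribute labels are hypothetical.

# Sketch: correspondence analysis of a 3x3 contingency table.
X = pd.DataFrame([[20, 10, 5],
                  [5, 15, 20],
                  [10, 10, 10]],
                 index=['品牌A', '品牌B', '品牌C'],
                 columns=['年轻', '专业', '实惠'])
pr, pc, inertia = mca(X, N=2)
print(pr)        # brand coordinates in the first two dimensions
print(pc)        # attribute coordinates in the same space
print(inertia)   # cumulative share of inertia explained per dimension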
def cluster(data, code, cluster_qq, n_clusters='auto', max_clusters=7):
'对态度题进行聚类\n '
from sklearn.cluster import KMeans
from sklearn import metrics
qq_max = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0])))[(- 1)]
new_cluster = 'Q{}'.format((int(re.findall('\\d+', qq_max)[0]) + 1))
qlist = code[cluster_qq]['qlist']
X = data[qlist]
std_t = (min((1.41 / np.sqrt(len(qlist))), 0.4) if (len(qlist) >= 8) else 0.1)
X = X[(X.T.std() > std_t)]
index_bk = X.index
X.fillna(0, inplace=True)
X1 = X.T
X1 = ((X1 - X1.mean()) / X1.std())
X1 = X1.T.as_matrix()
if (n_clusters == 'auto'):
silhouette_score = []
SSE_score = []
klist = np.arange(2, 15)
for k in klist:
est = KMeans(k)
est.fit(X1)
tmp = np.sum(((X1 - est.cluster_centers_[est.labels_]) ** 2))
SSE_score.append(tmp)
tmp = metrics.silhouette_score(X1, est.labels_)
silhouette_score.append(tmp)
'\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n fig = plt.figure(2)\n ax.plot(klist,np.array(silhouette_score))\n ax = fig.add_subplot(111)\n ax.plot(klist,np.array(SSE_score))\n '
ss = np.array(silhouette_score)
t1 = ([False] + list((ss[1:] > ss[:(- 1)])))
t2 = (list((ss[:(- 1)] > ss[1:])) + [False])
k_log = [(t1[i] & t2[i]) for i in range(len(t1))]
if (True in k_log):
k = k_log.index(True)
else:
k = 1
k = (k if (k <= (max_clusters - 2)) else (max_clusters - 2))
k_best = klist[k]
else:
k_best = n_clusters
est = KMeans(k_best)
est.fit(X1)
SSE = np.sqrt((np.sum(((X1 - est.cluster_centers_[est.labels_]) ** 2)) / len(X1)))
silhouette_score = metrics.silhouette_score(X1, est.labels_)
print('有效样本数:{},特征数:{},最佳分类个数:{} 类'.format(len(X1), len(qlist), k_best))
    print('SSE(样本到所在类的质心的距离)为:{:.2f},轮廓系数为: {:.2f}'.format(SSE, silhouette_score))
"\n X_PCA = PCA(2).fit_transform(X1)\n kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),\n edgecolor='none', alpha=0.6)\n labels=pd.Series(est.labels_)\n plt.figure()\n plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)\n "
"\n # 三维立体图\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)\n "
parameters = {'methods': 'kmeans', 'inertia': est.inertia_, 'SSE': SSE, 'silhouette': silhouette_score, 'n_clusters': k_best, 'n_features': len(qlist), 'n_samples': len(X1), 'qnum': new_cluster, 'data': X1, 'labels': est.labels_}
data[new_cluster] = pd.Series(est.labels_, index=index_bk)
code[new_cluster] = {'content': '态度题聚类结果', 'qtype': '单选题', 'qlist': [new_cluster], 'code': dict(zip(range(k_best), ['cluster{}'.format((i + 1)) for i in range(k_best)]))}
print('结果已经存进数据, 题号为:{}'.format(new_cluster))
return (data, code, parameters)
"\n # 对应分析\n t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()\n t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]\n t=t.rename(index=code[new_cluster]['code'])\n ca=prince.CA(t)\n ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)\n "
| 1,234,331,575,149,067,300
|
对态度题进行聚类
|
reportgen/questionnaire/questionnaire.py
|
cluster
|
brightgeng/reportgen
|
python
|
def cluster(data, code, cluster_qq, n_clusters='auto', max_clusters=7):
'\n '
from sklearn.cluster import KMeans
from sklearn import metrics
qq_max = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0])))[(- 1)]
new_cluster = 'Q{}'.format((int(re.findall('\\d+', qq_max)[0]) + 1))
qlist = code[cluster_qq]['qlist']
X = data[qlist]
std_t = (min((1.41 / np.sqrt(len(qlist))), 0.4) if (len(qlist) >= 8) else 0.1)
X = X[(X.T.std() > std_t)]
index_bk = X.index
X.fillna(0, inplace=True)
X1 = X.T
X1 = ((X1 - X1.mean()) / X1.std())
X1 = X1.T.as_matrix()
if (n_clusters == 'auto'):
silhouette_score = []
SSE_score = []
klist = np.arange(2, 15)
for k in klist:
est = KMeans(k)
est.fit(X1)
tmp = np.sum(((X1 - est.cluster_centers_[est.labels_]) ** 2))
SSE_score.append(tmp)
tmp = metrics.silhouette_score(X1, est.labels_)
silhouette_score.append(tmp)
'\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n fig = plt.figure(2)\n ax.plot(klist,np.array(silhouette_score))\n ax = fig.add_subplot(111)\n ax.plot(klist,np.array(SSE_score))\n '
ss = np.array(silhouette_score)
t1 = ([False] + list((ss[1:] > ss[:(- 1)])))
t2 = (list((ss[:(- 1)] > ss[1:])) + [False])
k_log = [(t1[i] & t2[i]) for i in range(len(t1))]
if (True in k_log):
k = k_log.index(True)
else:
k = 1
k = (k if (k <= (max_clusters - 2)) else (max_clusters - 2))
k_best = klist[k]
else:
k_best = n_clusters
est = KMeans(k_best)
est.fit(X1)
SSE = np.sqrt((np.sum(((X1 - est.cluster_centers_[est.labels_]) ** 2)) / len(X1)))
silhouette_score = metrics.silhouette_score(X1, est.labels_)
print('有效样本数:{},特征数:{},最佳分类个数:{} 类'.format(len(X1), len(qlist), k_best))
print('SSE(样本到所在类的质心的距离)为:{:.2f},轮廊系数为: {:.2f}'.format(SSE, silhouette_score))
"\n X_PCA = PCA(2).fit_transform(X1)\n kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),\n edgecolor='none', alpha=0.6)\n labels=pd.Series(est.labels_)\n plt.figure()\n plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)\n "
"\n # 三维立体图\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)\n "
parameters = {'methods': 'kmeans', 'inertia': est.inertia_, 'SSE': SSE, 'silhouette': silhouette_score, 'n_clusters': k_best, 'n_features': len(qlist), 'n_samples': len(X1), 'qnum': new_cluster, 'data': X1, 'labels': est.labels_}
data[new_cluster] = pd.Series(est.labels_, index=index_bk)
code[new_cluster] = {'content': '态度题聚类结果', 'qtype': '单选题', 'qlist': [new_cluster], 'code': dict(zip(range(k_best), ['cluster{}'.format((i + 1)) for i in range(k_best)]))}
print('结果已经存进数据, 题号为:{}'.format(new_cluster))
return (data, code, parameters)
"\n # 对应分析\n t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()\n t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]\n t=t.rename(index=code[new_cluster]['code'])\n ca=prince.CA(t)\n ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)\n "
|
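The k-selection heuristic in the `cluster` record above scans k = 2..14 and stops at the first interior local maximum of the silhouette score. A minimal, self-contained sketch of that heuristic on synthetic data (the helper name `pick_k` and the toy blobs are illustrative, not part of the repository):

import numpy as np
from sklearn.cluster import KMeans
from sklearn import metrics

def pick_k(X, k_range=range(2, 15)):
    # Fit KMeans for every candidate k and record the silhouette score.
    scores = []
    for k in k_range:
        labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(X)
        scores.append(metrics.silhouette_score(X, labels))
    ss = np.array(scores)
    # First interior local maximum, mirroring the t1/t2 masks in cluster().
    for i in range(1, len(ss) - 1):
        if ss[i - 1] < ss[i] > ss[i + 1]:
            return list(k_range)[i]
    return list(k_range)[int(np.argmax(ss))]  # fallback: global maximum

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(c, 0.3, size=(50, 2)) for c in (0, 3, 6)])
print(pick_k(X))  # three well-separated blobs, so 3 is expected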
def scatter(data, legend=False, title=None, font_ch=None, find_path=None):
'\n 绘制带数据标签的散点图\n '
import matplotlib.font_manager as fm
if (font_ch is None):
fontlist = ['calibri.ttf', 'simfang.ttf', 'simkai.ttf', 'simhei.ttf', 'simsun.ttc', 'msyh.ttf', 'msyh.ttc']
myfont = ''
if (not find_path):
find_paths = ['C:\\Windows\\Fonts', '']
for find_path in find_paths:
for f in fontlist:
if os.path.exists(os.path.join(find_path, f)):
myfont = os.path.join(find_path, f)
if (len(myfont) == 0):
print('没有找到合适的中文字体绘图,请检查.')
myfont = None
else:
myfont = fm.FontProperties(fname=myfont)
else:
myfont = fm.FontProperties(fname=font_ch)
(fig, ax) = plt.subplots()
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
color = ['blue', 'red', 'green', 'dark']
if (not isinstance(data, list)):
data = [data]
for (i, dd) in enumerate(data):
ax.scatter(dd.iloc[:, 0], dd.iloc[:, 1], c=color[i], s=50, label=dd.columns[1])
for (_, row) in dd.iterrows():
ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i], fontproperties=myfont, fontsize=10)
ax.axis('equal')
if legend:
ax.legend(loc='best')
if title:
ax.set_title(title, fontproperties=myfont)
return fig
| -3,198,626,937,060,132,000
|
Draw a scatter plot with a data label on each point.
|
reportgen/questionnaire/questionnaire.py
|
scatter
|
brightgeng/reportgen
|
python
|
def scatter(data, legend=False, title=None, font_ch=None, find_path=None):
'\n \n '
import matplotlib.font_manager as fm
if (font_ch is None):
fontlist = ['calibri.ttf', 'simfang.ttf', 'simkai.ttf', 'simhei.ttf', 'simsun.ttc', 'msyh.ttf', 'msyh.ttc']
        myfont = ''
if (not find_path):
            find_paths = ['C:\\Windows\\Fonts', '']
for find_path in find_paths:
for f in fontlist:
if os.path.exists(os.path.join(find_path, f)):
myfont = os.path.join(find_path, f)
if (len(myfont) == 0):
print('没有找到合适的中文字体绘图,请检查.')
myfont = None
else:
myfont = fm.FontProperties(fname=myfont)
else:
myfont = fm.FontProperties(fname=font_ch)
(fig, ax) = plt.subplots()
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
color = ['blue', 'red', 'green', 'dark']
if (not isinstance(data, list)):
data = [data]
for (i, dd) in enumerate(data):
ax.scatter(dd.iloc[:, 0], dd.iloc[:, 1], c=color[i], s=50, label=dd.columns[1])
for (_, row) in dd.iterrows():
ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i], fontproperties=myfont, fontsize=10)
ax.axis('equal')
if legend:
ax.legend(loc='best')
if title:
ax.set_title(title, fontproperties=myfont)
return fig
|
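Assuming the function above is importable, `scatter` expects one or more two-column DataFrames whose index carries the point labels; a hypothetical call on toy data might look like:

import pandas as pd

df = pd.DataFrame({'x': [0.8, 0.3, -0.2], 'y': [0.5, -0.4, 0.1]},
                  index=['BrandA', 'BrandB', 'BrandC'])
fig = scatter(df, title='Brand map')  # returns a matplotlib Figure
fig.savefig('brand_map.png', dpi=150)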
def sankey(df, filename=None):
'SanKey图绘制\n df的列是左节点,行是右节点\n 注:暂时没找到好的Python方法,所以只生成R语言所需数据\n 返回links 和 nodes\n # R code 参考\n library(networkD3)\n dd=read.csv(\'price_links.csv\')\n links<-data.frame(source=dd$from,target=dd$to,value=dd$value)\n nodes=read.csv(\'price_nodes.csv\',encoding = \'UTF-8\')\n nodes<-nodes[\'name\']\n Energy=c(links=links,nodes=nodes)\n sankeyNetwork(Links = links, Nodes = nodes, Source = "source",\n Target = "target", Value = "value", NodeID = "name",\n units = "TWh",fontSize = 20,fontFamily=\'微软雅黑\',nodeWidth=20)\n '
nodes = ['Total']
nodes = ((nodes + list(df.columns)) + list(df.index))
nodes = pd.DataFrame(nodes)
nodes['id'] = range(len(nodes))
nodes.columns = ['name', 'id']
(R, C) = df.shape
t1 = pd.DataFrame(df.as_matrix(), columns=range(1, (C + 1)), index=range((C + 1), ((R + C) + 1)))
t1.index.name = 'to'
t1.columns.name = 'from'
links = t1.unstack().reset_index(name='value')
links0 = pd.DataFrame({'from': ([0] * C), 'to': range(1, (C + 1)), 'value': list(df.sum())})
links = links0.append(links)
if filename:
links.to_csv((filename + '_links.csv'), index=False, encoding='utf-8')
nodes.to_csv((filename + '_nodes.csv'), index=False, encoding='utf-8')
return (links, nodes)
| -9,141,936,434,518,142,000
|
Sankey diagram plotting.
The columns of df are the left-hand nodes and its rows the right-hand nodes.
Note: no satisfactory Python renderer has been found yet, so this only generates the data required by R.
Returns links and nodes.
# R code reference
library(networkD3)
dd=read.csv('price_links.csv')
links<-data.frame(source=dd$from,target=dd$to,value=dd$value)
nodes=read.csv('price_nodes.csv',encoding = 'UTF-8')
nodes<-nodes['name']
Energy=c(links=links,nodes=nodes)
sankeyNetwork(Links = links, Nodes = nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh",fontSize = 20,fontFamily='微软雅黑',nodeWidth=20)
|
reportgen/questionnaire/questionnaire.py
|
sankey
|
brightgeng/reportgen
|
python
|
def sankey(df, filename=None):
'SanKey图绘制\n df的列是左节点,行是右节点\n 注:暂时没找到好的Python方法,所以只生成R语言所需数据\n 返回links 和 nodes\n # R code 参考\n library(networkD3)\n dd=read.csv(\'price_links.csv\')\n links<-data.frame(source=dd$from,target=dd$to,value=dd$value)\n nodes=read.csv(\'price_nodes.csv\',encoding = \'UTF-8\')\n nodes<-nodes[\'name\']\n Energy=c(links=links,nodes=nodes)\n sankeyNetwork(Links = links, Nodes = nodes, Source = "source",\n Target = "target", Value = "value", NodeID = "name",\n units = "TWh",fontSize = 20,fontFamily=\'微软雅黑\',nodeWidth=20)\n '
nodes = ['Total']
nodes = ((nodes + list(df.columns)) + list(df.index))
nodes = pd.DataFrame(nodes)
nodes['id'] = range(len(nodes))
nodes.columns = ['name', 'id']
(R, C) = df.shape
t1 = pd.DataFrame(df.as_matrix(), columns=range(1, (C + 1)), index=range((C + 1), ((R + C) + 1)))
t1.index.name = 'to'
t1.columns.name = 'from'
links = t1.unstack().reset_index(name='value')
links0 = pd.DataFrame({'from': ([0] * C), 'to': range(1, (C + 1)), 'value': list(df.sum())})
links = links0.append(links)
if filename:
links.to_csv((filename + '_links.csv'), index=False, encoding='utf-8')
nodes.to_csv((filename + '_nodes.csv'), index=False, encoding='utf-8')
return (links, nodes)
|
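The links table produced above is just the unstacked flow matrix plus a synthetic 'Total' source node with id 0. A small worked example; note the body relies on pre-1.0 pandas APIs (as_matrix, DataFrame.append), so this only runs under the pandas versions the repository targets:

import pandas as pd

# Columns are the left-hand (source) nodes, rows the right-hand (target) nodes.
df = pd.DataFrame([[10, 5], [3, 7]],
                  index=['high price', 'low price'],
                  columns=['online', 'offline'])
links, nodes = sankey(df)
print(nodes)  # name/id pairs: Total=0, online=1, offline=2, high price=3, low price=4
print(links)  # Total->column flows first, then column->row flows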
def table(data, code, total=True):
"\n 单个题目描述统计\n code是data的编码,列数大于1\n 返回字典格式数据:\n 'fop':百分比, 对于单选题和为1,多选题分母为样本数\n 'fo': 观察频数表,其中添加了合计项\n 'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有\n "
qtype = code['qtype']
index = code['qlist']
data = pd.DataFrame(data)
sample_len = data[code['qlist']].notnull().T.any().sum()
result = {}
if (qtype == u'单选题'):
fo = data.iloc[:, 0].value_counts()
if ('weight' in code):
w = pd.Series(code['weight'])
fo1 = fo[w.index][fo[w.index].notnull()]
fw = ((fo1 * w).sum() / fo1.sum())
result['fw'] = fw
fo.sort_values(ascending=False, inplace=True)
fop = fo.copy()
fop = ((fop / fop.sum()) * 1.0)
fop[u'合计'] = fop.sum()
fo[u'合计'] = fo.sum()
if ('code' in code):
fop.rename(index=code['code'], inplace=True)
fo.rename(index=code['code'], inplace=True)
fop.name = u'占比'
fo.name = u'频数'
fop = pd.DataFrame(fop)
fo = pd.DataFrame(fo)
result['fo'] = fo
result['fop'] = fop
elif (qtype == u'多选题'):
fo = data.sum()
fo.sort_values(ascending=False, inplace=True)
fo[u'合计'] = fo.sum()
if ('code' in code):
fo.rename(index=code['code'], inplace=True)
fop = fo.copy()
fop = (fop / sample_len)
fop.name = u'占比'
fo.name = u'频数'
fop = pd.DataFrame(fop)
fo = pd.DataFrame(fo)
result['fop'] = fop
result['fo'] = fo
elif (qtype == u'矩阵单选题'):
fo = pd.DataFrame(columns=code['qlist'], index=sorted(code['code']))
for i in fo.columns:
fo.loc[:, i] = data[i].value_counts()
if ('weight' not in code):
code['weight'] = dict(zip(code['code'].keys(), code['code'].keys()))
fw = pd.DataFrame(columns=[u'加权'], index=code['qlist'])
w = pd.Series(code['weight'])
for c in fo.columns:
t = fo[c]
t = t[w.index][t[w.index].notnull()]
if (t.sum() > 1e-17):
fw.loc[(c, u'加权')] = ((t * w).sum() / t.sum())
else:
fw.loc[(c, u'加权')] = 0
fw.rename(index=code['code_r'], inplace=True)
result['fw'] = fw
result['weight'] = ','.join(['{}:{}'.format(code['code'][c], code['weight'][c]) for c in code['code']])
fo.rename(columns=code['code_r'], index=code['code'], inplace=True)
fop = fo.copy()
fop = (fop / sample_len)
result['fop'] = fop
result['fo'] = fo
elif (qtype == u'排序题'):
topn = data[index].fillna(0).max().max()
topn = int(topn)
qsort = dict(zip([(i + 1) for i in range(topn)], [((((topn - i) * 2.0) / (topn + 1)) / topn) for i in range(topn)]))
top1 = data.applymap((lambda x: int((x == 1))))
data_weight = data.replace(qsort)
t1 = pd.DataFrame()
t1['TOP1'] = top1.sum()
t1[u'综合'] = data_weight.sum()
t1.sort_values(by=u'综合', ascending=False, inplace=True)
t1.rename(index=code['code'], inplace=True)
t = t1.copy()
t = (t / sample_len)
result['fop'] = t
result['fo'] = t1
t_topn = pd.DataFrame()
for i in range(topn):
t_topn[('TOP%d' % (i + 1))] = data.applymap((lambda x: int((x == (i + 1))))).sum()
t_topn.sort_values(by=u'TOP1', ascending=False, inplace=True)
if ('code' in code):
t_topn.rename(index=code['code'], inplace=True)
result['TOPN_fo'] = t_topn
result['TOPN'] = (t_topn / sample_len)
result['weight'] = '+'.join(['TOP{}*{:.2f}'.format((i + 1), ((((topn - i) * 2.0) / (topn + 1)) / topn)) for i in range(topn)])
else:
result['fop'] = None
result['fo'] = None
if ((not total) and (not (result['fo'] is None)) and (u'合计' in result['fo'].index)):
result['fo'].drop([u'合计'], axis=0, inplace=True)
result['fop'].drop([u'合计'], axis=0, inplace=True)
if ((not (result['fo'] is None)) and ('code_order' in code)):
code_order = [q for q in code['code_order'] if (q in result['fo'].index)]
if (u'合计' in result['fo'].index):
code_order = (code_order + [u'合计'])
result['fo'] = pd.DataFrame(result['fo'], index=code_order)
result['fop'] = pd.DataFrame(result['fop'], index=code_order)
return result
| -7,405,818,918,458,051,000
|
Descriptive statistics for a single question.
code is the coding of data, which may span more than one column.
Returns a dict:
'fop': percentages; sums to 1 for single-choice questions, while multiple-choice shares use the sample size as denominator
'fo': observed frequency table, with a 合计 (total) row appended
'fw': weighted frequency table (enables means, T2B and the like); present only when code contains a 'weight' key
|
reportgen/questionnaire/questionnaire.py
|
table
|
brightgeng/reportgen
|
python
|
def table(data, code, total=True):
"\n 单个题目描述统计\n code是data的编码,列数大于1\n 返回字典格式数据:\n 'fop':百分比, 对于单选题和为1,多选题分母为样本数\n 'fo': 观察频数表,其中添加了合计项\n 'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有\n "
qtype = code['qtype']
index = code['qlist']
data = pd.DataFrame(data)
sample_len = data[code['qlist']].notnull().T.any().sum()
result = {}
if (qtype == u'单选题'):
fo = data.iloc[:, 0].value_counts()
if ('weight' in code):
w = pd.Series(code['weight'])
fo1 = fo[w.index][fo[w.index].notnull()]
fw = ((fo1 * w).sum() / fo1.sum())
result['fw'] = fw
fo.sort_values(ascending=False, inplace=True)
fop = fo.copy()
fop = ((fop / fop.sum()) * 1.0)
fop[u'合计'] = fop.sum()
fo[u'合计'] = fo.sum()
if ('code' in code):
fop.rename(index=code['code'], inplace=True)
fo.rename(index=code['code'], inplace=True)
fop.name = u'占比'
fo.name = u'频数'
fop = pd.DataFrame(fop)
fo = pd.DataFrame(fo)
result['fo'] = fo
result['fop'] = fop
elif (qtype == u'多选题'):
fo = data.sum()
fo.sort_values(ascending=False, inplace=True)
fo[u'合计'] = fo.sum()
if ('code' in code):
fo.rename(index=code['code'], inplace=True)
fop = fo.copy()
fop = (fop / sample_len)
fop.name = u'占比'
fo.name = u'频数'
fop = pd.DataFrame(fop)
fo = pd.DataFrame(fo)
result['fop'] = fop
result['fo'] = fo
elif (qtype == u'矩阵单选题'):
fo = pd.DataFrame(columns=code['qlist'], index=sorted(code['code']))
for i in fo.columns:
fo.loc[:, i] = data[i].value_counts()
if ('weight' not in code):
code['weight'] = dict(zip(code['code'].keys(), code['code'].keys()))
fw = pd.DataFrame(columns=[u'加权'], index=code['qlist'])
w = pd.Series(code['weight'])
for c in fo.columns:
t = fo[c]
t = t[w.index][t[w.index].notnull()]
if (t.sum() > 1e-17):
fw.loc[(c, u'加权')] = ((t * w).sum() / t.sum())
else:
fw.loc[(c, u'加权')] = 0
fw.rename(index=code['code_r'], inplace=True)
result['fw'] = fw
result['weight'] = ','.join(['{}:{}'.format(code['code'][c], code['weight'][c]) for c in code['code']])
fo.rename(columns=code['code_r'], index=code['code'], inplace=True)
fop = fo.copy()
fop = (fop / sample_len)
result['fop'] = fop
result['fo'] = fo
elif (qtype == u'排序题'):
topn = data[index].fillna(0).max().max()
topn = int(topn)
qsort = dict(zip([(i + 1) for i in range(topn)], [((((topn - i) * 2.0) / (topn + 1)) / topn) for i in range(topn)]))
top1 = data.applymap((lambda x: int((x == 1))))
data_weight = data.replace(qsort)
t1 = pd.DataFrame()
t1['TOP1'] = top1.sum()
t1[u'综合'] = data_weight.sum()
t1.sort_values(by=u'综合', ascending=False, inplace=True)
t1.rename(index=code['code'], inplace=True)
t = t1.copy()
t = (t / sample_len)
result['fop'] = t
result['fo'] = t1
t_topn = pd.DataFrame()
for i in range(topn):
t_topn[('TOP%d' % (i + 1))] = data.applymap((lambda x: int((x == (i + 1))))).sum()
t_topn.sort_values(by=u'TOP1', ascending=False, inplace=True)
if ('code' in code):
t_topn.rename(index=code['code'], inplace=True)
result['TOPN_fo'] = t_topn
result['TOPN'] = (t_topn / sample_len)
result['weight'] = '+'.join(['TOP{}*{:.2f}'.format((i + 1), ((((topn - i) * 2.0) / (topn + 1)) / topn)) for i in range(topn)])
else:
result['fop'] = None
result['fo'] = None
if ((not total) and (not (result['fo'] is None)) and (u'合计' in result['fo'].index)):
result['fo'].drop([u'合计'], axis=0, inplace=True)
result['fop'].drop([u'合计'], axis=0, inplace=True)
if ((not (result['fo'] is None)) and ('code_order' in code)):
code_order = [q for q in code['code_order'] if (q in result['fo'].index)]
if (u'合计' in result['fo'].index):
code_order = (code_order + [u'合计'])
result['fo'] = pd.DataFrame(result['fo'], index=code_order)
result['fop'] = pd.DataFrame(result['fop'], index=code_order)
return result
|
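A toy single-choice call against `table` above (the question id and option codes are hypothetical):

import pandas as pd

data = pd.DataFrame({'Q1': [1, 2, 2, 3, 1, 2]})
code_q1 = {'qtype': '单选题', 'qlist': ['Q1'],
           'code': {1: 'Yes', 2: 'No', 3: 'Unsure'}}
res = table(data, code_q1)
print(res['fo'])   # frequencies with a 合计 (total) row appended
print(res['fop'])  # shares of the total; the 合计 row equals 1.0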
def crosstab(data_index, data_column, code_index=None, code_column=None, qtype=None, total=True):
"适用于问卷数据的交叉统计\n 输入参数:\n data_index: 因变量,放在行中\n data_column:自变量,放在列中\n code_index: dict格式,指定data_index的编码等信息\n code_column: dict格式,指定data_column的编码等信息\n qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个的\n 返回字典格式数据\n 'fop':默认的百分比表,行是data_index,列是data_column\n 'fo':原始频数表,且添加了总体项\n 'fw': 加权平均值\n\n 简要说明:\n 因为要处理各类题型,这里将单选题处理为多选题\n\n fo:观察频数表\n nij是同时选择了Ri和Cj的频数\n 总体的频数是选择了Ri的频数,与所在行的总和无关\n 行变量\\列变量 C1 |C2 | C3| C4|总体\n R1| n11|n12|n13|n14|n1:\n R2| n21|n22|n23|n23|n2:\n R3| n31|n32|n33|n34|n3:\n fop: 观察百分比表(列变量)\n 这里比较难处理,data_column各个类别的样本量和总体的样本量不一样,各类别的样本量为同时\n 选择了行变量和列类别的频数。而总体的样本量为选择了行变量的频数\n fw: 加权平均值\n 如果data_index的编码code含有weight字段,则我们会输出分组的加权平均值\n\n\n "
data_index = pd.DataFrame(data_index)
data_column = pd.DataFrame(data_column)
if (data_index.shape[1] == 1):
qtype1 = u'单选题'
else:
qtype1 = u'多选题'
if (data_column.shape[1] == 1):
qtype2 = u'单选题'
else:
qtype2 = u'多选题'
if code_index:
qtype1 = code_index['qtype']
if (qtype1 == u'单选题'):
data_index.replace(code_index['code'], inplace=True)
elif (qtype1 in [u'多选题', u'排序题']):
data_index.rename(columns=code_index['code'], inplace=True)
elif (qtype1 == u'矩阵单选题'):
data_index.rename(columns=code_index['code_r'], inplace=True)
if code_column:
qtype2 = code_column['qtype']
if (qtype2 == u'单选题'):
data_column.replace(code_column['code'], inplace=True)
elif (qtype2 in [u'多选题', u'排序题']):
data_column.rename(columns=code_column['code'], inplace=True)
elif (qtype2 == u'矩阵单选题'):
data_column.rename(columns=code_column['code_r'], inplace=True)
if qtype:
if (isinstance(qtype, list) and (len(qtype) == 2)):
qtype1 = qtype[0]
qtype2 = qtype[1]
elif isinstance(qtype, str):
qtype1 = qtype
if (qtype1 == u'单选题'):
data_index = sa_to_ma(data_index)
qtype1 = u'多选题'
if (qtype2 == u'单选题'):
data_column = sa_to_ma(data_column)
qtype2 = u'多选题'
index_list = list(data_index.columns)
columns_list = list(data_column.columns)
column_freq = data_column.iloc[list(data_index.notnull().T.any()), :].sum()
column_freq[u'总体'] = data_index.notnull().T.any().sum()
R = len(index_list)
C = len(columns_list)
result = {}
result['sample_size'] = column_freq
if ((qtype1 == u'多选题') and (qtype2 == u'多选题')):
data_index.fillna(0, inplace=True)
t = pd.DataFrame(np.dot(data_index.fillna(0).T, data_column.fillna(0)))
t.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
if (code_index and ('weight' in code_index)):
w = pd.Series(code_index['weight'])
w.rename(index=code_index['code'], inplace=True)
fw = pd.DataFrame(columns=[u'加权'], index=t.columns)
for c in t.columns:
tmp = t[c]
tmp = tmp[w.index][tmp[w.index].notnull()]
if (abs(tmp.sum()) > 0):
fw.loc[(c, u'加权')] = ((tmp * w).sum() / tmp.sum())
else:
fw.loc[(c, u'加权')] = 0
fo1 = data_index.sum()[w.index][data_index.sum()[w.index].notnull()]
if (abs(fo1.sum()) > 0):
fw.loc[(u'总体', u'加权')] = ((fo1 * w).sum() / fo1.sum())
else:
fw.loc[(u'总体', u'加权')] = 0
result['fw'] = fw
t[u'总体'] = data_index.sum()
t.sort_values([u'总体'], ascending=False, inplace=True)
t1 = t.copy()
for i in t.columns:
if (column_freq[i] != 0):
t.loc[:, i] = (t.loc[:, i] / column_freq[i])
result['fop'] = t
result['fo'] = t1
elif ((qtype1 == u'矩阵单选题') and (qtype2 == u'多选题')):
if (code_index and ('weight' in code_index)):
data_index.replace(code_index['weight'], inplace=True)
t = pd.DataFrame(np.dot(data_index.fillna(0).T, data_column.fillna(0)))
t = pd.DataFrame(np.dot(t, np.diag((1 / data_column.sum()))))
t.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
t[u'总体'] = data_index.mean()
t.sort_values([u'总体'], ascending=False, inplace=True)
t1 = t.copy()
result['fop'] = t
result['fo'] = t1
elif ((qtype1 == u'排序题') and (qtype2 == u'多选题')):
topn = int(data_index.max().max())
qsort = dict(zip([(i + 1) for i in range(topn)], [((((topn - i) * 2.0) / (topn + 1)) / topn) for i in range(topn)]))
data_index_zh = data_index.replace(qsort)
t = pd.DataFrame(np.dot(data_index_zh.fillna(0).T, data_column.fillna(0)))
t.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
t[u'总体'] = data_index_zh.sum()
t.sort_values([u'总体'], ascending=False, inplace=True)
t1 = t.copy()
for i in t.columns:
if (column_freq[i] != 0):
t.loc[:, i] = (t.loc[:, i] / column_freq[i])
result['fop'] = t
result['fo'] = t1
data_index_top1 = data_index.applymap((lambda x: int((x == 1))))
top1 = pd.DataFrame(np.dot(data_index_top1.fillna(0).T, data_column.fillna(0)))
top1.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
top1[u'总体'] = data_index_top1.fillna(0).sum()
top1.sort_values([u'总体'], ascending=False, inplace=True)
for i in top1.columns:
if (column_freq[i] != 0):
top1.loc[:, i] = (top1.loc[:, i] / column_freq[i])
result['TOP1'] = top1
else:
result['fop'] = None
result['fo'] = None
if ((not total) and (not (result['fo'] is None)) and ('总体' in result['fo'].columns)):
result['fo'].drop(['总体'], axis=1, inplace=True)
result['fop'].drop(['总体'], axis=1, inplace=True)
if ((not (result['fo'] is None)) and code_index and ('code_order' in code_index) and (qtype1 != '矩阵单选题')):
code_order = code_index['code_order']
code_order = [q for q in code_order if (q in result['fo'].index)]
if (u'总体' in result['fo'].index):
code_order = (code_order + [u'总体'])
result['fo'] = pd.DataFrame(result['fo'], index=code_order)
result['fop'] = pd.DataFrame(result['fop'], index=code_order)
if ((not (result['fo'] is None)) and code_column and ('code_order' in code_column) and (qtype2 != '矩阵单选题')):
code_order = code_column['code_order']
code_order = [q for q in code_order if (q in result['fo'].columns)]
if (u'总体' in result['fo'].columns):
code_order = (code_order + [u'总体'])
result['fo'] = pd.DataFrame(result['fo'], columns=code_order)
result['fop'] = pd.DataFrame(result['fop'], columns=code_order)
return result
| -7,027,066,733,633,316,000
|
Cross tabulation tailored to questionnaire data.
Parameters:
data_index: the dependent variable, placed in the rows
data_column: the independent variable, placed in the columns
code_index: dict describing the coding of data_index
code_column: dict describing the coding of data_column
qtype: question type(s) of the inputs; a string sets data_index only, a two-element list sets both
Returns a dict:
'fop': the default percentage table; rows come from data_index, columns from data_column
'fo': the raw frequency table, with a 总体 (overall) column added
'fw': weighted means
Brief notes:
To handle every question type uniformly, single-choice questions are converted to multiple-choice form.
fo: observed frequency table
nij is the number of respondents who picked both Ri and Cj;
the overall frequency counts everyone who picked Ri, independent of the row sums
row var\col var  C1 |C2 | C3| C4|总体
R1| n11|n12|n13|n14|n1:
R2| n21|n22|n23|n24|n2:
R3| n31|n32|n33|n34|n3:
fop: observed percentage table (column-wise)
The subtlety is that each data_column category has its own sample size, namely the respondents who picked both the row variable and that column category, whereas the overall column uses the number of respondents who picked the row variable
fw: weighted means
If the coding of data_index contains a 'weight' field, per-group weighted means are reported as well
|
reportgen/questionnaire/questionnaire.py
|
crosstab
|
brightgeng/reportgen
|
python
|
def crosstab(data_index, data_column, code_index=None, code_column=None, qtype=None, total=True):
"适用于问卷数据的交叉统计\n 输入参数:\n data_index: 因变量,放在行中\n data_column:自变量,放在列中\n code_index: dict格式,指定data_index的编码等信息\n code_column: dict格式,指定data_column的编码等信息\n qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个的\n 返回字典格式数据\n 'fop':默认的百分比表,行是data_index,列是data_column\n 'fo':原始频数表,且添加了总体项\n 'fw': 加权平均值\n\n 简要说明:\n 因为要处理各类题型,这里将单选题处理为多选题\n\n fo:观察频数表\n nij是同时选择了Ri和Cj的频数\n 总体的频数是选择了Ri的频数,与所在行的总和无关\n 行变量\\列变量 C1 |C2 | C3| C4|总体\n R1| n11|n12|n13|n14|n1:\n R2| n21|n22|n23|n23|n2:\n R3| n31|n32|n33|n34|n3:\n fop: 观察百分比表(列变量)\n 这里比较难处理,data_column各个类别的样本量和总体的样本量不一样,各类别的样本量为同时\n 选择了行变量和列类别的频数。而总体的样本量为选择了行变量的频数\n fw: 加权平均值\n 如果data_index的编码code含有weight字段,则我们会输出分组的加权平均值\n\n\n "
data_index = pd.DataFrame(data_index)
data_column = pd.DataFrame(data_column)
if (data_index.shape[1] == 1):
qtype1 = u'单选题'
else:
qtype1 = u'多选题'
if (data_column.shape[1] == 1):
qtype2 = u'单选题'
else:
qtype2 = u'多选题'
if code_index:
qtype1 = code_index['qtype']
if (qtype1 == u'单选题'):
data_index.replace(code_index['code'], inplace=True)
elif (qtype1 in [u'多选题', u'排序题']):
data_index.rename(columns=code_index['code'], inplace=True)
elif (qtype1 == u'矩阵单选题'):
data_index.rename(columns=code_index['code_r'], inplace=True)
if code_column:
qtype2 = code_column['qtype']
if (qtype2 == u'单选题'):
data_column.replace(code_column['code'], inplace=True)
elif (qtype2 in [u'多选题', u'排序题']):
data_column.rename(columns=code_column['code'], inplace=True)
elif (qtype2 == u'矩阵单选题'):
data_column.rename(columns=code_column['code_r'], inplace=True)
if qtype:
if (isinstance(qtype, list) and (len(qtype) == 2)):
qtype1 = qtype[0]
qtype2 = qtype[1]
elif isinstance(qtype, str):
qtype1 = qtype
if (qtype1 == u'单选题'):
data_index = sa_to_ma(data_index)
qtype1 = u'多选题'
if (qtype2 == u'单选题'):
data_column = sa_to_ma(data_column)
qtype2 = u'多选题'
index_list = list(data_index.columns)
columns_list = list(data_column.columns)
column_freq = data_column.iloc[list(data_index.notnull().T.any()), :].sum()
column_freq[u'总体'] = data_index.notnull().T.any().sum()
R = len(index_list)
C = len(columns_list)
result = {}
result['sample_size'] = column_freq
if ((qtype1 == u'多选题') and (qtype2 == u'多选题')):
data_index.fillna(0, inplace=True)
t = pd.DataFrame(np.dot(data_index.fillna(0).T, data_column.fillna(0)))
t.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
if (code_index and ('weight' in code_index)):
w = pd.Series(code_index['weight'])
w.rename(index=code_index['code'], inplace=True)
fw = pd.DataFrame(columns=[u'加权'], index=t.columns)
for c in t.columns:
tmp = t[c]
tmp = tmp[w.index][tmp[w.index].notnull()]
if (abs(tmp.sum()) > 0):
fw.loc[(c, u'加权')] = ((tmp * w).sum() / tmp.sum())
else:
fw.loc[(c, u'加权')] = 0
fo1 = data_index.sum()[w.index][data_index.sum()[w.index].notnull()]
if (abs(fo1.sum()) > 0):
fw.loc[(u'总体', u'加权')] = ((fo1 * w).sum() / fo1.sum())
else:
fw.loc[(u'总体', u'加权')] = 0
result['fw'] = fw
t[u'总体'] = data_index.sum()
t.sort_values([u'总体'], ascending=False, inplace=True)
t1 = t.copy()
for i in t.columns:
if (column_freq[i] != 0):
t.loc[:, i] = (t.loc[:, i] / column_freq[i])
result['fop'] = t
result['fo'] = t1
elif ((qtype1 == u'矩阵单选题') and (qtype2 == u'多选题')):
if (code_index and ('weight' in code_index)):
data_index.replace(code_index['weight'], inplace=True)
t = pd.DataFrame(np.dot(data_index.fillna(0).T, data_column.fillna(0)))
t = pd.DataFrame(np.dot(t, np.diag((1 / data_column.sum()))))
t.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
t[u'总体'] = data_index.mean()
t.sort_values([u'总体'], ascending=False, inplace=True)
t1 = t.copy()
result['fop'] = t
result['fo'] = t1
elif ((qtype1 == u'排序题') and (qtype2 == u'多选题')):
topn = int(data_index.max().max())
qsort = dict(zip([(i + 1) for i in range(topn)], [((((topn - i) * 2.0) / (topn + 1)) / topn) for i in range(topn)]))
data_index_zh = data_index.replace(qsort)
t = pd.DataFrame(np.dot(data_index_zh.fillna(0).T, data_column.fillna(0)))
t.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
t[u'总体'] = data_index_zh.sum()
t.sort_values([u'总体'], ascending=False, inplace=True)
t1 = t.copy()
for i in t.columns:
if (column_freq[i] != 0):
t.loc[:, i] = (t.loc[:, i] / column_freq[i])
result['fop'] = t
result['fo'] = t1
data_index_top1 = data_index.applymap((lambda x: int((x == 1))))
top1 = pd.DataFrame(np.dot(data_index_top1.fillna(0).T, data_column.fillna(0)))
top1.rename(index=dict(zip(range(R), index_list)), columns=dict(zip(range(C), columns_list)), inplace=True)
top1[u'总体'] = data_index_top1.fillna(0).sum()
top1.sort_values([u'总体'], ascending=False, inplace=True)
for i in top1.columns:
if (column_freq[i] != 0):
top1.loc[:, i] = (top1.loc[:, i] / column_freq[i])
result['TOP1'] = top1
else:
result['fop'] = None
result['fo'] = None
if ((not total) and (not (result['fo'] is None)) and ('总体' in result['fo'].columns)):
result['fo'].drop(['总体'], axis=1, inplace=True)
result['fop'].drop(['总体'], axis=1, inplace=True)
if ((not (result['fo'] is None)) and code_index and ('code_order' in code_index) and (qtype1 != '矩阵单选题')):
code_order = code_index['code_order']
code_order = [q for q in code_order if (q in result['fo'].index)]
if (u'总体' in result['fo'].index):
code_order = (code_order + [u'总体'])
result['fo'] = pd.DataFrame(result['fo'], index=code_order)
result['fop'] = pd.DataFrame(result['fop'], index=code_order)
if ((not (result['fo'] is None)) and code_column and ('code_order' in code_column) and (qtype2 != '矩阵单选题')):
code_order = code_column['code_order']
code_order = [q for q in code_order if (q in result['fo'].columns)]
if (u'总体' in result['fo'].columns):
code_order = (code_order + [u'总体'])
result['fo'] = pd.DataFrame(result['fo'], columns=code_order)
result['fop'] = pd.DataFrame(result['fop'], columns=code_order)
return result
|
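The heart of `crosstab` is that once both questions are in 0/1 indicator form, the joint frequency table is a single matrix product, exactly the np.dot calls above. A self-contained illustration:

import numpy as np
import pandas as pd

# Indicator matrices: one row per respondent, one column per option.
rows = pd.DataFrame({'A': [1, 0, 1, 0], 'B': [0, 1, 0, 1]})          # row variable
cols = pd.DataFrame({'male': [1, 1, 0, 0], 'female': [0, 0, 1, 1]})  # column variable

# fo[i, j] = respondents who ticked row option i AND column option j.
fo = pd.DataFrame(np.dot(rows.T, cols), index=rows.columns, columns=cols.columns)
print(fo)
#    male  female
# A     1       1
# B     1       1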
def qtable(data, *args, **kwargs):
"简易频数统计函数\n 输入\n data:数据框,可以是所有的数据\n code:数据编码\n q1: 题目序号\n q2: 题目序号\n # 单个变量的频数统计\n qtable(data,code,'Q1')\n # 两个变量的交叉统计\n qtable(data,code,'Q1','Q2')\n\n "
code = None
q1 = None
q2 = None
for a in args:
if (isinstance(a, str) and (not q1)):
q1 = a
elif (isinstance(a, str) and q1):
q2 = a
elif isinstance(a, dict):
code = a
if (not code):
code = data_auto_code(data)
if (not q1):
print('please input the q1,such as Q1.')
return
total = False
for key in kwargs:
if (key == 'total'):
total = kwargs['total']
if (q2 is None):
result = table(data[code[q1]['qlist']], code[q1], total=total)
else:
result = crosstab(data[code[q1]['qlist']], data[code[q2]['qlist']], code[q1], code[q2], total=total)
return result
| -5,886,488,093,965,830,000
|
A lightweight frequency helper.
Inputs:
data: DataFrame; may contain the full survey data
code: the data coding
q1: question id
q2: question id
# one-way frequency table of a single variable
qtable(data,code,'Q1')
# cross tabulation of two variables
qtable(data,code,'Q1','Q2')
|
reportgen/questionnaire/questionnaire.py
|
qtable
|
brightgeng/reportgen
|
python
|
def qtable(data, *args, **kwargs):
"简易频数统计函数\n 输入\n data:数据框,可以是所有的数据\n code:数据编码\n q1: 题目序号\n q2: 题目序号\n # 单个变量的频数统计\n qtable(data,code,'Q1')\n # 两个变量的交叉统计\n qtable(data,code,'Q1','Q2')\n\n "
code = None
q1 = None
q2 = None
for a in args:
if (isinstance(a, str) and (not q1)):
q1 = a
elif (isinstance(a, str) and q1):
q2 = a
elif isinstance(a, dict):
code = a
if (not code):
code = data_auto_code(data)
if (not q1):
print('please input the q1,such as Q1.')
return
total = False
for key in kwargs:
if (key == 'total'):
total = kwargs['total']
if (q2 is None):
result = table(data[code[q1]['qlist']], code[q1], total=total)
else:
result = crosstab(data[code[q1]['qlist']], data[code[q2]['qlist']], code[q1], code[q2], total=total)
return result
|
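Since `qtable` only dispatches to `table` or `crosstab`, typical calls look like this (assuming `data` and `code` were built elsewhere in this module):

freq = qtable(data, code, 'Q1')                # one-way frequency table
cross = qtable(data, code, 'Q1', 'Q2')         # Q1 broken down by Q2
freq_t = qtable(data, code, 'Q1', total=True)  # keep the 合计/总体 entries
print(freq['fop'])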
def association_rules(df, minSup=0.08, minConf=0.4, Y=None):
'关联规则分析\n df: DataFrame,bool 类型。是一个类似购物篮数据 \n\n '
try:
df = df.astype(bool)
except:
print('df 必须为 bool 类型')
return (None, None, None)
columns = np.array(df.columns)
gen = associate.frequent_itemsets(np.array(df), minSup)
itemsets = dict(gen)
rules = associate.association_rules(itemsets, minConf)
rules = pd.DataFrame(list(rules))
if (len(rules) == 0):
return (None, None, None)
rules.columns = ['antecedent', 'consequent', 'sup', 'conf']
rules['sup'] = (rules['sup'] / len(df))
rules['antecedent'] = rules['antecedent'].map((lambda x: [columns[i] for i in list(x)]))
rules['consequent'] = rules['consequent'].map((lambda x: [columns[i] for i in list(x)]))
rules['rule'] = ((rules['antecedent'].map((lambda x: ','.join([('%s' % i) for i in x]))) + '-->') + rules['consequent'].map((lambda x: ','.join([('%s' % i) for i in x]))))
result = ';\n'.join(['{}: 支持度={:.1f}%, 置信度={:.1f}%'.format(rules.loc[(ii, 'rule')], (100 * rules.loc[(ii, 'sup')]), (100 * rules.loc[(ii, 'conf')])) for ii in rules.index[:4]])
return (result, rules, itemsets)
| -8,448,162,522,908,191,000
|
Association rule analysis.
df: DataFrame of dtype bool, i.e. market-basket style data.
|
reportgen/questionnaire/questionnaire.py
|
association_rules
|
brightgeng/reportgen
|
python
|
def association_rules(df, minSup=0.08, minConf=0.4, Y=None):
'关联规则分析\n df: DataFrame,bool 类型。是一个类似购物篮数据 \n\n '
try:
df = df.astype(bool)
except:
print('df 必须为 bool 类型')
return (None, None, None)
columns = np.array(df.columns)
gen = associate.frequent_itemsets(np.array(df), minSup)
itemsets = dict(gen)
rules = associate.association_rules(itemsets, minConf)
rules = pd.DataFrame(list(rules))
if (len(rules) == 0):
return (None, None, None)
rules.columns = ['antecedent', 'consequent', 'sup', 'conf']
rules['sup'] = (rules['sup'] / len(df))
rules['antecedent'] = rules['antecedent'].map((lambda x: [columns[i] for i in list(x)]))
rules['consequent'] = rules['consequent'].map((lambda x: [columns[i] for i in list(x)]))
rules['rule'] = ((rules['antecedent'].map((lambda x: ','.join([('%s' % i) for i in x]))) + '-->') + rules['consequent'].map((lambda x: ','.join([('%s' % i) for i in x]))))
result = ';\n'.join(['{}: 支持度={:.1f}%, 置信度={:.1f}%'.format(rules.loc[(ii, 'rule')], (100 * rules.loc[(ii, 'sup')]), (100 * rules.loc[(ii, 'conf')])) for ii in rules.index[:4]])
return (result, rules, itemsets)
|
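`association_rules` delegates the itemset mining to an external `associate` module; the support and confidence it reports are the usual basket ratios. A brute-force sketch over item pairs only, using nothing beyond pandas and itertools (this is not the repository's mining algorithm):

from itertools import combinations
import pandas as pd

basket = pd.DataFrame({'milk':  [1, 1, 0, 1],
                       'bread': [1, 1, 1, 0],
                       'eggs':  [0, 1, 1, 1]}).astype(bool)
for a, b in combinations(basket.columns, 2):
    both = basket[a] & basket[b]
    sup = both.mean()                    # P(a and b)
    conf = both.sum() / basket[a].sum()  # P(b | a)
    print('{}-->{}: sup={:.0%}, conf={:.0%}'.format(a, b, sup, conf))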
def contingency(fo, alpha=0.05):
" 列联表分析:(观察频数表分析)\n # 预增加一个各类别之间的距离\n 1、生成TGI指数、TWI指数、CHI指数\n 2、独立性检验\n 3、当两个变量不显著时,考虑单个之间的显著性\n 返回字典格式\n chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件\n coef: 包含chi2、p值、V相关系数\n log: 记录一些异常情况\n FO: 观察频数\n FE: 期望频数\n TGI:fo/fe\n TWI:fo-fe\n CHI:sqrt((fo-fe)(fo/fe-1))*sign(fo-fe)\n significant:{\n .'result': 显著性结果[1(显著),0(不显著),-1(fe小于5的过多)]\n .'pvalue':\n .'method': chi_test or fisher_test\n .'vcoef':\n .'threshold':\n }\n summary:{\n .'summary': 结论提取\n .'fit_test': 拟合优度检验\n .'chi_std':\n .'chi_mean':\n "
import scipy.stats as stats
cdata = {}
if isinstance(fo, pd.core.series.Series):
fo = pd.DataFrame(fo)
if (not isinstance(fo, pd.core.frame.DataFrame)):
return cdata
(R, C) = fo.shape
if (u'总体' in fo.columns):
fo.drop([u'总体'], axis=1, inplace=True)
if any([((u'其他' in ('%s' % s)) or (u'其它' in ('%s' % s))) for s in fo.columns]):
tmp = [s for s in fo.columns if ((u'其他' in s) or (u'其它' in s))]
for t in tmp:
fo.drop([t], axis=1, inplace=True)
if (u'合计' in fo.index):
fo.drop([u'合计'], axis=0, inplace=True)
if any([((u'其他' in ('%s' % s)) or (u'其它' in ('%s' % s))) for s in fo.index]):
tmp = [s for s in fo.index if ((u'其他' in s) or (u'其它' in s))]
for t in tmp:
fo.drop([t], axis=0, inplace=True)
fe = fo.copy()
N = fo.sum().sum()
if (N == 0):
return cdata
for i in fe.index:
for j in fe.columns:
fe.loc[(i, j)] = ((fe.loc[i, :].sum() * fe.loc[:, j].sum()) / float(N))
TGI = (fo / fe)
TWI = (fo - fe)
CHI = (np.sqrt((((fo - fe) ** 2) / fe)) * ((TWI.applymap((lambda x: int((x > 0)))) * 2) - 1))
PCHI = (1 / (1 + np.exp(((- 1) * CHI))))
cdata['FO'] = fo
cdata['FE'] = fe
cdata['TGI'] = (TGI * 100)
cdata['TWI'] = TWI
cdata['CHI'] = CHI
cdata['PCHI'] = PCHI
significant = {}
significant['threshold'] = stats.chi2.ppf(q=(1 - alpha), df=(C - 1))
threshold = max(3, min(30, (N * 0.05)))
ind1 = (fo.sum(axis=1) >= threshold)
ind2 = (fo.sum() >= threshold)
fo = fo.loc[(ind1, ind2)]
if ((fo.shape[0] <= 1) or np.any((fo.sum() == 0)) or np.any((fo.sum(axis=1) == 0))):
significant['result'] = (- 2)
significant['pvalue'] = (- 2)
significant['method'] = 'fo not frequency'
"fisher_exact运行所需时间极其的长,此处还是不作检验\n fisher_r,fisher_p=fisher_exact(fo)\n significant['pvalue']=fisher_p\n significant['method']='fisher_exact'\n significant['result']=fisher_r\n "
else:
try:
chiStats = stats.chi2_contingency(observed=fo)
except:
chiStats = (1, np.nan)
significant['pvalue'] = chiStats[1]
significant['method'] = 'chi-test'
if (chiStats[1] <= alpha):
significant['result'] = 1
elif np.isnan(chiStats[1]):
significant['pvalue'] = (- 2)
significant['result'] = (- 1)
else:
significant['result'] = 0
cdata['significant'] = significant
chi_sum = (CHI ** 2).sum(axis=1)
chi_value_fit = stats.chi2.ppf(q=(1 - alpha), df=(C - 1))
fit_test = chi_sum.map((lambda x: int((x > chi_value_fit))))
summary = {}
summary['fit_test'] = fit_test
summary['chi_std'] = CHI.unstack().std()
summary['chi_mean'] = CHI.unstack().mean()
conclusion = ''
fo_rank = fo.sum().rank(ascending=False)
for c in fo_rank[(fo_rank < 5)].index:
tmp = list(CHI.loc[(((CHI[c] - summary['chi_mean']) > summary['chi_std']), c)].sort_values(ascending=False)[:3].index)
tmp = [('%s' % s) for s in tmp]
if tmp:
tmp1 = u'{col}:{s}'.format(col=c, s=' || '.join(tmp))
conclusion = ((conclusion + tmp1) + '; \n')
if (significant['result'] == 1):
if conclusion:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*显著*, 且CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*显著*,但没有找到相对有差异的配对'
elif (significant['result'] == 0):
if conclusion:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*不显著*, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*不显著*,且没有找到相对有差异的配对'
elif conclusion:
tmp = '不满足显著性检验(卡方检验)条件, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp = '不满足显著性检验(卡方检验)条件,且没有找到相对有差异的配对'
conclusion = (tmp + conclusion)
summary['summary'] = conclusion
cdata['summary'] = summary
return cdata
| -9,149,343,796,111,427,000
|
Contingency table analysis (analysis of an observed frequency table).
# planned: add a distance measure between the categories
1. Produces the TGI, TWI and CHI indices
2. Runs an independence test
3. When the two variables are jointly non-significant, per-cell significance is still examined
Returns a dict:
chi_test: chi-square test result; 1: significant, 0: not significant, -1: expected counts do not meet the test's conditions
coef: contains chi2, the p value and the V correlation coefficient
log: records of anomalies
FO: observed frequencies
FE: expected frequencies
TGI: fo/fe
TWI: fo-fe
CHI: sqrt((fo-fe)(fo/fe-1))*sign(fo-fe)
significant:{
.'result': significance flag [1 (significant), 0 (not significant), -1 (too many fe below 5)]
.'pvalue':
.'method': chi_test or fisher_test
.'vcoef':
.'threshold':
}
summary:{
.'summary': extracted conclusions
.'fit_test': goodness-of-fit test
.'chi_std':
.'chi_mean':
|
reportgen/questionnaire/questionnaire.py
|
contingency
|
brightgeng/reportgen
|
python
|
def contingency(fo, alpha=0.05):
" 列联表分析:(观察频数表分析)\n # 预增加一个各类别之间的距离\n 1、生成TGI指数、TWI指数、CHI指数\n 2、独立性检验\n 3、当两个变量不显著时,考虑单个之间的显著性\n 返回字典格式\n chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件\n coef: 包含chi2、p值、V相关系数\n log: 记录一些异常情况\n FO: 观察频数\n FE: 期望频数\n TGI:fo/fe\n TWI:fo-fe\n CHI:sqrt((fo-fe)(fo/fe-1))*sign(fo-fe)\n significant:{\n .'result': 显著性结果[1(显著),0(不显著),-1(fe小于5的过多)]\n .'pvalue':\n .'method': chi_test or fisher_test\n .'vcoef':\n .'threshold':\n }\n summary:{\n .'summary': 结论提取\n .'fit_test': 拟合优度检验\n .'chi_std':\n .'chi_mean':\n "
import scipy.stats as stats
cdata = {}
if isinstance(fo, pd.core.series.Series):
fo = pd.DataFrame(fo)
if (not isinstance(fo, pd.core.frame.DataFrame)):
return cdata
(R, C) = fo.shape
if (u'总体' in fo.columns):
fo.drop([u'总体'], axis=1, inplace=True)
if any([((u'其他' in ('%s' % s)) or (u'其它' in ('%s' % s))) for s in fo.columns]):
tmp = [s for s in fo.columns if ((u'其他' in s) or (u'其它' in s))]
for t in tmp:
fo.drop([t], axis=1, inplace=True)
if (u'合计' in fo.index):
fo.drop([u'合计'], axis=0, inplace=True)
if any([((u'其他' in ('%s' % s)) or (u'其它' in ('%s' % s))) for s in fo.index]):
tmp = [s for s in fo.index if ((u'其他' in s) or (u'其它' in s))]
for t in tmp:
fo.drop([t], axis=0, inplace=True)
fe = fo.copy()
N = fo.sum().sum()
if (N == 0):
return cdata
for i in fe.index:
for j in fe.columns:
fe.loc[(i, j)] = ((fe.loc[i, :].sum() * fe.loc[:, j].sum()) / float(N))
TGI = (fo / fe)
TWI = (fo - fe)
CHI = (np.sqrt((((fo - fe) ** 2) / fe)) * ((TWI.applymap((lambda x: int((x > 0)))) * 2) - 1))
PCHI = (1 / (1 + np.exp(((- 1) * CHI))))
cdata['FO'] = fo
cdata['FE'] = fe
cdata['TGI'] = (TGI * 100)
cdata['TWI'] = TWI
cdata['CHI'] = CHI
cdata['PCHI'] = PCHI
significant = {}
significant['threshold'] = stats.chi2.ppf(q=(1 - alpha), df=(C - 1))
threshold = max(3, min(30, (N * 0.05)))
ind1 = (fo.sum(axis=1) >= threshold)
ind2 = (fo.sum() >= threshold)
fo = fo.loc[(ind1, ind2)]
if ((fo.shape[0] <= 1) or np.any((fo.sum() == 0)) or np.any((fo.sum(axis=1) == 0))):
significant['result'] = (- 2)
significant['pvalue'] = (- 2)
significant['method'] = 'fo not frequency'
"fisher_exact运行所需时间极其的长,此处还是不作检验\n fisher_r,fisher_p=fisher_exact(fo)\n significant['pvalue']=fisher_p\n significant['method']='fisher_exact'\n significant['result']=fisher_r\n "
else:
try:
chiStats = stats.chi2_contingency(observed=fo)
except:
chiStats = (1, np.nan)
significant['pvalue'] = chiStats[1]
significant['method'] = 'chi-test'
if (chiStats[1] <= alpha):
significant['result'] = 1
elif np.isnan(chiStats[1]):
significant['pvalue'] = (- 2)
significant['result'] = (- 1)
else:
significant['result'] = 0
cdata['significant'] = significant
chi_sum = (CHI ** 2).sum(axis=1)
chi_value_fit = stats.chi2.ppf(q=(1 - alpha), df=(C - 1))
fit_test = chi_sum.map((lambda x: int((x > chi_value_fit))))
summary = {}
summary['fit_test'] = fit_test
summary['chi_std'] = CHI.unstack().std()
summary['chi_mean'] = CHI.unstack().mean()
    conclusion = ''
fo_rank = fo.sum().rank(ascending=False)
for c in fo_rank[(fo_rank < 5)].index:
tmp = list(CHI.loc[(((CHI[c] - summary['chi_mean']) > summary['chi_std']), c)].sort_values(ascending=False)[:3].index)
tmp = [('%s' % s) for s in tmp]
if tmp:
tmp1 = u'{col}:{s}'.format(col=c, s=' || '.join(tmp))
conclusion = ((conclusion + tmp1) + '; \n')
if (significant['result'] == 1):
if conclusion:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*显著*, 且CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*显著*,但没有找到相对有差异的配对'
elif (significant['result'] == 0):
if conclusion:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*不显著*, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp = '在95%置信水平下显著性检验(卡方检验)结果为*不显著*,且没有找到相对有差异的配对'
elif conclusion:
tmp = '不满足显著性检验(卡方检验)条件, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp = '不满足显著性检验(卡方检验)条件,且没有找到相对有差异的配对'
conclusion = (tmp + conclusion)
summary['summary'] = conclusion
cdata['summary'] = summary
return cdata
|
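Every index produced by `contingency` derives from the observed table fo and the independence expectation fe = row_total * col_total / N. A compact re-derivation on a toy 2x2 table, leaning on scipy for the expected frequencies:

import numpy as np
import pandas as pd
import scipy.stats as stats

fo = pd.DataFrame([[30, 10], [20, 40]],
                  index=['likes', 'dislikes'], columns=['young', 'old'])
chi2, p, dof, fe = stats.chi2_contingency(fo)
fe = pd.DataFrame(fe, index=fo.index, columns=fo.columns)

TGI = fo / fe * 100                                    # 100 = exactly as expected
TWI = fo - fe                                          # absolute lift over expectation
CHI = np.sqrt((fo - fe) ** 2 / fe) * np.sign(fo - fe)  # signed cell contribution
print('p = {:.4f}'.format(p))
print(CHI.round(2))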
def pre_cross_qlist(data, code):
'自适应给出可以进行交叉分析的变量和相应选项\n 满足以下条件的将一键交叉分析:\n 1、单选题\n 2、如果选项是文本,则平均长度应小于10\n ...\n 返回:\n cross_qlist: [[题目序号,变量选项],]\n '
cross_qlist = []
for qq in code:
qtype = code[qq]['qtype']
qlist = code[qq]['qlist']
content = code[qq]['content']
sample_len_qq = data[code[qq]['qlist']].notnull().T.any().sum()
if (qtype not in ['单选题']):
continue
if (not (set(qlist) <= set(data.columns))):
continue
t = qtable(data, code, qq)['fo']
if ('code_order' in code[qq]):
code_order = code[qq]['code_order']
code_order = [q for q in code_order if (q in t.index)]
t = pd.DataFrame(t, index=code_order)
items = list(t.index)
code_values = list(code[qq]['code'].values())
if (len(items) <= 1):
continue
if all([isinstance(t, str) for t in code_values]):
if ((sum([len(t) for t in code_values]) / len(code_values)) > 15):
continue
if (('code_order' in code[qq]) and (len(items) < 10)):
code_order = [q for q in code[qq]['code_order'] if (q in t.index)]
t = pd.DataFrame(t, index=code_order)
ind = np.where((t['频数'] >= 10))[0]
if (len(ind) > 0):
cross_order = list(t.index[range(ind[0], (ind[(- 1)] + 1))])
cross_qlist.append([qq, cross_order])
continue
if re.findall('性别|年龄|gender|age', content.lower()):
cross_qlist.append([qq, items])
continue
if ((len(items) <= (sample_len_qq / 30)) and (len(items) < 10)):
cross_order = list(t.index[(t['频数'] >= 10)])
if cross_order:
cross_qlist.append([qq, cross_order])
continue
return cross_qlist
| 8,495,738,686,203,361,000
|
Adaptively picks the variables, and the options within them, that are suitable for cross analysis.
A question qualifies for one-click cross analysis when:
1. it is a single-choice question
2. if its options are text, their average length is below 10
...
Returns:
cross_qlist: [[question id, option list], ...]
|
reportgen/questionnaire/questionnaire.py
|
pre_cross_qlist
|
brightgeng/reportgen
|
python
|
def pre_cross_qlist(data, code):
'自适应给出可以进行交叉分析的变量和相应选项\n 满足以下条件的将一键交叉分析:\n 1、单选题\n 2、如果选项是文本,则平均长度应小于10\n ...\n 返回:\n cross_qlist: [[题目序号,变量选项],]\n '
cross_qlist = []
for qq in code:
qtype = code[qq]['qtype']
qlist = code[qq]['qlist']
content = code[qq]['content']
sample_len_qq = data[code[qq]['qlist']].notnull().T.any().sum()
if (qtype not in ['单选题']):
continue
if (not (set(qlist) <= set(data.columns))):
continue
t = qtable(data, code, qq)['fo']
if ('code_order' in code[qq]):
code_order = code[qq]['code_order']
code_order = [q for q in code_order if (q in t.index)]
t = pd.DataFrame(t, index=code_order)
items = list(t.index)
code_values = list(code[qq]['code'].values())
if (len(items) <= 1):
continue
if all([isinstance(t, str) for t in code_values]):
if ((sum([len(t) for t in code_values]) / len(code_values)) > 15):
continue
if (('code_order' in code[qq]) and (len(items) < 10)):
code_order = [q for q in code[qq]['code_order'] if (q in t.index)]
t = pd.DataFrame(t, index=code_order)
ind = np.where((t['频数'] >= 10))[0]
if (len(ind) > 0):
cross_order = list(t.index[range(ind[0], (ind[(- 1)] + 1))])
cross_qlist.append([qq, cross_order])
continue
if re.findall('性别|年龄|gender|age', content.lower()):
cross_qlist.append([qq, items])
continue
if ((len(items) <= (sample_len_qq / 30)) and (len(items) < 10)):
cross_order = list(t.index[(t['频数'] >= 10)])
if cross_order:
cross_qlist.append([qq, cross_order])
continue
return cross_qlist
|
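A minimal end-to-end check of `pre_cross_qlist` with a hand-built code dict (all names hypothetical):

import pandas as pd

data = pd.DataFrame({'Q1': [1, 2] * 30})   # a well-filled single-choice item
code = {'Q1': {'qtype': '单选题', 'qlist': ['Q1'],
               'content': '性别', 'code': {1: '男', 2: '女'}}}
print(pre_cross_qlist(data, code))  # expected: [['Q1', ['男', '女']]] (option order may vary)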
def cross_chart(data, code, cross_class, filename=u'交叉分析报告', cross_qlist=None, delclass=None, plt_dstyle=None, cross_order=None, reverse_display=False, total_display=True, max_column_chart=20, save_dstyle=None, template=None):
"使用帮助\n data: 问卷数据,包含交叉变量和所有的因变量\n code: 数据编码\n cross_class: 交叉变量,单选题或者多选题,例如:Q1\n filename:文件名,用于PPT和保存相关数据\n cross_list: 需要进行交叉分析的变量,缺省为code中的所有变量\n delclass: 交叉变量中需要删除的单个变量,缺省空\n plt_dstyle: 绘制图表需要用的数据类型,默认为百分比表,可以选择['TGI'、'CHI'、'TWI']等\n save_dstyle: 需要保存的数据类型,格式为列表。\n cross_order: 交叉变量中各个类别的顺序,可以缺少\n total_display: PPT绘制图表中是否显示总体情况\n max_column_chart: 列联表的列数,小于则用柱状图,大于则用条形图\n template: PPT模板信息,{'path': 'layouts':}缺省用自带的。\n "
if plt_dstyle:
plt_dstyle = plt_dstyle.upper()
if (not cross_qlist):
try:
cross_qlist = list(sorted(code, key=(lambda c: int(re.findall('\\d+', c)[0]))))
except:
cross_qlist = list(code.keys())
if (cross_class in cross_qlist):
cross_qlist.remove(cross_class)
sample_len = data[code[cross_class]['qlist']].notnull().T.any().sum()
if (code[cross_class]['qtype'] == u'单选题'):
cross_class_freq = data[code[cross_class]['qlist'][0]].value_counts()
cross_class_freq[u'合计'] = cross_class_freq.sum()
cross_class_freq.rename(index=code[cross_class]['code'], inplace=True)
elif (code[cross_class]['qtype'] == u'多选题'):
cross_class_freq = data[code[cross_class]['qlist']].sum()
cross_class_freq[u'合计'] = cross_class_freq.sum()
cross_class_freq.rename(index=code[cross_class]['code'], inplace=True)
elif (code[cross_class]['qtype'] == u'排序题'):
tmp = qtable(data, code, cross_class)
cross_class_freq = tmp['fo'][u'综合']
cross_class_freq[u'合计'] = cross_class_freq.sum()
prs = (rpt.Report(template) if template else rpt.Report())
if (not os.path.exists('.\\out')):
os.mkdir('.\\out')
Writer = pd.ExcelWriter((('.\\out\\' + filename) + u'.xlsx'))
Writer_save = {}
if save_dstyle:
for dstyle in save_dstyle:
Writer_save[(u'Writer_' + dstyle)] = pd.ExcelWriter((((('.\\out\\' + filename) + u'_') + dstyle) + '.xlsx'))
result = {}
cross_columns = list(cross_class_freq.index)
cross_columns = [r for r in cross_columns if (r != u'合计')]
cross_columns = ((['内容', '题型'] + cross_columns) + [u'总体', u'显著性检验'])
conclusion = pd.DataFrame(index=cross_qlist, columns=cross_columns)
conclusion.to_excel(Writer, u'索引')
prs.add_cover(title=filename)
title = u'说明'
summary = (((u'交叉题目为' + cross_class) + u': ') + code[cross_class]['content'])
summary = ((summary + '\n') + u'各类别样本量如下:')
prs.add_slide(data={'data': cross_class_freq, 'slide_type': 'table'}, title=title, summary=summary)
data_column = data[code[cross_class]['qlist']]
for qq in cross_qlist:
qtitle = code[qq]['content']
qlist = code[qq]['qlist']
qtype = code[qq]['qtype']
if (not (set(qlist) <= set(data.columns))):
continue
data_index = data[qlist]
sample_len = data_column.iloc[list(data_index.notnull().T.any()), :].notnull().T.any().sum()
summary = None
if (qtype not in [u'单选题', u'多选题', u'排序题', u'矩阵单选题']):
continue
try:
if reverse_display:
result_t = crosstab(data_column, data_index, code_index=code[cross_class], code_column=code[qq])
else:
result_t = crosstab(data_index, data_column, code_index=code[qq], code_column=code[cross_class])
except:
            print('脚本在处理{}时出了一点小问题.....'.format(qq))
continue
if (('fo' in result_t) and ('fop' in result_t)):
t = result_t['fop']
t1 = result_t['fo']
qsample = result_t['sample_size']
else:
continue
if (t is None):
continue
if (cross_order and (not reverse_display)):
if (u'总体' not in cross_order):
cross_order = (cross_order + [u'总体'])
cross_order = [q for q in cross_order if (q in t.columns)]
t = pd.DataFrame(t, columns=cross_order)
t1 = pd.DataFrame(t1, columns=cross_order)
if (cross_order and reverse_display):
cross_order = [q for q in cross_order if (q in t.index)]
t = pd.DataFrame(t, index=cross_order)
t1 = pd.DataFrame(t1, index=cross_order)
"在crosstab中已经重排了\n if 'code_order' in code[qq] and qtype!='矩阵单选题':\n code_order=code[qq]['code_order']\n if reverse_display:\n #code_order=[q for q in code_order if q in t.columns]\n if u'总体' in t1.columns:\n code_order=code_order+[u'总体']\n t=pd.DataFrame(t,columns=code_order)\n t1=pd.DataFrame(t1,columns=code_order)\n else:\n #code_order=[q for q in code_order if q in t.index]\n t=pd.DataFrame(t,index=code_order)\n t1=pd.DataFrame(t1,index=code_order)\n "
t.fillna(0, inplace=True)
t1.fillna(0, inplace=True)
t2 = pd.concat([t, t1], axis=1)
t2.to_excel(Writer, qq, index_label=qq, float_format='%.3f')
Writer_rows = len(t2)
pd.DataFrame(qsample, columns=['样本数']).to_excel(Writer, qq, startrow=(Writer_rows + 2))
Writer_rows += (len(qsample) + 2)
cdata = contingency(t1, alpha=0.05)
result[qq] = cdata
if cdata:
summary = cdata['summary']['summary']
if save_dstyle:
for dstyle in save_dstyle:
cdata[dstyle].to_excel(Writer_save[(u'Writer_' + dstyle)], qq, index_label=qq, float_format='%.2f')
if (qtype in [u'单选题', u'多选题', u'排序题']):
plt_data = (t * 100)
else:
plt_data = t.copy()
if (abs((1 - plt_data.sum())) <= (0.01 + 1e-17)).all():
plt_data = (plt_data * 100)
if ('fw' in result_t):
plt_data = result_t['fw']
if (cross_order and (not reverse_display)):
if (u'总体' not in cross_order):
cross_order = (cross_order + [u'总体'])
cross_order = [q for q in cross_order if (q in plt_data.index)]
plt_data = pd.DataFrame(plt_data, index=cross_order)
plt_data.to_excel(Writer, qq, startrow=(Writer_rows + 2))
Writer_rows += len(plt_data)
if (plt_dstyle and isinstance(cdata, dict) and (plt_dstyle in cdata)):
plt_data = cdata[plt_dstyle]
title = ((((qq + '[') + qtype) + ']: ') + qtitle)
if (not summary):
summary = u'这里是结论区域.'
if ('significant' in cdata):
sing_result = cdata['significant']['result']
sing_pvalue = cdata['significant']['pvalue']
else:
sing_result = (- 2)
sing_pvalue = (- 2)
footnote = u'显著性检验的p值为{:.3f},数据来源于{},样本N={}'.format(sing_pvalue, qq, sample_len)
conclusion.loc[qq, :] = qsample
conclusion.loc[(qq, [u'内容', u'题型'])] = pd.Series({u'内容': code[qq]['content'], u'题型': code[qq]['qtype']})
conclusion.loc[(qq, u'显著性检验')] = sing_result
if ((not total_display) and (u'总体' in plt_data.columns)):
plt_data.drop([u'总体'], axis=1, inplace=True)
if (len(plt_data) > max_column_chart):
prs.add_slide(data={'data': plt_data[::(- 1)], 'slide_type': 'chart', 'type': 'BAR_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
else:
prs.add_slide(data={'data': plt_data, 'slide_type': 'chart', 'type': 'COLUMN_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
if ((qtype == u'排序题') and ('TOP1' in result_t)):
plt_data = (result_t['TOP1'] * 100)
if (cross_order and (not reverse_display)):
if (u'总体' not in cross_order):
cross_order = (cross_order + [u'总体'])
cross_order = [q for q in cross_order if (q in plt_data.columns)]
plt_data = pd.DataFrame(plt_data, columns=cross_order)
if (cross_order and reverse_display):
cross_order = [q for q in cross_order if (q in plt_data.index)]
plt_data = pd.DataFrame(plt_data, index=cross_order)
if ('code_order' in code[qq]):
code_order = code[qq]['code_order']
if reverse_display:
if (u'总体' in t1.columns):
code_order = (code_order + [u'总体'])
plt_data = pd.DataFrame(plt_data, columns=code_order)
else:
plt_data = pd.DataFrame(plt_data, index=code_order)
plt_data.fillna(0, inplace=True)
title = ('[TOP1]' + title)
if (len(plt_data) > max_column_chart):
prs.add_slide(data={'data': plt_data[::(- 1)], 'slide_type': 'chart', 'type': 'BAR_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
else:
prs.add_slide(data={'data': plt_data, 'slide_type': 'chart', 'type': 'COLUMN_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
'\n # ==============小结页=====================\n difference=pd.Series(difference,index=total_qlist_0)\n '
if plt_dstyle:
filename = ((filename + '_') + plt_dstyle)
try:
prs.save((('.\\out\\' + filename) + u'.pptx'))
except:
prs.save((('.\\out\\' + filename) + u'_副本.pptx'))
conclusion.to_excel(Writer, '索引')
Writer.save()
if save_dstyle:
for dstyle in save_dstyle:
Writer_save[(u'Writer_' + dstyle)].save()
return result
| -7,219,620,354,715,659,000
|
Usage:
data: the questionnaire data, containing the cross variable and all dependent variables
code: the data coding
cross_class: the cross variable, a single- or multiple-choice question, e.g. Q1
filename: file name used for the PPT and the saved data
cross_qlist: the variables to cross analyse; defaults to every variable in code
delclass: individual categories to drop from the cross variable; defaults to none
plt_dstyle: the data type plotted in the charts; defaults to the percentage table, alternatives include 'TGI', 'CHI' and 'TWI'
save_dstyle: the data types to save, given as a list.
cross_order: ordering of the cross variable's categories; optional
total_display: whether the PPT charts show the overall column
max_column_chart: row threshold of the contingency table; at or below it a column chart is used, above it a bar chart
template: PPT template info, {'path': ..., 'layouts': ...}; defaults to the built-in template.
|
reportgen/questionnaire/questionnaire.py
|
cross_chart
|
brightgeng/reportgen
|
python
|
def cross_chart(data, code, cross_class, filename=u'交叉分析报告', cross_qlist=None, delclass=None, plt_dstyle=None, cross_order=None, reverse_display=False, total_display=True, max_column_chart=20, save_dstyle=None, template=None):
"使用帮助\n data: 问卷数据,包含交叉变量和所有的因变量\n code: 数据编码\n cross_class: 交叉变量,单选题或者多选题,例如:Q1\n filename:文件名,用于PPT和保存相关数据\n cross_list: 需要进行交叉分析的变量,缺省为code中的所有变量\n delclass: 交叉变量中需要删除的单个变量,缺省空\n plt_dstyle: 绘制图表需要用的数据类型,默认为百分比表,可以选择['TGI'、'CHI'、'TWI']等\n save_dstyle: 需要保存的数据类型,格式为列表。\n cross_order: 交叉变量中各个类别的顺序,可以缺少\n total_display: PPT绘制图表中是否显示总体情况\n max_column_chart: 列联表的列数,小于则用柱状图,大于则用条形图\n template: PPT模板信息,{'path': 'layouts':}缺省用自带的。\n "
if plt_dstyle:
plt_dstyle = plt_dstyle.upper()
if (not cross_qlist):
try:
cross_qlist = list(sorted(code, key=(lambda c: int(re.findall('\\d+', c)[0]))))
except:
cross_qlist = list(code.keys())
if (cross_class in cross_qlist):
cross_qlist.remove(cross_class)
sample_len = data[code[cross_class]['qlist']].notnull().T.any().sum()
if (code[cross_class]['qtype'] == u'单选题'):
cross_class_freq = data[code[cross_class]['qlist'][0]].value_counts()
cross_class_freq[u'合计'] = cross_class_freq.sum()
cross_class_freq.rename(index=code[cross_class]['code'], inplace=True)
elif (code[cross_class]['qtype'] == u'多选题'):
cross_class_freq = data[code[cross_class]['qlist']].sum()
cross_class_freq[u'合计'] = cross_class_freq.sum()
cross_class_freq.rename(index=code[cross_class]['code'], inplace=True)
elif (code[cross_class]['qtype'] == u'排序题'):
tmp = qtable(data, code, cross_class)
cross_class_freq = tmp['fo'][u'综合']
cross_class_freq[u'合计'] = cross_class_freq.sum()
prs = (rpt.Report(template) if template else rpt.Report())
if (not os.path.exists('.\\out')):
os.mkdir('.\\out')
Writer = pd.ExcelWriter((('.\\out\\' + filename) + u'.xlsx'))
Writer_save = {}
if save_dstyle:
for dstyle in save_dstyle:
Writer_save[(u'Writer_' + dstyle)] = pd.ExcelWriter((((('.\\out\\' + filename) + u'_') + dstyle) + '.xlsx'))
result = {}
cross_columns = list(cross_class_freq.index)
cross_columns = [r for r in cross_columns if (r != u'合计')]
cross_columns = ((['内容', '题型'] + cross_columns) + [u'总体', u'显著性检验'])
conclusion = pd.DataFrame(index=cross_qlist, columns=cross_columns)
conclusion.to_excel(Writer, u'索引')
prs.add_cover(title=filename)
title = u'说明'
summary = (((u'交叉题目为' + cross_class) + u': ') + code[cross_class]['content'])
summary = ((summary + '\n') + u'各类别样本量如下:')
prs.add_slide(data={'data': cross_class_freq, 'slide_type': 'table'}, title=title, summary=summary)
data_column = data[code[cross_class]['qlist']]
for qq in cross_qlist:
qtitle = code[qq]['content']
qlist = code[qq]['qlist']
qtype = code[qq]['qtype']
if (not (set(qlist) <= set(data.columns))):
continue
data_index = data[qlist]
sample_len = data_column.iloc[list(data_index.notnull().T.any()), :].notnull().T.any().sum()
summary = None
if (qtype not in [u'单选题', u'多选题', u'排序题', u'矩阵单选题']):
continue
try:
if reverse_display:
result_t = crosstab(data_column, data_index, code_index=code[cross_class], code_column=code[qq])
else:
result_t = crosstab(data_index, data_column, code_index=code[qq], code_column=code[cross_class])
except:
            print('脚本在处理{}时出了一点小问题.....'.format(qq))
continue
if (('fo' in result_t) and ('fop' in result_t)):
t = result_t['fop']
t1 = result_t['fo']
qsample = result_t['sample_size']
else:
continue
if (t is None):
continue
if (cross_order and (not reverse_display)):
if (u'总体' not in cross_order):
cross_order = (cross_order + [u'总体'])
cross_order = [q for q in cross_order if (q in t.columns)]
t = pd.DataFrame(t, columns=cross_order)
t1 = pd.DataFrame(t1, columns=cross_order)
if (cross_order and reverse_display):
cross_order = [q for q in cross_order if (q in t.index)]
t = pd.DataFrame(t, index=cross_order)
t1 = pd.DataFrame(t1, index=cross_order)
"在crosstab中已经重排了\n if 'code_order' in code[qq] and qtype!='矩阵单选题':\n code_order=code[qq]['code_order']\n if reverse_display:\n #code_order=[q for q in code_order if q in t.columns]\n if u'总体' in t1.columns:\n code_order=code_order+[u'总体']\n t=pd.DataFrame(t,columns=code_order)\n t1=pd.DataFrame(t1,columns=code_order)\n else:\n #code_order=[q for q in code_order if q in t.index]\n t=pd.DataFrame(t,index=code_order)\n t1=pd.DataFrame(t1,index=code_order)\n "
t.fillna(0, inplace=True)
t1.fillna(0, inplace=True)
t2 = pd.concat([t, t1], axis=1)
t2.to_excel(Writer, qq, index_label=qq, float_format='%.3f')
Writer_rows = len(t2)
pd.DataFrame(qsample, columns=['样本数']).to_excel(Writer, qq, startrow=(Writer_rows + 2))
Writer_rows += (len(qsample) + 2)
cdata = contingency(t1, alpha=0.05)
result[qq] = cdata
if cdata:
summary = cdata['summary']['summary']
if save_dstyle:
for dstyle in save_dstyle:
cdata[dstyle].to_excel(Writer_save[(u'Writer_' + dstyle)], qq, index_label=qq, float_format='%.2f')
if (qtype in [u'单选题', u'多选题', u'排序题']):
plt_data = (t * 100)
else:
plt_data = t.copy()
if (abs((1 - plt_data.sum())) <= (0.01 + 1e-17)).all():
plt_data = (plt_data * 100)
if ('fw' in result_t):
plt_data = result_t['fw']
if (cross_order and (not reverse_display)):
if (u'总体' not in cross_order):
cross_order = (cross_order + [u'总体'])
cross_order = [q for q in cross_order if (q in plt_data.index)]
plt_data = pd.DataFrame(plt_data, index=cross_order)
plt_data.to_excel(Writer, qq, startrow=(Writer_rows + 2))
Writer_rows += len(plt_data)
if (plt_dstyle and isinstance(cdata, dict) and (plt_dstyle in cdata)):
plt_data = cdata[plt_dstyle]
title = ((((qq + '[') + qtype) + ']: ') + qtitle)
if (not summary):
summary = u'这里是结论区域.'
if (cdata and ('significant' in cdata)):
sing_result = cdata['significant']['result']
sing_pvalue = cdata['significant']['pvalue']
else:
sing_result = (- 2)
sing_pvalue = (- 2)
footnote = u'显著性检验的p值为{:.3f},数据来源于{},样本N={}'.format(sing_pvalue, qq, sample_len)
conclusion.loc[qq, :] = qsample
conclusion.loc[(qq, [u'内容', u'题型'])] = pd.Series({u'内容': code[qq]['content'], u'题型': code[qq]['qtype']})
conclusion.loc[(qq, u'显著性检验')] = sing_result
if ((not total_display) and (u'总体' in plt_data.columns)):
plt_data.drop([u'总体'], axis=1, inplace=True)
if (len(plt_data) > max_column_chart):
prs.add_slide(data={'data': plt_data[::(- 1)], 'slide_type': 'chart', 'type': 'BAR_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
else:
prs.add_slide(data={'data': plt_data, 'slide_type': 'chart', 'type': 'COLUMN_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
if ((qtype == u'排序题') and ('TOP1' in result_t)):
plt_data = (result_t['TOP1'] * 100)
if (cross_order and (not reverse_display)):
if (u'总体' not in cross_order):
cross_order = (cross_order + [u'总体'])
cross_order = [q for q in cross_order if (q in plt_data.columns)]
plt_data = pd.DataFrame(plt_data, columns=cross_order)
if (cross_order and reverse_display):
cross_order = [q for q in cross_order if (q in plt_data.index)]
plt_data = pd.DataFrame(plt_data, index=cross_order)
if ('code_order' in code[qq]):
code_order = code[qq]['code_order']
if reverse_display:
if (u'总体' in t1.columns):
code_order = (code_order + [u'总体'])
plt_data = pd.DataFrame(plt_data, columns=code_order)
else:
plt_data = pd.DataFrame(plt_data, index=code_order)
plt_data.fillna(0, inplace=True)
title = ('[TOP1]' + title)
if (len(plt_data) > max_column_chart):
prs.add_slide(data={'data': plt_data[::(- 1)], 'slide_type': 'chart', 'type': 'BAR_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
else:
prs.add_slide(data={'data': plt_data, 'slide_type': 'chart', 'type': 'COLUMN_CLUSTERED'}, title=title, summary=summary, footnote=footnote)
'\n # ==============小结页=====================\n difference=pd.Series(difference,index=total_qlist_0)\n '
if plt_dstyle:
filename = ((filename + '_') + plt_dstyle)
try:
prs.save((('.\\out\\' + filename) + u'.pptx'))
except:
prs.save((('.\\out\\' + filename) + u'_副本.pptx'))
conclusion.to_excel(Writer, '索引')
Writer.save()
if save_dstyle:
for dstyle in save_dstyle:
Writer_save[(u'Writer_' + dstyle)].save()
return result
|
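A minimal usage sketch for the cross_chart pipeline above (the DataFrame, the code-dict layout, and all sample values are hypothetical; the field names 'content', 'qtype', 'qlist', 'code' and the call signature follow the function body):

import pandas as pd

# Toy survey: Q1 is the crossing variable, Q2 is analyzed against it.
data = pd.DataFrame({'Q1': [1, 2, 1, 2, 1], 'Q2': [1, 1, 2, 2, 1]})
code = {
    'Q1': {'content': '性别', 'qtype': '单选题', 'qlist': ['Q1'],
           'code': {1: '男', 2: '女'}},
    'Q2': {'content': '是否满意', 'qtype': '单选题', 'qlist': ['Q2'],
           'code': {1: '满意', 2: '不满意'}},
}
# Writes .\out\demo.xlsx and .\out\demo.pptx with one slide per question.
result = cross_chart(data, code, 'Q1', filename='demo', cross_order=['男', '女'])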
def onekey_gen(data, code, filename=u'reprotgen 报告自动生成', template=None):
    'Generate, in one step, every report that may be needed,\n including\n a descriptive-statistics report\n cross-analysis reports for single-choice questions\n '
try:
summary_chart(data, code, filename=filename, template=template)
except:
print('整体报告生成过程中出现错误,将跳过..')
pass
print(('已生成 ' + filename))
cross_qlist = pre_cross_qlist(data, code)
if (len(cross_qlist) == 0):
return None
for cross_qq in cross_qlist:
qq = cross_qq[0]
cross_order = cross_qq[1]
if (('name' in code[qq]) and (code[qq]['name'] != '')):
filename = '{}_差异分析'.format(code[qq]['name'])
else:
filename = '{}_差异分析'.format(qq)
save_dstyle = None
try:
cross_chart(data, code, qq, filename=filename, cross_order=cross_order, save_dstyle=save_dstyle, template=template)
print(('已生成 ' + filename))
except:
print((filename + '生成过程中出现错误,将跳过...'))
pass
return None
| -4,211,264,381,394,803,000
|
Generate, in one step, every report that may be needed,
including
a descriptive-statistics report
cross-analysis reports for single-choice questions
|
reportgen/questionnaire/questionnaire.py
|
onekey_gen
|
brightgeng/reportgen
|
python
|
def onekey_gen(data, code, filename=u'reprotgen 报告自动生成', template=None):
    'Generate, in one step, every report that may be needed,\n including\n a descriptive-statistics report\n cross-analysis reports for single-choice questions\n '
try:
summary_chart(data, code, filename=filename, template=template)
except:
print('整体报告生成过程中出现错误,将跳过..')
pass
print(('已生成 ' + filename))
cross_qlist = pre_cross_qlist(data, code)
if (len(cross_qlist) == 0):
return None
for cross_qq in cross_qlist:
qq = cross_qq[0]
cross_order = cross_qq[1]
if (('name' in code[qq]) and (code[qq]['name'] != '')):
filename = '{}_差异分析'.format(code[qq]['name'])
else:
filename = '{}_差异分析'.format(qq)
save_dstyle = None
try:
cross_chart(data, code, qq, filename=filename, cross_order=cross_order, save_dstyle=save_dstyle, template=template)
print(('已生成 ' + filename))
except:
print((filename + '生成过程中出现错误,将跳过...'))
pass
return None
|
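For context, the one-key flow above can be driven with the same inputs; pre_cross_qlist selects the crossing questions automatically, so a single call produces the summary report plus one difference-analysis deck per selected question (a sketch, reusing the hypothetical data/code from the previous example):

# One call: summary_chart first, then cross_chart per auto-selected question.
onekey_gen(data, code, filename='调研报告自动生成', template=None)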
def scorpion(data, code, filename='scorpion'):
    'Scorpion Plan X\n Returns one Excel file containing\n 1. an index sheet\n 2. a frequency table for every question\n 3. every possible cross-analysis\n '
if (not os.path.exists('.\\out')):
os.mkdir('.\\out')
Writer = pd.ExcelWriter((('.\\out\\' + filename) + '.xlsx'))
try:
qqlist = list(sorted(code, key=(lambda c: int(re.findall('\\d+', c)[0]))))
except:
qqlist = list(code.keys())
qIndex = pd.DataFrame(index=qqlist, columns=[u'content', u'qtype', u'SampleSize'])
qIndex.to_excel(Writer, u'索引')
Writer_rows = 0
for qq in qqlist:
qtitle = code[qq]['content']
qlist = code[qq]['qlist']
qtype = code[qq]['qtype']
if (not (set(qlist) <= set(data.columns))):
continue
sample_len_qq = data[code[qq]['qlist']].notnull().T.any().sum()
qIndex.loc[(qq, u'content')] = qtitle
qIndex.loc[(qq, u'qtype')] = qtype
qIndex.loc[(qq, u'SampleSize')] = sample_len_qq
if (qtype not in [u'单选题', u'多选题', u'排序题', u'矩阵单选题']):
continue
try:
result_t = table(data[qlist], code=code[qq])
except:
print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))
continue
fop = result_t['fop']
fo = result_t['fo']
if ((qtype == u'排序题') and ('TOPN' in result_t)):
tmp = result_t['TOPN']
tmp[u'综合'] = fo[u'综合']
fo = tmp.copy()
tmp = result_t['TOPN_fo']
tmp[u'综合'] = fop[u'综合']
fop = tmp.copy()
fo_fop = pd.concat([fo, fop], axis=1)
fo_fop.to_excel(Writer, u'频数表', startrow=Writer_rows, startcol=1, index_label=code[qq]['content'], float_format='%.3f')
tmp = pd.DataFrame({'name': [qq]})
tmp.to_excel(Writer, u'频数表', index=False, header=False, startrow=Writer_rows)
Writer_rows += (len(fo_fop) + 3)
qIndex.to_excel(Writer, '索引')
crossAna = pd.DataFrame(columns=['RowVar', 'ColVar', 'SampleSize', 'pvalue', 'significant', 'summary'])
N = 0
qqlist = [qq for qq in qqlist if (code[qq]['qtype'] in ['单选题', '多选题', '矩阵单选题', '排序题'])]
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
N_cal = ((len(qqlist) * (len(qqlist) - 1)) * 0.1)
for qq1 in qqlist:
for qq2 in qqlist:
if ((N >= N_cal) and (N < (N_cal + 1.0))):
tmp = ((time.perf_counter() - start_time) * 9)
if (tmp > 60):
print('请耐心等待, 预计还需要{:.1f}秒'.format(tmp))
qtype2 = code[qq2]['qtype']
if ((qq1 == qq2) or (qtype2 not in [u'单选题', u'多选题'])):
continue
data_index = data[code[qq1]['qlist']]
data_column = data[code[qq2]['qlist']]
samplesize = data_column.iloc[list(data_index.notnull().T.any()), :].notnull().T.any().sum()
try:
fo = qtable(data, code, qq1, qq2)['fo']
except:
crossAna.loc[N, :] = [qq1, qq2, samplesize, '', '', '']
N += 1
continue
try:
cdata = contingency(fo, alpha=0.05)
except:
crossAna.loc[N, :] = [qq1, qq2, samplesize, '', '', '']
N += 1
continue
if cdata:
result = cdata['significant']['result']
pvalue = cdata['significant']['pvalue']
summary = cdata['summary']['summary']
else:
result = (- 2)
pvalue = (- 2)
summary = '没有找到结论'
summary = '\n'.join(summary.splitlines()[1:])
if (len(summary) == 0):
summary = '没有找到结论'
crossAna.loc[N, :] = [qq1, qq2, samplesize, pvalue, result, summary]
N += 1
crossAna.to_excel(Writer, '交叉分析表', index=False)
Writer.save()
| -6,475,010,946,498,080,000
|
Scorpion Plan X
Returns one Excel file containing
1. an index sheet
2. a frequency table for every question
3. every possible cross-analysis
|
reportgen/questionnaire/questionnaire.py
|
scorpion
|
brightgeng/reportgen
|
python
|
def scorpion(data, code, filename='scorpion'):
    'Scorpion Plan X\n Returns one Excel file containing\n 1. an index sheet\n 2. a frequency table for every question\n 3. every possible cross-analysis\n '
if (not os.path.exists('.\\out')):
os.mkdir('.\\out')
Writer = pd.ExcelWriter((('.\\out\\' + filename) + '.xlsx'))
try:
qqlist = list(sorted(code, key=(lambda c: int(re.findall('\\d+', c)[0]))))
except:
qqlist = list(code.keys())
qIndex = pd.DataFrame(index=qqlist, columns=[u'content', u'qtype', u'SampleSize'])
qIndex.to_excel(Writer, u'索引')
Writer_rows = 0
for qq in qqlist:
qtitle = code[qq]['content']
qlist = code[qq]['qlist']
qtype = code[qq]['qtype']
if (not (set(qlist) <= set(data.columns))):
continue
sample_len_qq = data[code[qq]['qlist']].notnull().T.any().sum()
qIndex.loc[(qq, u'content')] = qtitle
qIndex.loc[(qq, u'qtype')] = qtype
qIndex.loc[(qq, u'SampleSize')] = sample_len_qq
if (qtype not in [u'单选题', u'多选题', u'排序题', u'矩阵单选题']):
continue
try:
result_t = table(data[qlist], code=code[qq])
except:
print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))
continue
fop = result_t['fop']
fo = result_t['fo']
if ((qtype == u'排序题') and ('TOPN' in result_t)):
tmp = result_t['TOPN']
tmp[u'综合'] = fo[u'综合']
fo = tmp.copy()
tmp = result_t['TOPN_fo']
tmp[u'综合'] = fop[u'综合']
fop = tmp.copy()
fo_fop = pd.concat([fo, fop], axis=1)
fo_fop.to_excel(Writer, u'频数表', startrow=Writer_rows, startcol=1, index_label=code[qq]['content'], float_format='%.3f')
tmp = pd.DataFrame({'name': [qq]})
tmp.to_excel(Writer, u'频数表', index=False, header=False, startrow=Writer_rows)
Writer_rows += (len(fo_fop) + 3)
qIndex.to_excel(Writer, '索引')
crossAna = pd.DataFrame(columns=['RowVar', 'ColVar', 'SampleSize', 'pvalue', 'significant', 'summary'])
N = 0
qqlist = [qq for qq in qqlist if (code[qq]['qtype'] in ['单选题', '多选题', '矩阵单选题', '排序题'])]
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
N_cal = ((len(qqlist) * (len(qqlist) - 1)) * 0.1)
for qq1 in qqlist:
for qq2 in qqlist:
if ((N >= N_cal) and (N < (N_cal + 1.0))):
tmp = ((time.perf_counter() - start_time) * 9)
if (tmp > 60):
print('请耐心等待, 预计还需要{:.1f}秒'.format(tmp))
qtype2 = code[qq2]['qtype']
if ((qq1 == qq2) or (qtype2 not in [u'单选题', u'多选题'])):
continue
data_index = data[code[qq1]['qlist']]
data_column = data[code[qq2]['qlist']]
samplesize = data_column.iloc[list(data_index.notnull().T.any()), :].notnull().T.any().sum()
try:
fo = qtable(data, code, qq1, qq2)['fo']
except:
crossAna.loc[N, :] = [qq1, qq2, samplesize, '', '', '']
N += 1
continue
try:
cdata = contingency(fo, alpha=0.05)
except:
crossAna.loc[N, :] = [qq1, qq2, samplesize, '', '', '']
N += 1
continue
if cdata:
result = cdata['significant']['result']
pvalue = cdata['significant']['pvalue']
summary = cdata['summary']['summary']
else:
result = (- 2)
pvalue = (- 2)
summary = '没有找到结论'
summary = '\n'.join(summary.splitlines()[1:])
if (len(summary) == 0):
summary = '没有找到结论'
crossAna.loc[N, :] = [qq1, qq2, samplesize, pvalue, result, summary]
N += 1
crossAna.to_excel(Writer, '交叉分析表', index=False)
Writer.save()
|
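A usage sketch for scorpion with the same hypothetical inputs; the single workbook it writes has three sheets, matching the loops in the body:

scorpion(data, code, filename='scorpion')
# .\out\scorpion.xlsx then contains:
#   索引       - question id, content, qtype, sample size
#   频数表     - fo/fop frequency tables stacked question by question
#   交叉分析表 - RowVar, ColVar, SampleSize, pvalue, significant, summary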
def timer_callback(self):
' Calculate Mx1, My1, Mx3, My3 '
if (self.t == 0):
self.Phix1 = 0
self.Phiy1 = 0
self.Phix3 = 0
self.Phiy3 = 0
self.t += 1
Mx1 = (self.x3 - self.x1)
My1 = (self.y3 - self.y1)
Mx3 = (self.x1 - self.x3)
My3 = (self.y1 - self.y3)
' Use MLP to Predict control inputs '
relative_pose_1 = [Mx1, My1, self.Phix1, self.Phiy1]
relative_pose_3 = [Mx3, My3, self.Phix3, self.Phiy3]
u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model)
u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model)
self.Phix1 = u3_predicted[0][0]
self.Phiy1 = u3_predicted[0][1]
self.Phix3 = u1_predicted[0][0]
self.Phiy3 = u1_predicted[0][1]
u1_predicted_np = np.array([[u1_predicted[0][0]], [u1_predicted[0][1]]])
u3_predicted_np = np.array([[u3_predicted[0][0]], [u3_predicted[0][1]]])
' Calculate V1/W1 and V3/W3 '
S1 = np.array([[self.v1], [self.w1]])
G1 = np.array([[1, 0], [0, (1 / L)]])
R1 = np.array([[math.cos(self.Theta1), math.sin(self.Theta1)], [(- math.sin(self.Theta1)), math.cos(self.Theta1)]])
S1 = np.dot(np.dot(G1, R1), u1_predicted_np)
S3 = np.array([[self.v3], [self.w3]])
G3 = np.array([[1, 0], [0, (1 / L)]])
R3 = np.array([[math.cos(self.Theta3), math.sin(self.Theta3)], [(- math.sin(self.Theta3)), math.cos(self.Theta3)]])
S3 = np.dot(np.dot(G3, R3), u3_predicted_np)
' Calculate VL1/VR1 and VL3/VR3 '
D = np.array([[(1 / 2), (1 / 2)], [((- 1) / (2 * d)), (1 / (2 * d))]])
Di = np.linalg.inv(D)
Speed_L1 = np.array([[self.vL1], [self.vR1]])
Speed_L3 = np.array([[self.vL3], [self.vR3]])
M1 = np.array([[S1[0]], [S1[1]]]).reshape(2, 1)
M3 = np.array([[S3[0]], [S3[1]]]).reshape(2, 1)
Speed_L1 = np.dot(Di, M1)
Speed_L3 = np.dot(Di, M3)
VL1 = float(Speed_L1[0])
VR1 = float(Speed_L1[1])
VL3 = float(Speed_L3[0])
VR3 = float(Speed_L3[1])
' Publish Speed Commands to Robot 1 '
msgl1 = Float32()
msgr1 = Float32()
msgl1.data = VL1
msgr1.data = VR1
self.publisher_l1.publish(msgl1)
self.publisher_r1.publish(msgr1)
' Publish Speed Commands to Robot 3 '
msgl3 = Float32()
msgr3 = Float32()
msgl3.data = VL3
msgr3.data = VR3
self.publisher_l3.publish(msgl3)
self.publisher_r3.publish(msgr3)
self.i += 1
| -1,594,632,542,043,036,000
|
Calculate Mx1, My1, Mx3, My3
|
Real Topology Graph/GNN Model 2/Cyclic Graph/test_n2_robot3.py
|
timer_callback
|
HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots
|
python
|
def timer_callback(self):
' '
if (self.t == 0):
self.Phix1 = 0
self.Phiy1 = 0
self.Phix3 = 0
self.Phiy3 = 0
self.t += 1
Mx1 = (self.x3 - self.x1)
My1 = (self.y3 - self.y1)
Mx3 = (self.x1 - self.x3)
My3 = (self.y1 - self.y3)
' Use MLP to Predict control inputs '
relative_pose_1 = [Mx1, My1, self.Phix1, self.Phiy1]
relative_pose_3 = [Mx3, My3, self.Phix3, self.Phiy3]
u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model)
u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model)
self.Phix1 = u3_predicted[0][0]
self.Phiy1 = u3_predicted[0][1]
self.Phix3 = u1_predicted[0][0]
self.Phiy3 = u1_predicted[0][1]
u1_predicted_np = np.array([[u1_predicted[0][0]], [u1_predicted[0][1]]])
u3_predicted_np = np.array([[u3_predicted[0][0]], [u3_predicted[0][1]]])
' Calculate V1/W1 and V3/W3 '
S1 = np.array([[self.v1], [self.w1]])
G1 = np.array([[1, 0], [0, (1 / L)]])
R1 = np.array([[math.cos(self.Theta1), math.sin(self.Theta1)], [(- math.sin(self.Theta1)), math.cos(self.Theta1)]])
S1 = np.dot(np.dot(G1, R1), u1_predicted_np)
S3 = np.array([[self.v3], [self.w3]])
G3 = np.array([[1, 0], [0, (1 / L)]])
R3 = np.array([[math.cos(self.Theta3), math.sin(self.Theta3)], [(- math.sin(self.Theta3)), math.cos(self.Theta3)]])
S3 = np.dot(np.dot(G3, R3), u3_predicted_np)
' Calculate VL1/VR1 and VL3/VR3 '
D = np.array([[(1 / 2), (1 / 2)], [((- 1) / (2 * d)), (1 / (2 * d))]])
Di = np.linalg.inv(D)
Speed_L1 = np.array([[self.vL1], [self.vR1]])
Speed_L3 = np.array([[self.vL3], [self.vR3]])
M1 = np.array([[S1[0]], [S1[1]]]).reshape(2, 1)
M3 = np.array([[S3[0]], [S3[1]]]).reshape(2, 1)
Speed_L1 = np.dot(Di, M1)
Speed_L3 = np.dot(Di, M3)
VL1 = float(Speed_L1[0])
VR1 = float(Speed_L1[1])
VL3 = float(Speed_L3[0])
VR3 = float(Speed_L3[1])
' Publish Speed Commands to Robot 1 '
msgl1 = Float32()
msgr1 = Float32()
msgl1.data = VL1
msgr1.data = VR1
self.publisher_l1.publish(msgl1)
self.publisher_r1.publish(msgr1)
' Publish Speed Commands to Robot 3 '
msgl3 = Float32()
msgr3 = Float32()
msgl3.data = VL3
msgr3.data = VR3
self.publisher_l3.publish(msgl3)
self.publisher_r3.publish(msgr3)
self.i += 1
|
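The wheel-speed step in timer_callback inverts the standard differential-drive mixing matrix D = [[1/2, 1/2], [-1/(2d), 1/(2d)]], which maps left/right wheel speeds to body velocity v and yaw rate w. A standalone numeric check (the half wheel separation d and the commanded v, w are made up):

import numpy as np

d = 0.1                                     # assumed half wheel separation
D = np.array([[0.5, 0.5], [-1 / (2 * d), 1 / (2 * d)]])
Di = np.linalg.inv(D)                       # maps (v, w) back to (vL, vR)

v, w = 0.2, 0.5                             # body velocity and yaw rate
vL, vR = (Di @ np.array([[v], [w]])).ravel()
# Closed form: vL = v - d*w = 0.15, vR = v + d*w = 0.25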
def get_samplesheet(self):
'Return path of an annotation samplesheet.'
files_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'files'))
samplesheet_name = 'annotation_spreadsheet.xlsm'
return os.path.join(files_path, samplesheet_name)
| -6,796,047,194,800,837,000
|
Return path of an annotation samplesheet.
|
resdk/tests/functional/data_upload/e2e_upload.py
|
get_samplesheet
|
tristanbrown/resolwe-bio-py
|
python
|
def get_samplesheet(self):
files_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'files'))
samplesheet_name = 'annotation_spreadsheet.xlsm'
return os.path.join(files_path, samplesheet_name)
|
def r2(self):
'Calculate R2 for either the train model or the test model'
sse_sst = (self.sse() / self.sst())
return (1 - sse_sst)
| 9,113,696,639,111,318,000
|
Calculate R2 for either the train model or the test model
|
metrics/__init__.py
|
r2
|
nathan-bennett/skellam
|
python
|
def r2(self):
sse_sst = (self.sse() / self.sst())
return (1 - sse_sst)
|
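A quick numeric check of the formula above, with made-up error sums:

sse, sst = 20.0, 100.0        # hypothetical sums of squares
r2 = 1 - sse / sst            # R^2 = 1 - SSE/SST = 0.8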
def adjusted_r2(self):
'Calculate adjusted R2 for either the train model or the test model'
r2 = self.r2()
return (1 - (((1 - r2) * (self.train_length - 1)) / ((self.train_length - self.coeff_size) - 1)))
| -1,238,627,807,371,270,000
|
Calculate adjusted R2 for either the train model or the test model
|
metrics/__init__.py
|
adjusted_r2
|
nathan-bennett/skellam
|
python
|
def adjusted_r2(self):
r2 = self.r2()
return (1 - (((1 - r2) * (self.train_length - 1)) / ((self.train_length - self.coeff_size) - 1)))
|
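Continuing those numbers, the adjusted R^2 penalizes extra predictors; with a hypothetical n = 50 training rows and p = 4 coefficients:

n, p = 50, 4
adj_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
# = 1 - 0.2 * 49 / 45 ≈ 0.7822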
def log_likelihood(self):
'Returns the maximum of the log likelihood function'
return self.max_ll
| -512,140,616,830,212,860
|
Returns the maximum of the log likelihood function
|
metrics/__init__.py
|
log_likelihood
|
nathan-bennett/skellam
|
python
|
def log_likelihood(self):
return self.max_ll
|
def _calculate_lambda(self):
'Create arrays for our predictions of the two Poisson distributions\n '
_lambda0 = ArrayUtils.convert_to_array(np.exp(np.squeeze((self._x0 @ self.lambda_0_coefficients))))
_lambda1 = ArrayUtils.convert_to_array(np.exp(np.squeeze((self._x1 @ self.lambda_1_coefficients))))
return (_lambda0, _lambda1)
| 1,492,520,975,505,939,200
|
Create arrays for our predictions of the two Poisson distributions
|
metrics/__init__.py
|
_calculate_lambda
|
nathan-bennett/skellam
|
python
|
def _calculate_lambda(self):
'\n '
_lambda0 = ArrayUtils.convert_to_array(np.exp(np.squeeze((self._x0 @ self.lambda_0_coefficients))))
_lambda1 = ArrayUtils.convert_to_array(np.exp(np.squeeze((self._x1 @ self.lambda_1_coefficients))))
return (_lambda0, _lambda1)
|
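The two lambda arrays are ordinary log-link Poisson rates, exp(X @ beta). A self-contained numpy sketch (design matrix and coefficients are illustrative):

import numpy as np

X = np.array([[1.0, 0.5],
              [1.0, 1.5]])        # intercept column + one feature
beta = np.array([0.1, 0.4])       # hypothetical coefficients
lam = np.exp(X @ beta)            # exp([0.3, 0.7]) ≈ [1.3499, 2.0138]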
def _calculate_v(self):
'Create diagonal matrix consisting of our predictions of the Poisson distributions\n '
(_lambda0, _lambda1) = self._calculate_lambda()
_v0 = np.diagflat(_lambda0)
_v1 = np.diagflat(_lambda1)
return (_v0, _v1)
| -1,923,027,407,107,608,300
|
Create diagonal matrix consisting of our predictions of the Poisson distributions
|
metrics/__init__.py
|
_calculate_v
|
nathan-bennett/skellam
|
python
|
def _calculate_v(self):
'\n '
(_lambda0, _lambda1) = self._calculate_lambda()
_v0 = np.diagflat(_lambda0)
_v1 = np.diagflat(_lambda1)
return (_v0, _v1)
|
def _calculate_w(self):
'Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions\n with their observed values\n '
(_lambda0, _lambda1) = self._calculate_lambda()
_w0 = np.diagflat(((self.l0 - _lambda0.reshape((- 1), 1)) ** 2))
_w1 = np.diagflat(((self.l1 - _lambda1.reshape((- 1), 1)) ** 2))
return (_w0, _w1)
| -9,050,369,303,904,403,000
|
Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions
with their observed values
|
metrics/__init__.py
|
_calculate_w
|
nathan-bennett/skellam
|
python
|
def _calculate_w(self):
'Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions\n with their observed values\n '
(_lambda0, _lambda1) = self._calculate_lambda()
_w0 = np.diagflat(((self.l0 - _lambda0.reshape((- 1), 1)) ** 2))
_w1 = np.diagflat(((self.l1 - _lambda1.reshape((- 1), 1)) ** 2))
return (_w0, _w1)
|
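V and W are the two diagonal blocks used by the sandwich estimator below: V holds the model variances (for a Poisson, Var = lambda_hat) and W the squared residuals. Continuing the sketch with made-up observed counts:

y = np.array([[1.0], [3.0]])                     # hypothetical observations
V = np.diagflat(lam)                             # diag(lambda_hat)
W = np.diagflat((y - lam.reshape(-1, 1)) ** 2)   # diag((y - lambda_hat)^2)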
def _calculate_robust_covariance(self):
'Calculate robust variance covariance matrices for our two sets of coefficients\n '
(_v0, _v1) = self._calculate_v()
(_w0, _w1) = self._calculate_w()
# Sandwich form A^{-1} B A^{-1}: these must be matrix products (@), not elementwise '*'.
_robust_cov0 = (np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0)) @ np.dot(np.dot(self._x0.T, _w0), self._x0) @ np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0)))
_robust_cov1 = (np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1)) @ np.dot(np.dot(self._x1.T, _w1), self._x1) @ np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1)))
return (_robust_cov0, _robust_cov1)
| -5,582,450,034,621,296,000
|
Calculate robust variance covariance matrices for our two sets of coefficients
|
metrics/__init__.py
|
_calculate_robust_covariance
|
nathan-bennett/skellam
|
python
|
def _calculate_robust_covariance(self):
'\n '
(_v0, _v1) = self._calculate_v()
(_w0, _w1) = self._calculate_w()
# Sandwich form A^{-1} B A^{-1}: these must be matrix products (@), not elementwise '*'.
_robust_cov0 = (np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0)) @ np.dot(np.dot(self._x0.T, _w0), self._x0) @ np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0)))
_robust_cov1 = (np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1)) @ np.dot(np.dot(self._x1.T, _w1), self._x1) @ np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1)))
return (_robust_cov0, _robust_cov1)
|
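Written out, the robust covariance is the sandwich A^{-1} B A^{-1} with bread A = X'VX and meat B = X'WX, all matrix products. Continuing the toy example:

A = X.T @ V @ X                   # bread: X' V X
B = X.T @ W @ X                   # meat:  X' W X
robust_cov = np.linalg.inv(A) @ B @ np.linalg.inv(A)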
def _calculate_robust_standard_errors(self):
'Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal\n values in the variance covariance matrices\n '
(_robust_cov0, _robust_cov1) = self._calculate_robust_covariance()
_std_error0 = np.sqrt(np.diag(_robust_cov0))
_std_error1 = np.sqrt(np.diag(_robust_cov1))
return (_std_error0, _std_error1)
| -3,722,754,812,175,301,600
|
Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal
values in the variance covariance matrices
|
metrics/__init__.py
|
_calculate_robust_standard_errors
|
nathan-bennett/skellam
|
python
|
def _calculate_robust_standard_errors(self):
'Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal\n values in the variance covariance matrices\n '
(_robust_cov0, _robust_cov1) = self._calculate_robust_covariance()
_std_error0 = np.sqrt(np.diag(_robust_cov0))
_std_error1 = np.sqrt(np.diag(_robust_cov1))
return (_std_error0, _std_error1)
|
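The standard errors are the square roots of the covariance diagonal; dividing coefficients by them yields Wald z-statistics. Finishing the sketch:

se = np.sqrt(np.diag(robust_cov))
z = beta / se                     # Wald z-statistics
# two-sided p-values would follow from scipy.stats.norm.sf(abs(z)) * 2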