body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def abort_iocb(self, addr, iocbid, err):
    """Called when the client or server receives an abort request.

    Removes the IOCB from the local registry, marks it aborted with the
    given error, and fires its completion trigger so waiters wake up.

    :param addr: peer address the abort came from (logged only)
    :param iocbid: identifier of the IOCB to abort
    :param err: error to attach to the aborted IOCB
    :raises RuntimeError: if `iocbid` is not in the local registry
    """
    if _debug:
        IOProxyServer._debug('abort_iocb %r %r %r', addr, iocbid, err)
    # dict.has_key() no longer exists in Python 3 — use the `in` operator
    if iocbid not in self.localIOCB:
        raise RuntimeError('no reference to aborting iocb: %r' % (iocbid,))
    # remove from the registry and flag the IOCB as aborted
    iocb = self.localIOCB.pop(iocbid)
    iocb.ioState = ABORTED
    iocb.ioError = err
    iocb.trigger()
| 7,061,142,439,102,379,000
|
Called when the client or server receives an abort request.
|
sandbox/io.py
|
abort_iocb
|
DB-CL/bacpypes
|
python
|
def abort_iocb(self, addr, iocbid, err):
if _debug:
IOProxyServer._debug('abort_iocb %r %r %r', addr, iocbid, err)
if (not self.localIOCB.has_key(iocbid)):
raise RuntimeError(('no reference to aborting iocb: %r' % (iocbid,)))
iocb = self.localIOCB[iocbid]
del self.localIOCB[iocbid]
iocb.ioState = ABORTED
iocb.ioError = err
iocb.trigger()
|
@classmethod
def title(cls):
    """Print the day's title, centered, after one blank line."""
    CLIOutput.empty_line(1)
    heading = cls._day.get_title()
    CLIOutput.center(heading)
| -9,188,701,884,550,395,000
|
Display title
|
cli/day.py
|
title
|
sunarch/woyo
|
python
|
@classmethod
def title(cls):
CLIOutput.empty_line(1)
CLIOutput.center(cls._day.get_title())
|
@classmethod
def new_words(cls, display_in_full=True):
    """Show the new-words table.

    When `display_in_full` is true, also print the section header and an
    extra blank line; the table itself is printed either way.
    """
    units = cls._day.get_new_words()
    regular = [unit['regular'] for unit in units]
    phonetic = [unit['phonetic'] for unit in units]
    if display_in_full:
        CLIOutput.section_title('NEW WORDS')
        CLIOutput.empty_line(1)
    CLIOutput.empty_line(1)
    CLIOutput.words_table(regular, phonetic)
| 1,520,179,378,384,501,800
|
Display new words section
|
cli/day.py
|
new_words
|
sunarch/woyo
|
python
|
@classmethod
def new_words(cls, display_in_full=True):
regular = list()
phonetic = list()
for unit in cls._day.get_new_words():
regular.append(unit['regular'])
phonetic.append(unit['phonetic'])
if display_in_full:
CLIOutput.section_title('NEW WORDS')
CLIOutput.empty_line(1)
CLIOutput.empty_line(1)
CLIOutput.words_table(regular, phonetic)
|
@classmethod
def intro_text(cls):
    """Show the day's intro text inside a fixed-width frame."""
    contents = cls._day.get_intro_text()
    CLIOutput.empty_line(2)
    CLIOutput.framed(contents, cls.INTRO_TEXT_WIDTH)
| -2,856,950,738,659,759,000
|
Display intro text
|
cli/day.py
|
intro_text
|
sunarch/woyo
|
python
|
@classmethod
def intro_text(cls):
parts = cls._day.get_intro_text()
CLIOutput.empty_line(2)
CLIOutput.framed(parts, cls.INTRO_TEXT_WIDTH)
|
@classmethod
def _answer_cycle(cls, prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
    """Prompt repeatedly until the task is resolved or navigated away from.

    Returns True when this question is done (answered correctly or
    skipped) and False when control should leave the task (next /
    previous / exit commands, which may also set ``cls._next_action``).

    :raises ValueError: if the input helper reports an unknown answer type
    """
    while True:
        CLIOutput.empty_line(1)
        a_type, a_content = CLIUserInput.get_answer(prompt)

        if a_type == CLIUserInput.TYPE_ANSWER:
            if a_content not in answers:
                CLIOutput.warning('Incorrect, try again.')
                continue
            # correct answer: show it and acknowledge
            CLIOutput.empty_line(1)
            l_pr_answer()
            CLIOutput.empty_line(1)
            CLIOutput.simple('Correct!')
            return True

        if a_type != CLIUserInput.TYPE_COMMAND:
            raise ValueError('Unknown answer type.')

        # command handling
        if a_content in cls.CMD_WORDS_ALIASES:
            # re-show the word list, then re-ask the current question
            cls.new_words(False)
            CLIOutput.empty_line(1)
            l_pr_question()
        elif a_content in cls.CMD_SKIP_ALIASES:
            return True
        elif a_content in cls.CMD_NEXT_ALIASES:
            l_next_msg()
            return False
        elif a_content in cls.CMD_PREV_ALIASES:
            l_prev_msg()
            cls._next_action = prev_action
            return False
        elif a_content in cls.CMD_EXIT_ALIASES:
            cls._next_action = cls.ACTION_EXIT
            return False
        elif a_content in cls.CMD_HELP_ALIASES:
            cls.help_cmd_in_task()
        else:
            CLIOutput.warning('Invalid command.')
| -3,586,564,492,552,391,000
|
Answer cycle
|
cli/day.py
|
_answer_cycle
|
sunarch/woyo
|
python
|
@classmethod
def _answer_cycle(cls, prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
while True:
CLIOutput.empty_line(1)
(a_type, a_content) = CLIUserInput.get_answer(prompt)
if (a_type == CLIUserInput.TYPE_ANSWER):
if (a_content in answers):
CLIOutput.empty_line(1)
l_pr_answer()
CLIOutput.empty_line(1)
CLIOutput.simple('Correct!')
return True
else:
CLIOutput.warning('Incorrect, try again.')
elif (a_type == CLIUserInput.TYPE_COMMAND):
if (a_content in cls.CMD_WORDS_ALIASES):
cls.new_words(False)
CLIOutput.empty_line(1)
l_pr_question()
elif (a_content in cls.CMD_SKIP_ALIASES):
return True
elif (a_content in cls.CMD_NEXT_ALIASES):
l_next_msg()
return False
elif (a_content in cls.CMD_PREV_ALIASES):
l_prev_msg()
cls._next_action = prev_action
return False
elif (a_content in cls.CMD_EXIT_ALIASES):
cls._next_action = cls.ACTION_EXIT
return False
elif (a_content in cls.CMD_HELP_ALIASES):
cls.help_cmd_in_task()
else:
CLIOutput.warning('Invalid command.')
else:
raise ValueError('Unknown answer type.')
|
@classmethod
def sample_sentences(cls):
    "Display 'sample sentences' task"
    # Fetch task data, print the section header and the task prompt.
    data = cls._day.get_sample_sentences()
    CLIOutput.section_title('SAMPLE SENTENCES')
    CLIOutput.empty_line(1)
    CLIOutput.simple(data['prompt'])
    CLIOutput.empty_line(1)
    # Show every sentence once up front with the answer blanked out.
    for sentence in data['sentences']:
        CLIOutput.numbered_sentence(sentence['id'], ((sentence['beginning'] + CLIOutput.BLANK) + sentence['end']), CLIOutput.FORMAT_INDENTED)
    new_words_extension = cls._day.get_new_words_extension()
    CLIOutput.new_words_extension(new_words_extension)
    CLIOutput.empty_line(1)
    # Quiz each sentence in turn.
    for sentence in data['sentences']:
        prompt = '{}. '.format(sentence['id'])
        # NOTE: these lambdas close over the loop variable late-bound; they
        # are only called within the current iteration (by _answer_cycle),
        # so late binding is safe as written.
        l_pr_question = (lambda : CLIOutput.numbered_sentence(sentence['id'], ((sentence['beginning'] + CLIOutput.BLANK) + sentence['end']), CLIOutput.FORMAT_REGULAR))
        answers = list()
        answers.append(sentence['answer'])
        # Assemble the full sentence shown after a correct answer.
        full_answer = sentence['answer']
        if (len(sentence['beginning']) > 0):
            full_answer = ((sentence['beginning'] + ' ') + full_answer)
        if (len(sentence['end']) > 0):
            # no space before bare closing punctuation
            if (sentence['end'] not in ['.', '!', '?', '?!', '!?']):
                full_answer += ' '
            full_answer += sentence['end']
        l_pr_answer = (lambda : CLIOutput.simple(full_answer))
        # This is the first task, so "previous" just restarts it.
        prev_action = cls.ACTION_SAMPLE_SENTENCES
        l_prev_msg = (lambda : CLIOutput.general_message('This is the first task: Starting from the beginning.'))
        l_next_msg = (lambda : None)
        # Re-show the word list and the current question before the cycle.
        cls.new_words(False)
        CLIOutput.empty_line(1)
        l_pr_question()
        # A False result means navigation (prev/next/exit): leave the task.
        if (not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg)):
            return
| -4,594,590,147,745,163,000
|
Display 'sample sentences' task
|
cli/day.py
|
sample_sentences
|
sunarch/woyo
|
python
|
@classmethod
def sample_sentences(cls):
data = cls._day.get_sample_sentences()
CLIOutput.section_title('SAMPLE SENTENCES')
CLIOutput.empty_line(1)
CLIOutput.simple(data['prompt'])
CLIOutput.empty_line(1)
for sentence in data['sentences']:
CLIOutput.numbered_sentence(sentence['id'], ((sentence['beginning'] + CLIOutput.BLANK) + sentence['end']), CLIOutput.FORMAT_INDENTED)
new_words_extension = cls._day.get_new_words_extension()
CLIOutput.new_words_extension(new_words_extension)
CLIOutput.empty_line(1)
for sentence in data['sentences']:
prompt = '{}. '.format(sentence['id'])
l_pr_question = (lambda : CLIOutput.numbered_sentence(sentence['id'], ((sentence['beginning'] + CLIOutput.BLANK) + sentence['end']), CLIOutput.FORMAT_REGULAR))
answers = list()
answers.append(sentence['answer'])
full_answer = sentence['answer']
if (len(sentence['beginning']) > 0):
full_answer = ((sentence['beginning'] + ' ') + full_answer)
if (len(sentence['end']) > 0):
if (sentence['end'] not in ['.', '!', '?', '?!', '!?']):
full_answer += ' '
full_answer += sentence['end']
l_pr_answer = (lambda : CLIOutput.simple(full_answer))
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = (lambda : CLIOutput.general_message('This is the first task: Starting from the beginning.'))
l_next_msg = (lambda : None)
cls.new_words(False)
CLIOutput.empty_line(1)
l_pr_question()
if (not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg)):
return
|
@classmethod
def definitions(cls):
"Display 'definitions' task"
return
data = cls._day.get_definitions()
CLIOutput.section_title('DEFINITIONS')
CLIOutput.empty_line(1)
CLIOutput.simple(data['prompt'])
CLIOutput.empty_line(1)
for definition in data['definitions']:
CLIOutput.numbered_sentence(definition['id'], definition['text'], CLIOutput.FORMAT_INDENTED)
l_words = (lambda : [CLIOutput.numbered_sentence(word['id'], word['text'], CLIOutput.FORMAT_INDENTED) for word in data['words']])
for definition in data['definitions']:
prompt = '{}. '.format(definition['id'])
l_pr_question = (lambda : CLIOutput.numbered_sentence(definition['id'], definition['text'], CLIOutput.FORMAT_REGULAR))
answers = list()
answer_id = [value for (id, value) in data['answers'] if (id == definition['id'])][0]
answers.append(answer_id)
answer_text = [item['text'] for item in data['words'] if (item['id'] == answer_id)][0]
answers.append(answer_text)
l_pr_answer = (lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR))
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = (lambda : None)
l_next_msg = (lambda : None)
CLIOutput.empty_line(2)
l_words()
CLIOutput.empty_line(1)
l_pr_question()
if (not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg)):
return
| -6,911,846,803,249,598,000
|
Display 'definitions' task
|
cli/day.py
|
definitions
|
sunarch/woyo
|
python
|
@classmethod
def definitions(cls):
return
data = cls._day.get_definitions()
CLIOutput.section_title('DEFINITIONS')
CLIOutput.empty_line(1)
CLIOutput.simple(data['prompt'])
CLIOutput.empty_line(1)
for definition in data['definitions']:
CLIOutput.numbered_sentence(definition['id'], definition['text'], CLIOutput.FORMAT_INDENTED)
l_words = (lambda : [CLIOutput.numbered_sentence(word['id'], word['text'], CLIOutput.FORMAT_INDENTED) for word in data['words']])
for definition in data['definitions']:
prompt = '{}. '.format(definition['id'])
l_pr_question = (lambda : CLIOutput.numbered_sentence(definition['id'], definition['text'], CLIOutput.FORMAT_REGULAR))
answers = list()
answer_id = [value for (id, value) in data['answers'] if (id == definition['id'])][0]
answers.append(answer_id)
answer_text = [item['text'] for item in data['words'] if (item['id'] == answer_id)][0]
answers.append(answer_text)
l_pr_answer = (lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR))
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = (lambda : None)
l_next_msg = (lambda : None)
CLIOutput.empty_line(2)
l_words()
CLIOutput.empty_line(1)
l_pr_question()
if (not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg)):
return
|
@classmethod
def matching(cls):
    "Display 'matching' task"
    # NOTE(review): this early return disables the entire task; everything
    # below it is unreachable dead code. Confirm this is an intentional
    # temporary toggle before removing or keeping it.
    return
    data = cls._day.get_matching()
    CLIOutput.section_title(data['name'])
    CLIOutput.empty_line(1)
    CLIOutput.simple(data['prompt'])
    CLIOutput.empty_line(1)
    # List every sentence up front.
    for sentence in data['sentences']:
        CLIOutput.numbered_sentence(sentence['id'], sentence['text'], CLIOutput.FORMAT_INDENTED)
    # Prints the candidate word list; reused before each question.
    l_words = (lambda : [CLIOutput.numbered_sentence(word['id'], word['text'], CLIOutput.FORMAT_INDENTED) for word in data['words']])
    for sentence in data['sentences']:
        prompt = '{}. '.format(sentence['id'])
        # NOTE: lambdas close over the loop variable late-bound; they are
        # only called within the current iteration, so this is safe here.
        l_pr_question = (lambda : CLIOutput.numbered_sentence(sentence['id'], sentence['text'], CLIOutput.FORMAT_REGULAR))
        answers = list()
        # accept either the matching word's id or its text as correct
        answer_id = [value for (id, value) in data['answers'] if (id == sentence['id'])][0]
        answers.append(answer_id)
        answer_text = [item['text'] for item in data['words'] if (item['id'] == answer_id)][0]
        answers.append(answer_text)
        l_pr_answer = (lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR))
        # NOTE(review): "previous" goes to the sample-sentences task, not
        # the definitions task — looks copy-pasted; confirm intended target.
        prev_action = cls.ACTION_SAMPLE_SENTENCES
        l_prev_msg = (lambda : None)
        l_next_msg = (lambda : None)
        CLIOutput.empty_line(2)
        l_words()
        CLIOutput.empty_line(1)
        l_pr_question()
        # A False result means navigation (prev/next/exit): leave the task.
        if (not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg)):
            return
| 4,291,799,636,644,809,000
|
Display 'matching' task
|
cli/day.py
|
matching
|
sunarch/woyo
|
python
|
@classmethod
def matching(cls):
return
data = cls._day.get_matching()
CLIOutput.section_title(data['name'])
CLIOutput.empty_line(1)
CLIOutput.simple(data['prompt'])
CLIOutput.empty_line(1)
for sentence in data['sentences']:
CLIOutput.numbered_sentence(sentence['id'], sentence['text'], CLIOutput.FORMAT_INDENTED)
l_words = (lambda : [CLIOutput.numbered_sentence(word['id'], word['text'], CLIOutput.FORMAT_INDENTED) for word in data['words']])
for sentence in data['sentences']:
prompt = '{}. '.format(sentence['id'])
l_pr_question = (lambda : CLIOutput.numbered_sentence(sentence['id'], sentence['text'], CLIOutput.FORMAT_REGULAR))
answers = list()
answer_id = [value for (id, value) in data['answers'] if (id == sentence['id'])][0]
answers.append(answer_id)
answer_text = [item['text'] for item in data['words'] if (item['id'] == answer_id)][0]
answers.append(answer_text)
l_pr_answer = (lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR))
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = (lambda : None)
l_next_msg = (lambda : None)
CLIOutput.empty_line(2)
l_words()
CLIOutput.empty_line(1)
l_pr_question()
if (not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg)):
return
|
@classmethod
def other_new_words(cls):
    """Show the 'other new words' section and pause for user input."""
    data = cls._day.get_other_new_words()
    CLIOutput.section_title('OTHER NEW WORDS:')
    CLIOutput.empty_line(1)
    CLIOutput.simple(data['prompt'])
    CLIOutput.empty_line(1)
    # input acts only as a pause here; the entered value is discarded
    CLIUserInput.get_answer('')
    CLIOutput.empty_line(1)
| -1,445,370,580,328,307,200
|
Display other new words section
|
cli/day.py
|
other_new_words
|
sunarch/woyo
|
python
|
@classmethod
def other_new_words(cls):
    """Display other new words section and pause for user input."""
    data = cls._day.get_other_new_words()
    CLIOutput.section_title('OTHER NEW WORDS:')
    CLIOutput.empty_line(1)
    CLIOutput.simple(data['prompt'])
    CLIOutput.empty_line(1)
    # BUG FIX: the prompt argument was dropped here; the canonical
    # definition of this method calls get_answer('') — restore it.
    (a_type, a_content) = CLIUserInput.get_answer('')
    CLIOutput.empty_line(1)
|
def read(handle):
    """Get output from primersearch into a PrimerSearchOutputRecord.

    Iterates the lines of `handle`, grouping amplimers under the most
    recently seen primer name, and returns the populated OutputRecord.
    """
    record = OutputRecord()
    for line in handle:
        if not line.strip():
            continue
        if line.startswith('Primer name'):
            # start a new primer group keyed by the last token of the line
            name = line.split()[-1]
            record.amplifiers[name] = []
        elif line.startswith('Amplimer'):
            amplifier = Amplifier()
            record.amplifiers[name].append(amplifier)
        elif line.startswith('\tSequence: '):
            amplifier.hit_info = line.replace('\tSequence: ', '')
        elif line.startswith('\tAmplimer length: '):
            amplifier.length = int(line.split()[-2])
        else:
            # continuation of the current amplimer's hit description
            amplifier.hit_info += line
    # strip trailing whitespace accumulated from the raw lines
    for name in record.amplifiers:
        for amplifier in record.amplifiers[name]:
            amplifier.hit_info = amplifier.hit_info.rstrip()
    return record
| -1,677,030,615,956,668,700
|
Get output from primersearch into a PrimerSearchOutputRecord.
|
Bio/Emboss/PrimerSearch.py
|
read
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
python
|
def read(handle):
    """Get output from primersearch into a PrimerSearchOutputRecord.

    Iterates the lines of `handle`, grouping amplimers under the most
    recently seen primer name, and returns the populated OutputRecord.
    """
    record = OutputRecord()
    for line in handle:
        if (not line.strip()):
            continue
        elif line.startswith('Primer name'):
            name = line.split()[(- 1)]
            record.amplifiers[name] = []
        elif line.startswith('Amplimer'):
            amplifier = Amplifier()
            record.amplifiers[name].append(amplifier)
        elif line.startswith('\tSequence: '):
            # BUG FIX: str.replace() requires the replacement argument;
            # the '' second argument was dropped here (TypeError at runtime).
            amplifier.hit_info = line.replace('\tSequence: ', '')
        elif line.startswith('\tAmplimer length: '):
            length = line.split()[(- 2)]
            amplifier.length = int(length)
        else:
            amplifier.hit_info += line
    # strip trailing whitespace accumulated from the raw lines
    for name in record.amplifiers:
        for amplifier in record.amplifiers[name]:
            amplifier.hit_info = amplifier.hit_info.rstrip()
    return record
|
def add_primer_set(self, primer_name, first_primer_seq, second_primer_seq):
    """Record one primer set as a (name, first_seq, second_seq) tuple."""
    entry = (primer_name, first_primer_seq, second_primer_seq)
    self.primer_info.append(entry)
| -2,234,440,617,279,405,600
|
Add primer information to the record.
|
Bio/Emboss/PrimerSearch.py
|
add_primer_set
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
python
|
def add_primer_set(self, primer_name, first_primer_seq, second_primer_seq):
self.primer_info.append((primer_name, first_primer_seq, second_primer_seq))
|
def _decompose_entangle(cmd):
    """Decompose the entangle gate.

    Applies H to the first qubit of the register, then X to every
    remaining qubit controlled on that first qubit; any control qubits
    on the original command gate the whole sequence.
    """
    # the register the gate acts on, and the engine to route gates through
    qr = cmd.qubits[0]
    eng = cmd.engine
    with Control(eng, cmd.control_qubits):
        (H | qr[0])
        # CNOT fan-out from the first qubit onto the rest
        with Control(eng, qr[0]):
            (All(X) | qr[1:])
| -1,206,310,081,312,765,700
|
Decompose the entangle gate.
|
projectq/setups/decompositions/entangle.py
|
_decompose_entangle
|
VirtueQuantumCloud/Ex
|
python
|
def _decompose_entangle(cmd):
' '
qr = cmd.qubits[0]
eng = cmd.engine
with Control(eng, cmd.control_qubits):
(H | qr[0])
with Control(eng, qr[0]):
(All(X) | qr[1:])
|
def predict(self, params, exog=None, exog_precision=None, which='mean'):
    """Predict values for mean, precision, or variance.

    Parameters
    ----------
    params : array_like
        The model parameters.
    exog : array_like
        Array of predictor variables for mean.
    exog_precision : array_like
        Array of predictor variables for precision parameter.
    which : str

        - "mean" : mean, conditional expectation E(endog | exog)
        - "precision" : predicted precision
        - "linear" : linear predictor for the mean function
        - "linear-precision" : linear predictor for the precision parameter
        - "var" : conditional variance implied by mean and precision

        The spellings "linpred" and "linpred_precision" /
        "linear_precision" are accepted as aliases for "linear" and
        "linear-precision" respectively.

    Returns
    -------
    ndarray, predicted values

    Raises
    ------
    ValueError
        If `which` is none of the supported options.
    """
    # normalize alternative spellings of `which`
    if (which == 'linpred'):
        which = 'linear'
    if (which in ['linpred_precision', 'linear_precision']):
        which = 'linear-precision'
    # mean coefficients occupy the first k_mean entries of `params`
    k_mean = self.exog.shape[1]
    if (which in ['mean', 'linear']):
        if (exog is None):
            exog = self.exog
        params_mean = params[:k_mean]
        linpred = np.dot(exog, params_mean)
        if (which == 'mean'):
            mu = self.link.inverse(linpred)
            res = mu
        else:
            res = linpred
    elif (which in ['precision', 'linear-precision']):
        if (exog_precision is None):
            exog_precision = self.exog_precision
        # precision coefficients are the tail of `params`
        params_prec = params[k_mean:]
        linpred_prec = np.dot(exog_precision, params_prec)
        if (which == 'precision'):
            phi = self.link_precision.inverse(linpred_prec)
            res = phi
        else:
            res = linpred_prec
    elif (which == 'var'):
        res = self._predict_var(params, exog=exog, exog_precision=exog_precision)
    else:
        raise ValueError(('which = %s is not available' % which))
    return res
| -7,767,475,233,672,883,000
|
Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision parameter.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linear" : linear predictor for the mean function
- "linear-precision" : linear predictor for the precision parameter
Returns
-------
ndarray, predicted values
|
statsmodels/othermod/betareg.py
|
predict
|
EC-AI/statsmodels
|
python
|
def predict(self, params, exog=None, exog_precision=None, which='mean'):
'Predict values for mean or precision\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for precision parameter.\n which : str\n\n - "mean" : mean, conditional expectation E(endog | exog)\n - "precision" : predicted precision\n - "linear" : linear predictor for the mean function\n - "linear-precision" : linear predictor for the precision parameter\n\n Returns\n -------\n ndarray, predicted values\n '
if (which == 'linpred'):
which = 'linear'
if (which in ['linpred_precision', 'linear_precision']):
which = 'linear-precision'
k_mean = self.exog.shape[1]
if (which in ['mean', 'linear']):
if (exog is None):
exog = self.exog
params_mean = params[:k_mean]
linpred = np.dot(exog, params_mean)
if (which == 'mean'):
mu = self.link.inverse(linpred)
res = mu
else:
res = linpred
elif (which in ['precision', 'linear-precision']):
if (exog_precision is None):
exog_precision = self.exog_precision
params_prec = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_prec)
if (which == 'precision'):
phi = self.link_precision.inverse(linpred_prec)
res = phi
else:
res = linpred_prec
elif (which == 'var'):
res = self._predict_var(params, exog=exog, exog_precision=exog_precision)
else:
raise ValueError(('which = %s is not available' % which))
return res
|
def _predict_precision(self, params, exog_precision=None):
'Predict values for precision function for given exog_precision.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog_precision : array_like\n Array of predictor variables for precision.\n\n Returns\n -------\n Predicted precision.\n '
if (exog_precision is None):
exog_precision = self.exog_precision
k_mean = self.exog.shape[1]
params_precision = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_precision)
phi = self.link_precision.inverse(linpred_prec)
return phi
| 5,514,060,033,690,942,000
|
Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision.
|
statsmodels/othermod/betareg.py
|
_predict_precision
|
EC-AI/statsmodels
|
python
|
def _predict_precision(self, params, exog_precision=None):
'Predict values for precision function for given exog_precision.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog_precision : array_like\n Array of predictor variables for precision.\n\n Returns\n -------\n Predicted precision.\n '
if (exog_precision is None):
exog_precision = self.exog_precision
k_mean = self.exog.shape[1]
params_precision = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_precision)
phi = self.link_precision.inverse(linpred_prec)
return phi
|
def _predict_var(self, params, exog=None, exog_precision=None):
'predict values for conditional variance V(endog | exog)\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for precision.\n\n Returns\n -------\n Predicted conditional variance.\n '
mean = self.predict(params, exog=exog)
precision = self._predict_precision(params, exog_precision=exog_precision)
var_endog = ((mean * (1 - mean)) / (1 + precision))
return var_endog
| 755,736,759,623,645,600
|
predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance.
|
statsmodels/othermod/betareg.py
|
_predict_var
|
EC-AI/statsmodels
|
python
|
def _predict_var(self, params, exog=None, exog_precision=None):
'predict values for conditional variance V(endog | exog)\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for precision.\n\n Returns\n -------\n Predicted conditional variance.\n '
mean = self.predict(params, exog=exog)
precision = self._predict_precision(params, exog_precision=exog_precision)
var_endog = ((mean * (1 - mean)) / (1 + precision))
return var_endog
|
def loglikeobs(self, params):
    """
    Loglikelihood for observations of the Beta regression model.

    Parameters
    ----------
    params : ndarray
        Coefficients for the linear predictors of the mean and of the
        precision function.

    Returns
    -------
    loglike : ndarray
        The log likelihood for each observation of the model evaluated
        at `params`.
    """
    # delegate to the data-explicit implementation with the stored arrays
    return self._llobs(self.endog, self.exog, self.exog_precision, params)
| 2,338,312,586,881,387,000
|
Loglikelihood for observations of the Beta regressionmodel.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
|
statsmodels/othermod/betareg.py
|
loglikeobs
|
EC-AI/statsmodels
|
python
|
def loglikeobs(self, params):
'\n Loglikelihood for observations of the Beta regressionmodel.\n\n Parameters\n ----------\n params : ndarray\n The parameters of the model, coefficients for linear predictors\n of the mean and of the precision function.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`.\n '
return self._llobs(self.endog, self.exog, self.exog_precision, params)
|
def _llobs(self, endog, exog, exog_precision, params):
'\n Loglikelihood for observations with data arguments.\n\n Parameters\n ----------\n endog : ndarray\n 1d array of endogenous variable.\n exog : ndarray\n 2d array of explanatory variables.\n exog_precision : ndarray\n 2d array of explanatory variables for precision.\n params : ndarray\n The parameters of the model, coefficients for linear predictors\n of the mean and of the precision function.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`.\n '
(y, X, Z) = (endog, exog, exog_precision)
nz = Z.shape[1]
params_mean = params[:(- nz)]
params_prec = params[(- nz):]
linpred = np.dot(X, params_mean)
linpred_prec = np.dot(Z, params_prec)
mu = self.link.inverse(linpred)
phi = self.link_precision.inverse(linpred_prec)
eps_lb = 1e-200
alpha = np.clip((mu * phi), eps_lb, np.inf)
beta = np.clip(((1 - mu) * phi), eps_lb, np.inf)
ll = ((((lgamma(phi) - lgamma(alpha)) - lgamma(beta)) + (((mu * phi) - 1) * np.log(y))) + ((((1 - mu) * phi) - 1) * np.log((1 - y))))
return ll
| -8,845,710,265,055,055,000
|
Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
|
statsmodels/othermod/betareg.py
|
_llobs
|
EC-AI/statsmodels
|
python
|
def _llobs(self, endog, exog, exog_precision, params):
'\n Loglikelihood for observations with data arguments.\n\n Parameters\n ----------\n endog : ndarray\n 1d array of endogenous variable.\n exog : ndarray\n 2d array of explanatory variables.\n exog_precision : ndarray\n 2d array of explanatory variables for precision.\n params : ndarray\n The parameters of the model, coefficients for linear predictors\n of the mean and of the precision function.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`.\n '
(y, X, Z) = (endog, exog, exog_precision)
nz = Z.shape[1]
params_mean = params[:(- nz)]
params_prec = params[(- nz):]
linpred = np.dot(X, params_mean)
linpred_prec = np.dot(Z, params_prec)
mu = self.link.inverse(linpred)
phi = self.link_precision.inverse(linpred_prec)
eps_lb = 1e-200
alpha = np.clip((mu * phi), eps_lb, np.inf)
beta = np.clip(((1 - mu) * phi), eps_lb, np.inf)
ll = ((((lgamma(phi) - lgamma(alpha)) - lgamma(beta)) + (((mu * phi) - 1) * np.log(y))) + ((((1 - mu) * phi) - 1) * np.log((1 - y))))
return ll
|
def score(self, params):
    """
    Return the score vector (gradient) of the log-likelihood.

    http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993

    Parameters
    ----------
    params : ndarray
        Parameter at which score is evaluated.

    Returns
    -------
    score : ndarray
        First derivative of loglikelihood function.
    """
    sf_mean, sf_prec = self.score_factor(params)
    # contract the per-observation factors with the two design matrices
    grad_mean = np.dot(sf_mean, self.exog)
    grad_prec = np.dot(sf_prec, self.exog_precision)
    return np.concatenate((grad_mean, grad_prec))
| 6,804,556,562,903,432,000
|
Returns the score vector of the log-likelihood.
http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score : ndarray
First derivative of loglikelihood function.
|
statsmodels/othermod/betareg.py
|
score
|
EC-AI/statsmodels
|
python
|
def score(self, params):
'\n Returns the score vector of the log-likelihood.\n\n http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score : ndarray\n First derivative of loglikelihood function.\n '
(sf1, sf2) = self.score_factor(params)
d1 = np.dot(sf1, self.exog)
d2 = np.dot(sf2, self.exog_precision)
return np.concatenate((d1, d2))
|
def _score_check(self, params):
    """Inherited score with finite differences.

    Cross-check helper: evaluates the generic numerical-derivative
    ``score`` from the superclass instead of the analytic one defined
    on this class.

    Parameters
    ----------
    params : ndarray
        Parameter at which score is evaluated.

    Returns
    -------
    score based on numerical derivatives
    """
    return super(BetaModel, self).score(params)
| -8,920,612,915,008,268,000
|
Inherited score with finite differences
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score based on numerical derivatives
|
statsmodels/othermod/betareg.py
|
_score_check
|
EC-AI/statsmodels
|
python
|
def _score_check(self, params):
'Inherited score with finite differences\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score based on numerical derivatives\n '
return super(BetaModel, self).score(params)
|
def score_factor(self, params, endog=None):
    """Derivative of loglikelihood function w.r.t. linear predictors.

    This needs to be multiplied with the exog to obtain the score_obs.

    Parameters
    ----------
    params : ndarray
        Parameter at which score is evaluated.
    endog : ndarray, optional
        Response values; defaults to the model's stored ``endog``.

    Returns
    -------
    score_factor : ndarray, 2-D
        A 2d weight vector used in the calculation of the score_obs.

    Notes
    -----
    The score_obs can be obtained from score_factor ``sf`` using

    - d1 = sf[:, :1] * exog
    - d2 = sf[:, 1:2] * exog_precision
    """
    from scipy import special
    digamma = special.psi
    y = self.endog if endog is None else endog
    design_mean = self.exog
    design_prec = self.exog_precision
    # split params: precision coefficients are the tail of the vector
    n_prec = design_prec.shape[1]
    coef_mean = params[:-n_prec]
    coef_prec = params[-n_prec:]
    mu = self.link.inverse(np.dot(design_mean, coef_mean))
    phi = self.link_precision.inverse(np.dot(design_prec, coef_prec))
    # keep the beta shape parameters strictly positive for digamma
    eps_lb = 1e-200
    alpha = np.clip(mu * phi, eps_lb, np.inf)
    beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
    ystar = np.log(y / (1.0 - y))
    dig_beta = digamma(beta)
    mustar = digamma(alpha) - dig_beta
    yt = np.log(1 - y)
    mut = dig_beta - digamma(phi)
    # chain-rule factors from the inverse link derivatives
    t = 1.0 / self.link.deriv(mu)
    h = 1.0 / self.link_precision.deriv(phi)
    sf1 = phi * t * (ystar - mustar)
    sf2 = h * ((mu * (ystar - mustar)) + yt - mut)
    return (sf1, sf2)
| -6,567,238,699,126,569,000
|
Derivative of loglikelihood function w.r.t. linear predictors.
This needs to be multiplied with the exog to obtain the score_obs.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
Notes
-----
The score_obs can be obtained from score_factor ``sf`` using
- d1 = sf[:, :1] * exog
- d2 = sf[:, 1:2] * exog_precision
|
statsmodels/othermod/betareg.py
|
score_factor
|
EC-AI/statsmodels
|
python
|
def score_factor(self, params, endog=None):
    'Derivative of loglikelihood function w.r.t. linear predictors.\n\n This needs to be multiplied with the exog to obtain the score_obs.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score_factor : ndarray, 2-D\n A 2d weight vector used in the calculation of the score_obs.\n\n Notes\n -----\n The score_obs can be obtained from score_factor ``sf`` using\n\n - d1 = sf[:, :1] * exog\n - d2 = sf[:, 1:2] * exog_precision\n\n '
    from scipy import special
    digamma = special.psi
    # Allow an alternative endog (used e.g. when differentiating the score
    # numerically with respect to the response).
    y = (self.endog if (endog is None) else endog)
    (X, Z) = (self.exog, self.exog_precision)
    nz = Z.shape[1]
    # ``params`` stacks the mean coefficients first, then the nz precision
    # coefficients.
    Xparams = params[:(- nz)]
    Zparams = params[(- nz):]
    mu = self.link.inverse(np.dot(X, Xparams))
    phi = self.link_precision.inverse(np.dot(Z, Zparams))
    # Clip the beta shape parameters away from zero to avoid digamma
    # singularities at 0.
    eps_lb = 1e-200
    alpha = np.clip((mu * phi), eps_lb, np.inf)
    beta = np.clip(((1 - mu) * phi), eps_lb, np.inf)
    ystar = np.log((y / (1.0 - y)))  # logit of the response
    dig_beta = digamma(beta)
    mustar = (digamma(alpha) - dig_beta)
    yt = np.log((1 - y))
    mut = (dig_beta - digamma(phi))
    # Reciprocal link derivatives: chain-rule factors from linear predictor
    # to mean and to precision.
    t = (1.0 / self.link.deriv(mu))
    h = (1.0 / self.link_precision.deriv(phi))
    sf1 = ((phi * t) * (ystar - mustar))  # weight for the mean exog
    sf2 = (h * (((mu * (ystar - mustar)) + yt) - mut))  # weight for the precision exog
    return (sf1, sf2)
|
def score_hessian_factor(self, params, return_hessian=False, observed=True):
    'Derivatives of loglikelihood function w.r.t. linear predictors.\n\n This calculates score and hessian factors at the same time, because\n there is a large overlap in calculations.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n return_hessian : bool\n If False, then only score_factors are returned\n If True, the both score and hessian factors are returned\n observed : bool\n If True, then the observed Hessian is returned (default).\n If False, then the expected information matrix is returned.\n\n Returns\n -------\n score_factor : ndarray, 2-D\n A 2d weight vector used in the calculation of the score_obs.\n (-jbb, -jbg, -jgg) : tuple\n A tuple with 3 hessian factors, corresponding to the upper\n triangle of the Hessian matrix.\n TODO: check why there are minus\n '
    from scipy import special
    digamma = special.psi
    (y, X, Z) = (self.endog, self.exog, self.exog_precision)
    nz = Z.shape[1]
    # ``params`` stacks mean coefficients first, then the nz precision
    # coefficients.
    Xparams = params[:(- nz)]
    Zparams = params[(- nz):]
    mu = self.link.inverse(np.dot(X, Xparams))
    phi = self.link_precision.inverse(np.dot(Z, Zparams))
    # Clip the beta shape parameters away from zero to avoid digamma /
    # polygamma singularities at 0.
    eps_lb = 1e-200
    alpha = np.clip((mu * phi), eps_lb, np.inf)
    beta = np.clip(((1 - mu) * phi), eps_lb, np.inf)
    ystar = np.log((y / (1.0 - y)))  # logit of the response
    dig_beta = digamma(beta)
    mustar = (digamma(alpha) - dig_beta)
    yt = np.log((1 - y))
    mut = (dig_beta - digamma(phi))
    # Reciprocal link derivatives (chain-rule factors).
    t = (1.0 / self.link.deriv(mu))
    h = (1.0 / self.link_precision.deriv(phi))
    ymu_star = (ystar - mustar)
    sf1 = ((phi * t) * ymu_star)  # score weight for the mean exog
    sf2 = (h * (((mu * ymu_star) + yt) - mut))  # score weight for the precision exog
    if return_hessian:
        # Trigamma (polygamma order 1) terms entering the information matrix.
        trigamma = (lambda x: special.polygamma(1, x))
        trig_beta = trigamma(beta)
        var_star = (trigamma(alpha) + trig_beta)
        var_t = (trig_beta - trigamma(phi))
        c = (- trig_beta)
        # Second derivatives of the links; curvature corrections used only
        # for the observed Hessian branches below.
        s = self.link.deriv2(mu)
        q = self.link_precision.deriv2(phi)
        jbb = ((phi * t) * var_star)
        if observed:
            jbb += ((s * (t ** 2)) * ymu_star)
        jbb *= (t * phi)
        jbg = (((phi * t) * h) * ((mu * var_star) + c))
        if observed:
            jbg -= ((ymu_star * t) * h)
        jgg = ((h ** 2) * ((((mu ** 2) * var_star) + ((2 * mu) * c)) + var_t))
        if observed:
            jgg += (((((mu * ymu_star) + yt) - mut) * q) * (h ** 3))
        # Factors correspond to the (mean, cross, precision) blocks of the
        # Hessian upper triangle.
        return ((sf1, sf2), ((- jbb), (- jbg), (- jgg)))
    else:
        return (sf1, sf2)
| 5,840,432,985,829,507,000
|
Derivatives of loglikelihood function w.r.t. linear predictors.
This calculates score and hessian factors at the same time, because
there is a large overlap in calculations.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
return_hessian : bool
If False, then only score_factors are returned
If True, the both score and hessian factors are returned
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
(-jbb, -jbg, -jgg) : tuple
A tuple with 3 hessian factors, corresponding to the upper
triangle of the Hessian matrix.
TODO: check why there are minus
|
statsmodels/othermod/betareg.py
|
score_hessian_factor
|
EC-AI/statsmodels
|
python
|
def score_hessian_factor(self, params, return_hessian=False, observed=True):
'Derivatives of loglikelihood function w.r.t. linear predictors.\n\n This calculates score and hessian factors at the same time, because\n there is a large overlap in calculations.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n return_hessian : bool\n If False, then only score_factors are returned\n If True, the both score and hessian factors are returned\n observed : bool\n If True, then the observed Hessian is returned (default).\n If False, then the expected information matrix is returned.\n\n Returns\n -------\n score_factor : ndarray, 2-D\n A 2d weight vector used in the calculation of the score_obs.\n (-jbb, -jbg, -jgg) : tuple\n A tuple with 3 hessian factors, corresponding to the upper\n triangle of the Hessian matrix.\n TODO: check why there are minus\n '
from scipy import special
digamma = special.psi
(y, X, Z) = (self.endog, self.exog, self.exog_precision)
nz = Z.shape[1]
Xparams = params[:(- nz)]
Zparams = params[(- nz):]
mu = self.link.inverse(np.dot(X, Xparams))
phi = self.link_precision.inverse(np.dot(Z, Zparams))
eps_lb = 1e-200
alpha = np.clip((mu * phi), eps_lb, np.inf)
beta = np.clip(((1 - mu) * phi), eps_lb, np.inf)
ystar = np.log((y / (1.0 - y)))
dig_beta = digamma(beta)
mustar = (digamma(alpha) - dig_beta)
yt = np.log((1 - y))
mut = (dig_beta - digamma(phi))
t = (1.0 / self.link.deriv(mu))
h = (1.0 / self.link_precision.deriv(phi))
ymu_star = (ystar - mustar)
sf1 = ((phi * t) * ymu_star)
sf2 = (h * (((mu * ymu_star) + yt) - mut))
if return_hessian:
trigamma = (lambda x: special.polygamma(1, x))
trig_beta = trigamma(beta)
var_star = (trigamma(alpha) + trig_beta)
var_t = (trig_beta - trigamma(phi))
c = (- trig_beta)
s = self.link.deriv2(mu)
q = self.link_precision.deriv2(phi)
jbb = ((phi * t) * var_star)
if observed:
jbb += ((s * (t ** 2)) * ymu_star)
jbb *= (t * phi)
jbg = (((phi * t) * h) * ((mu * var_star) + c))
if observed:
jbg -= ((ymu_star * t) * h)
jgg = ((h ** 2) * ((((mu ** 2) * var_star) + ((2 * mu) * c)) + var_t))
if observed:
jgg += (((((mu * ymu_star) + yt) - mut) * q) * (h ** 3))
return ((sf1, sf2), ((- jbb), (- jbg), (- jgg)))
else:
return (sf1, sf2)
|
def score_obs(self, params):
'\n Score, first derivative of the loglikelihood for each observation.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score_obs : ndarray, 2d\n The first derivative of the loglikelihood function evaluated at\n params for each observation.\n '
(sf1, sf2) = self.score_factor(params)
d1 = (sf1[:, None] * self.exog)
d2 = (sf2[:, None] * self.exog_precision)
return np.column_stack((d1, d2))
| 3,554,911,380,494,363,600
|
Score, first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
|
statsmodels/othermod/betareg.py
|
score_obs
|
EC-AI/statsmodels
|
python
|
def score_obs(self, params):
'\n Score, first derivative of the loglikelihood for each observation.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score_obs : ndarray, 2d\n The first derivative of the loglikelihood function evaluated at\n params for each observation.\n '
(sf1, sf2) = self.score_factor(params)
d1 = (sf1[:, None] * self.exog)
d2 = (sf2[:, None] * self.exog_precision)
return np.column_stack((d1, d2))
|
def hessian(self, params, observed=None):
    """Hessian, second derivative of loglikelihood function

    Parameters
    ----------
    params : ndarray
        Parameter at which Hessian is evaluated.
    observed : bool
        If True, then the observed Hessian is returned.
        If False, then the expected information matrix is returned.
        If None (default), the type selected at fit time via
        ``self.hess_type`` is used.

    Returns
    -------
    hessian : ndarray
        Hessian, i.e. observed information, or expected information matrix.
    """
    # Fix: the ``observed`` argument used to be ignored and unconditionally
    # overwritten from ``self.hess_type``.  Now an explicit request is
    # honored; the ``None`` default reproduces the old behavior.
    if observed is None:
        observed = (self.hess_type != 'eim')
    _, hf = self.score_hessian_factor(params, return_hessian=True,
                                      observed=observed)
    hf11, hf12, hf22 = hf
    # Assemble the blocks of the Hessian from the per-observation factors.
    d11 = (self.exog.T * hf11).dot(self.exog)
    d12 = (self.exog.T * hf12).dot(self.exog_precision)
    d22 = (self.exog_precision.T * hf22).dot(self.exog_precision)
    return np.block([[d11, d12], [d12.T, d22]])
| -7,365,549,943,060,540,000
|
Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
Parameter at which Hessian is evaluated.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
|
statsmodels/othermod/betareg.py
|
hessian
|
EC-AI/statsmodels
|
python
|
def hessian(self, params, observed=None):
'Hessian, second derivative of loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n Parameter at which Hessian is evaluated.\n observed : bool\n If True, then the observed Hessian is returned (default).\n If False, then the expected information matrix is returned.\n\n Returns\n -------\n hessian : ndarray\n Hessian, i.e. observed information, or expected information matrix.\n '
if (self.hess_type == 'eim'):
observed = False
else:
observed = True
(_, hf) = self.score_hessian_factor(params, return_hessian=True, observed=observed)
(hf11, hf12, hf22) = hf
d11 = (self.exog.T * hf11).dot(self.exog)
d12 = (self.exog.T * hf12).dot(self.exog_precision)
d22 = (self.exog_precision.T * hf22).dot(self.exog_precision)
return np.block([[d11, d12], [d12.T, d22]])
|
def hessian_factor(self, params, observed=True):
    """Derivatives of loglikelihood function w.r.t. linear predictors."""
    # Delegate to the combined score/hessian computation and keep only the
    # hessian factors.
    factors = self.score_hessian_factor(params, return_hessian=True,
                                        observed=observed)[1]
    return factors
| 1,568,852,675,676,506,000
|
Derivatives of loglikelihood function w.r.t. linear predictors.
|
statsmodels/othermod/betareg.py
|
hessian_factor
|
EC-AI/statsmodels
|
python
|
def hessian_factor(self, params, observed=True):
'\n '
(_, hf) = self.score_hessian_factor(params, return_hessian=True, observed=observed)
return hf
|
def _start_params(self, niter=2, return_intermediate=False):
    'find starting values\n\n Parameters\n ----------\n niter : int\n Number of iterations of WLS approximation\n return_intermediate : bool\n If False (default), then only the preliminary parameter estimate\n will be returned.\n If True, then also the two results instances of the WLS estimate\n for mean parameters and for the precision parameters will be\n returned.\n\n Returns\n -------\n sp : ndarray\n start parameters for the optimization\n res_m2 : results instance (optional)\n Results instance for the WLS regression of the mean function.\n res_p2 : results instance (optional)\n Results instance for the WLS regression of the precision function.\n\n Notes\n -----\n This calculates a few iteration of weighted least squares. This is not\n a full scoring algorithm.\n '
    from statsmodels.regression.linear_model import OLS, WLS
    # Initial mean coefficients: OLS on the link-transformed response.
    res_m = OLS(self.link(self.endog), self.exog).fit()
    fitted = self.link.inverse(res_m.fittedvalues)
    resid = (self.endog - fitted)
    # Crude precision estimate phi_i = mu*(1-mu)/resid**2 - 1, with the
    # residuals bounded away from zero for numerical stability.
    prec_i = (((fitted * (1 - fitted)) / (np.maximum(np.abs(resid), 0.01) ** 2)) - 1)
    res_p = OLS(self.link_precision(prec_i), self.exog_precision).fit()
    prec_fitted = self.link_precision.inverse(res_p.fittedvalues)
    # Refine both regressions with a few WLS passes.
    # NOTE(review): niter=0 would leave res_m2/res_p2 undefined (NameError);
    # callers are expected to use niter >= 1.
    for _ in range(niter):
        # Inverse variance of the response, propagated to the link scale,
        # used as WLS weights for the mean regression.
        y_var_inv = ((1 + prec_fitted) / (fitted * (1 - fitted)))
        ylink_var_inv = (y_var_inv / (self.link.deriv(fitted) ** 2))
        res_m2 = WLS(self.link(self.endog), self.exog, weights=ylink_var_inv).fit()
        fitted = self.link.inverse(res_m2.fittedvalues)
        resid2 = (self.endog - fitted)
        prec_i2 = (((fitted * (1 - fitted)) / (np.maximum(np.abs(resid2), 0.01) ** 2)) - 1)
        # Delta-method weights for the precision regression.
        w_p = (1.0 / (self.link_precision.deriv(prec_fitted) ** 2))
        res_p2 = WLS(self.link_precision(prec_i2), self.exog_precision, weights=w_p).fit()
        prec_fitted = self.link_precision.inverse(res_p2.fittedvalues)
    # Stack mean and precision coefficients in the model's parameter order.
    sp2 = np.concatenate((res_m2.params, res_p2.params))
    if return_intermediate:
        return (sp2, res_m2, res_p2)
    return sp2
| 1,173,509,138,294,590,200
|
find starting values
Parameters
----------
niter : int
Number of iterations of WLS approximation
return_intermediate : bool
If False (default), then only the preliminary parameter estimate
will be returned.
If True, then also the two results instances of the WLS estimate
for mean parameters and for the precision parameters will be
returned.
Returns
-------
sp : ndarray
start parameters for the optimization
res_m2 : results instance (optional)
Results instance for the WLS regression of the mean function.
res_p2 : results instance (optional)
Results instance for the WLS regression of the precision function.
Notes
-----
This calculates a few iteration of weighted least squares. This is not
a full scoring algorithm.
|
statsmodels/othermod/betareg.py
|
_start_params
|
EC-AI/statsmodels
|
python
|
def _start_params(self, niter=2, return_intermediate=False):
'find starting values\n\n Parameters\n ----------\n niter : int\n Number of iterations of WLS approximation\n return_intermediate : bool\n If False (default), then only the preliminary parameter estimate\n will be returned.\n If True, then also the two results instances of the WLS estimate\n for mean parameters and for the precision parameters will be\n returned.\n\n Returns\n -------\n sp : ndarray\n start parameters for the optimization\n res_m2 : results instance (optional)\n Results instance for the WLS regression of the mean function.\n res_p2 : results instance (optional)\n Results instance for the WLS regression of the precision function.\n\n Notes\n -----\n This calculates a few iteration of weighted least squares. This is not\n a full scoring algorithm.\n '
from statsmodels.regression.linear_model import OLS, WLS
res_m = OLS(self.link(self.endog), self.exog).fit()
fitted = self.link.inverse(res_m.fittedvalues)
resid = (self.endog - fitted)
prec_i = (((fitted * (1 - fitted)) / (np.maximum(np.abs(resid), 0.01) ** 2)) - 1)
res_p = OLS(self.link_precision(prec_i), self.exog_precision).fit()
prec_fitted = self.link_precision.inverse(res_p.fittedvalues)
for _ in range(niter):
y_var_inv = ((1 + prec_fitted) / (fitted * (1 - fitted)))
ylink_var_inv = (y_var_inv / (self.link.deriv(fitted) ** 2))
res_m2 = WLS(self.link(self.endog), self.exog, weights=ylink_var_inv).fit()
fitted = self.link.inverse(res_m2.fittedvalues)
resid2 = (self.endog - fitted)
prec_i2 = (((fitted * (1 - fitted)) / (np.maximum(np.abs(resid2), 0.01) ** 2)) - 1)
w_p = (1.0 / (self.link_precision.deriv(prec_fitted) ** 2))
res_p2 = WLS(self.link_precision(prec_i2), self.exog_precision, weights=w_p).fit()
prec_fitted = self.link_precision.inverse(res_p2.fittedvalues)
sp2 = np.concatenate((res_m2.params, res_p2.params))
if return_intermediate:
return (sp2, res_m2, res_p2)
return sp2
|
def fit(self, start_params=None, maxiter=1000, disp=False, method='bfgs', **kwds):
    """
    Fit the model by maximum likelihood.

    Parameters
    ----------
    start_params : array-like
        A vector of starting values for the regression
        coefficients. If None, a default is chosen.
    maxiter : integer
        The maximum number of iterations
    disp : bool
        Show convergence stats.
    method : str
        The optimization method to use.
    kwds :
        Keyword arguments for the optimizer.

    Returns
    -------
    BetaResults instance.
    """
    if start_params is None:
        start_params = self._start_params()
    # Select the Hessian type consumed by ``hessian``.  'eim' is intercepted
    # here because the generic fit machinery does not know this cov_type;
    # every other case uses the observed information matrix.
    # Fix: previously ``hess_type`` was left unset (or stale from an earlier
    # ``fit`` call) when a non-'eim' cov_type was passed.
    if ('cov_type' in kwds) and (kwds['cov_type'].lower() == 'eim'):
        self.hess_type = 'eim'
        del kwds['cov_type']
    else:
        self.hess_type = 'oim'
    res = super(BetaModel, self).fit(start_params=start_params,
                                     maxiter=maxiter, method=method,
                                     disp=disp, **kwds)
    if not isinstance(res, BetaResultsWrapper):
        res = BetaResultsWrapper(res)
    return res
| -1,019,944,970,131,715,300
|
Fit the model by maximum likelihood.
Parameters
----------
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
maxiter : integer
The maximum number of iterations
disp : bool
Show convergence stats.
method : str
The optimization method to use.
kwds :
Keyword arguments for the optimizer.
Returns
-------
BetaResults instance.
|
statsmodels/othermod/betareg.py
|
fit
|
EC-AI/statsmodels
|
python
|
def fit(self, start_params=None, maxiter=1000, disp=False, method='bfgs', **kwds):
'\n Fit the model by maximum likelihood.\n\n Parameters\n ----------\n start_params : array-like\n A vector of starting values for the regression\n coefficients. If None, a default is chosen.\n maxiter : integer\n The maximum number of iterations\n disp : bool\n Show convergence stats.\n method : str\n The optimization method to use.\n kwds :\n Keyword arguments for the optimizer.\n\n Returns\n -------\n BetaResults instance.\n '
if (start_params is None):
start_params = self._start_params()
if ('cov_type' in kwds):
if (kwds['cov_type'].lower() == 'eim'):
self.hess_type = 'eim'
del kwds['cov_type']
else:
self.hess_type = 'oim'
res = super(BetaModel, self).fit(start_params=start_params, maxiter=maxiter, method=method, disp=disp, **kwds)
if (not isinstance(res, BetaResultsWrapper)):
res = BetaResultsWrapper(res)
return res
|
def _deriv_mean_dparams(self, params):
'\n Derivative of the expected endog with respect to the parameters.\n\n not verified yet\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n '
link = self.link
lin_pred = self.predict(params, which='linear')
idl = link.inverse_deriv(lin_pred)
dmat = (self.exog * idl[:, None])
return np.column_stack((dmat, np.zeros(self.exog_precision.shape)))
| 2,393,203,179,363,964,000
|
Derivative of the expected endog with respect to the parameters.
not verified yet
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
|
statsmodels/othermod/betareg.py
|
_deriv_mean_dparams
|
EC-AI/statsmodels
|
python
|
def _deriv_mean_dparams(self, params):
'\n Derivative of the expected endog with respect to the parameters.\n\n not verified yet\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n '
link = self.link
lin_pred = self.predict(params, which='linear')
idl = link.inverse_deriv(lin_pred)
dmat = (self.exog * idl[:, None])
return np.column_stack((dmat, np.zeros(self.exog_precision.shape)))
|
def _deriv_score_obs_dendog(self, params):
    """derivative of score_obs w.r.t. endog

    Parameters
    ----------
    params : ndarray
        parameter at which score is evaluated

    Returns
    -------
    derivative : ndarray_2d
        The derivative of the score_obs with respect to endog.
    """
    from statsmodels.tools.numdiff import _approx_fprime_cs_scalar

    def _stacked_score_factor(y):
        # The numdiff helper passes a 2-d column; flatten before use.
        if (y.ndim == 2) and (y.shape[1] == 1):
            y = y[:, 0]
        return np.column_stack(self.score_factor(params, endog=y))

    jac = _approx_fprime_cs_scalar(self.endog[:, None], _stacked_score_factor)
    deriv_mean = jac[:, :1] * self.exog
    deriv_prec = jac[:, 1:2] * self.exog_precision
    return np.column_stack((deriv_mean, deriv_prec))
| 2,784,673,522,723,553,000
|
derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
|
statsmodels/othermod/betareg.py
|
_deriv_score_obs_dendog
|
EC-AI/statsmodels
|
python
|
def _deriv_score_obs_dendog(self, params):
'derivative of score_obs w.r.t. endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog.\n '
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
def f(y):
if ((y.ndim == 2) and (y.shape[1] == 1)):
y = y[:, 0]
sf = self.score_factor(params, endog=y)
return np.column_stack(sf)
dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
d1 = (dsf[:, :1] * self.exog)
d2 = (dsf[:, 1:2] * self.exog_precision)
return np.column_stack((d1, d2))
|
def get_distribution_params(self, params, exog=None, exog_precision=None):
    """
    Return distribution parameters converted from model prediction.

    Parameters
    ----------
    params : array_like
        The model parameters.
    exog : array_like
        Array of predictor variables for the mean.
    exog_precision : array_like
        Array of predictor variables for the precision.

    Returns
    -------
    (alpha, beta) : tuple of ndarrays
        Parameters for the scipy distribution to evaluate predictive
        distribution.
    """
    mu = self.predict(params, exog=exog)
    prec = self.predict(params, exog_precision=exog_precision,
                        which='precision')
    # scipy.stats.beta shape parameters: a = mu * phi, b = (1 - mu) * phi.
    return (prec * mu, prec * (1 - mu))
| -3,883,005,072,907,994,000
|
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
|
statsmodels/othermod/betareg.py
|
get_distribution_params
|
EC-AI/statsmodels
|
python
|
def get_distribution_params(self, params, exog=None, exog_precision=None):
'\n Return distribution parameters converted from model prediction.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for mean.\n\n Returns\n -------\n (alpha, beta) : tuple of ndarrays\n Parameters for the scipy distribution to evaluate predictive\n distribution.\n '
mean = self.predict(params, exog=exog)
precision = self.predict(params, exog_precision=exog_precision, which='precision')
return ((precision * mean), (precision * (1 - mean)))
|
def get_distribution(self, params, exog=None, exog_precision=None):
    """
    Return a instance of the predictive distribution.

    Parameters
    ----------
    params : array_like
        The model parameters.
    exog : array_like
        Array of predictor variables for the mean.
    exog_precision : array_like
        Array of predictor variables for the precision.

    Returns
    -------
    Instance of a scipy frozen distribution based on estimated
    parameters.

    See Also
    --------
    predict

    Notes
    -----
    This function delegates to the predict method to handle exog and
    exog_precision, which in turn makes any required transformations.

    Due to the behavior of ``scipy.stats.distributions objects``, the
    returned random number generator must be called with ``gen.rvs(n)``
    where ``n`` is the number of observations in the data set used
    to fit the model. If any other value is used for ``n``, misleading
    results will be produced.
    """
    from scipy import stats
    shape_params = self.get_distribution_params(
        params, exog=exog, exog_precision=exog_precision)
    return stats.beta(*shape_params)
| -5,312,023,450,071,919,000
|
Return a instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
|
statsmodels/othermod/betareg.py
|
get_distribution
|
EC-AI/statsmodels
|
python
|
def get_distribution(self, params, exog=None, exog_precision=None):
'\n Return a instance of the predictive distribution.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for mean.\n\n Returns\n -------\n Instance of a scipy frozen distribution based on estimated\n parameters.\n\n See Also\n --------\n predict\n\n Notes\n -----\n This function delegates to the predict method to handle exog and\n exog_precision, which in turn makes any required transformations.\n\n Due to the behavior of ``scipy.stats.distributions objects``, the\n returned random number generator must be called with ``gen.rvs(n)``\n where ``n`` is the number of observations in the data set used\n to fit the model. If any other value is used for ``n``, misleading\n results will be produced.\n '
from scipy import stats
args = self.get_distribution_params(params, exog=exog, exog_precision=exog_precision)
distr = stats.beta(*args)
return distr
|
@cache_readonly
def fittedvalues(self):
    """In-sample predicted mean, conditional expectation."""
    # Delegates to the model's predict with the estimated parameters.
    return self.model.predict(self.params)
| -2,146,296,562,088,598,000
|
In-sample predicted mean, conditional expectation.
|
statsmodels/othermod/betareg.py
|
fittedvalues
|
EC-AI/statsmodels
|
python
|
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params)
|
@cache_readonly
def fitted_precision(self):
    """In-sample predicted precision"""
    # Same delegation as ``fittedvalues`` but for the precision submodel.
    return self.model.predict(self.params, which='precision')
| -4,571,141,172,921,569,300
|
In-sample predicted precision
|
statsmodels/othermod/betareg.py
|
fitted_precision
|
EC-AI/statsmodels
|
python
|
@cache_readonly
def fitted_precision(self):
return self.model.predict(self.params, which='precision')
|
@cache_readonly
def resid(self):
    """Response residual"""
    # Observed response minus the in-sample predicted mean.
    return self.model.endog - self.fittedvalues
| -2,164,090,418,230,139,400
|
Response residual
|
statsmodels/othermod/betareg.py
|
resid
|
EC-AI/statsmodels
|
python
|
@cache_readonly
def resid(self):
return (self.model.endog - self.fittedvalues)
|
@cache_readonly
def resid_pearson(self):
    """Pearson standardize residual"""
    # Standardize the response residual by the model-implied standard
    # deviation of the response.
    scale = np.sqrt(self.model.predict(self.params, which='var'))
    return self.resid / scale
| -5,317,540,306,562,735,000
|
Pearson standardize residual
|
statsmodels/othermod/betareg.py
|
resid_pearson
|
EC-AI/statsmodels
|
python
|
@cache_readonly
def resid_pearson(self):
std = np.sqrt(self.model.predict(self.params, which='var'))
return (self.resid / std)
|
@cache_readonly
def prsquared(self):
    """Cox-Snell Likelihood-Ratio pseudo-R-squared.

    1 - exp((llnull - .llf) * (2 / nobs))
    """
    return self.pseudo_rsquared(kind='lr')
| 5,066,028,713,186,439,000
|
Cox-Snell Likelihood-Ratio pseudo-R-squared.
1 - exp((llnull - .llf) * (2 / nobs))
|
statsmodels/othermod/betareg.py
|
prsquared
|
EC-AI/statsmodels
|
python
|
@cache_readonly
def prsquared(self):
'Cox-Snell Likelihood-Ratio pseudo-R-squared.\n\n 1 - exp((llnull - .llf) * (2 / nobs))\n '
return self.pseudo_rsquared(kind='lr')
|
def get_distribution_params(self, exog=None, exog_precision=None, transform=True):
    """
    Return distribution parameters converted from model prediction.

    Parameters
    ----------
    exog : array_like
        Array of predictor variables for the mean.
    exog_precision : array_like
        Array of predictor variables for the precision.
    transform : bool
        If transform is True and formulas have been used, then predictor
        ``exog`` is passed through the formula processing. Default is True.

    Returns
    -------
    (alpha, beta) : tuple of ndarrays
        Parameters for the scipy distribution to evaluate predictive
        distribution.
    """
    # Predictions use the estimated parameters stored on the results
    # instance; alpha = mean * precision, beta = (1 - mean) * precision.
    mean = self.predict(exog=exog, transform=transform)
    precision = self.predict(exog_precision=exog_precision, which='precision', transform=transform)
    return ((precision * mean), (precision * (1 - mean)))
| 6,766,482,164,122,011,000
|
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
|
statsmodels/othermod/betareg.py
|
get_distribution_params
|
EC-AI/statsmodels
|
python
|
def get_distribution_params(self, exog=None, exog_precision=None, transform=True):
'\n Return distribution parameters converted from model prediction.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n transform : bool\n If transform is True and formulas have been used, then predictor\n ``exog`` is passed through the formula processing. Default is True.\n\n Returns\n -------\n (alpha, beta) : tuple of ndarrays\n Parameters for the scipy distribution to evaluate predictive\n distribution.\n '
mean = self.predict(exog=exog, transform=transform)
precision = self.predict(exog_precision=exog_precision, which='precision', transform=transform)
return ((precision * mean), (precision * (1 - mean)))
|
def get_distribution(self, exog=None, exog_precision=None, transform=True):
    """
    Return a instance of the predictive distribution.

    Parameters
    ----------
    exog : array_like
        Array of predictor variables for the mean.
    exog_precision : array_like
        Array of predictor variables for the precision.
    transform : bool
        If transform is True and formulas have been used, then predictor
        ``exog`` is passed through the formula processing. Default is True.

    Returns
    -------
    Instance of a scipy frozen distribution based on estimated
    parameters.

    See Also
    --------
    predict

    Notes
    -----
    This function delegates to the predict method to handle exog and
    exog_precision, which in turn makes any required transformations.

    Due to the behavior of ``scipy.stats.distributions objects``, the
    returned random number generator must be called with ``gen.rvs(n)``
    where ``n`` is the number of observations in the data set used
    to fit the model. If any other value is used for ``n``, misleading
    results will be produced.
    """
    from scipy import stats
    alpha, beta = self.get_distribution_params(
        exog=exog, exog_precision=exog_precision, transform=transform)
    # predict may return pandas objects; scipy expects plain arrays.
    return stats.beta(np.asarray(alpha), np.asarray(beta))
| -2,352,558,057,159,520,000
|
Return a instance of the predictive distribution.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
|
statsmodels/othermod/betareg.py
|
get_distribution
|
EC-AI/statsmodels
|
python
|
def get_distribution(self, exog=None, exog_precision=None, transform=True):
'\n Return a instance of the predictive distribution.\n\n Parameters\n ----------\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for mean.\n transform : bool\n If transform is True and formulas have been used, then predictor\n ``exog`` is passed through the formula processing. Default is True.\n\n Returns\n -------\n Instance of a scipy frozen distribution based on estimated\n parameters.\n\n See Also\n --------\n predict\n\n Notes\n -----\n This function delegates to the predict method to handle exog and\n exog_precision, which in turn makes any required transformations.\n\n Due to the behavior of ``scipy.stats.distributions objects``, the\n returned random number generator must be called with ``gen.rvs(n)``\n where ``n`` is the number of observations in the data set used\n to fit the model. If any other value is used for ``n``, misleading\n results will be produced.\n '
from scipy import stats
args = self.get_distribution_params(exog=exog, exog_precision=exog_precision, transform=transform)
args = (np.asarray(arg) for arg in args)
distr = stats.beta(*args)
return distr
|
@glyph_method(glyphs.Annulus)
def annulus(self, **kwargs):
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",\n inner_radius=0.2, outer_radius=0.5)\n\n show(plot)\n\n'
| 3,938,251,180,688,287,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
inner_radius=0.2, outer_radius=0.5)
show(plot)
|
bokeh/plotting/glyph_api.py
|
annulus
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Annulus)
def annulus(self, **kwargs):
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",\n inner_radius=0.2, outer_radius=0.5)\n\n show(plot)\n\n'
|
@marker_method()
def asterisk(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")\n\n show(plot)\n\n'
| 1,979,245,211,455,648,800
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
show(plot)
|
bokeh/plotting/glyph_api.py
|
asterisk
|
AzureTech/bokeh
|
python
|
@marker_method()
def asterisk(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Circle)
def circle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n Only one of ``size`` or ``radius`` should be provided. Note that ``radius``\n defaults to |data units|.\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)\n\n show(plot)\n\n'
| 8,348,713,886,639,397,000
|
.. note::
Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
defaults to |data units|.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
show(plot)
|
bokeh/plotting/glyph_api.py
|
circle
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Circle)
def circle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n Only one of ``size`` or ``radius`` should be provided. Note that ``radius``\n defaults to |data units|.\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)\n\n show(plot)\n\n'
|
@marker_method()
def circle_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,\n color="#FB8072", fill_alpha=0.2, line_width=2)\n\n show(plot)\n\n'
| -3,575,792,139,181,636,600
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
circle_cross
|
AzureTech/bokeh
|
python
|
@marker_method()
def circle_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,\n color="#FB8072", fill_alpha=0.2, line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def circle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,\n color="#FB8072", fill_color=None)\n\n show(plot)\n\n'
| 8,496,416,513,568,618,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_color=None)
show(plot)
|
bokeh/plotting/glyph_api.py
|
circle_dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def circle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,\n color="#FB8072", fill_color=None)\n\n show(plot)\n\n'
|
@marker_method()
def circle_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#DD1C77", fill_alpha=0.2)\n\n show(plot)\n\n'
| 4,937,916,615,365,577,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
circle_x
|
AzureTech/bokeh
|
python
|
@marker_method()
def circle_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#DD1C77", fill_alpha=0.2)\n\n show(plot)\n\n'
|
@marker_method()
def circle_y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#DD1C77", fill_alpha=0.2)\n\n show(plot)\n\n'
| -8,742,850,000,730,177,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
circle_y
|
AzureTech/bokeh
|
python
|
@marker_method()
def circle_y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#DD1C77", fill_alpha=0.2)\n\n show(plot)\n\n'
|
@marker_method()
def cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#E6550D", line_width=2)\n\n show(plot)\n\n'
| 2,837,880,521,095,167,500
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
cross
|
AzureTech/bokeh
|
python
|
@marker_method()
def cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#E6550D", line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def dash(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", line_width=2)\n\n show(plot)\n\n'
| -6,676,188,663,389,309,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
dash
|
AzureTech/bokeh
|
python
|
@marker_method()
def dash(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def diamond(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#1C9099", line_width=2)\n\n show(plot)\n\n'
| 1,792,993,144,400,715,300
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
diamond
|
AzureTech/bokeh
|
python
|
@marker_method()
def diamond(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#1C9099", line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def diamond_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#386CB0", fill_color=None, line_width=2)\n\n show(plot)\n\n'
| 2,351,377,948,024,546,300
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
diamond_cross
|
AzureTech/bokeh
|
python
|
@marker_method()
def diamond_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#386CB0", fill_color=None, line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def diamond_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#386CB0", fill_color=None)\n\n show(plot)\n\n'
| -2,226,707,390,867,855,600
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None)
show(plot)
|
bokeh/plotting/glyph_api.py
|
diamond_dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def diamond_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#386CB0", fill_color=None)\n\n show(plot)\n\n'
|
@marker_method()
def dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")\n\n show(plot)\n\n'
| 92,301,846,826,578,940
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")
show(plot)
|
bokeh/plotting/glyph_api.py
|
dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.HArea)
def harea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],\n fill_color="#99D594")\n\n show(plot)\n\n'
| -5,182,362,240,292,432,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],
fill_color="#99D594")
show(plot)
|
bokeh/plotting/glyph_api.py
|
harea
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.HArea)
def harea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],\n fill_color="#99D594")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.HBar)
def hbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")\n\n show(plot)\n\n'
| -7,922,850,661,979,236,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
show(plot)
|
bokeh/plotting/glyph_api.py
|
hbar
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.HBar)
def hbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Ellipse)
def ellipse(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,\n color="#386CB0", fill_color=None, line_width=2)\n\n show(plot)\n\n'
| 6,402,112,770,406,569,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
ellipse
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Ellipse)
def ellipse(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,\n color="#386CB0", fill_color=None, line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def hex(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")\n\n show(plot)\n\n'
| 8,798,436,814,191,611,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
|
bokeh/plotting/glyph_api.py
|
hex
|
AzureTech/bokeh
|
python
|
@marker_method()
def hex(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")\n\n show(plot)\n\n'
|
@marker_method()
def hex_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],\n color="#74ADD1", fill_color=None)\n\n show(plot)\n\n'
| -2,901,453,211,579,782,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],
color="#74ADD1", fill_color=None)
show(plot)
|
bokeh/plotting/glyph_api.py
|
hex_dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def hex_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],\n color="#74ADD1", fill_color=None)\n\n show(plot)\n\n'
|
@glyph_method(glyphs.HexTile)
def hex_tile(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300, match_aspect=True)\n plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")\n\n show(plot)\n\n'
| 3,511,653,394,743,668,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300, match_aspect=True)
plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")
show(plot)
|
bokeh/plotting/glyph_api.py
|
hex_tile
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.HexTile)
def hex_tile(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300, match_aspect=True)\n plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Image)
def image(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``\n exception will be raised. If neither is passed, then the ``Greys9``\n palette will be used as a default.\n\n'
| 5,084,553,159,512,576,000
|
.. note::
If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
exception will be raised. If neither is passed, then the ``Greys9``
palette will be used as a default.
|
bokeh/plotting/glyph_api.py
|
image
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Image)
def image(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``\n exception will be raised. If neither is passed, then the ``Greys9``\n palette will be used as a default.\n\n'
|
@glyph_method(glyphs.ImageRGBA)
def image_rgba(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n The ``image_rgba`` method accepts images as a two-dimensional array of RGBA\n values (encoded as 32-bit integers).\n\n'
| 3,012,462,801,939,874,300
|
.. note::
The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
values (encoded as 32-bit integers).
|
bokeh/plotting/glyph_api.py
|
image_rgba
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.ImageRGBA)
def image_rgba(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n The ``image_rgba`` method accepts images as a two-dimensional array of RGBA\n values (encoded as 32-bit integers).\n\n'
|
@marker_method()
def inverted_triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")\n\n show(plot)\n\n'
| 7,029,303,627,944,421,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
|
bokeh/plotting/glyph_api.py
|
inverted_triangle
|
AzureTech/bokeh
|
python
|
@marker_method()
def inverted_triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Line)
def line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(title="line", width=300, height=300)\n p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])\n\n show(p)\n\n'
| -53,520,465,852,709,820
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(title="line", width=300, height=300)
p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
show(p)
|
bokeh/plotting/glyph_api.py
|
line
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Line)
def line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(title="line", width=300, height=300)\n p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])\n\n show(p)\n\n'
|
@glyph_method(glyphs.MultiLine)
def multi_line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n For this glyph, the data is not simply an array of scalars, it is an\n "array of arrays".\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],\n color=[\'red\',\'green\'])\n\n show(p)\n\n'
| -8,230,569,948,412,799,000
|
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
color=['red','green'])
show(p)
|
bokeh/plotting/glyph_api.py
|
multi_line
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.MultiLine)
def multi_line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n For this glyph, the data is not simply an array of scalars, it is an\n "array of arrays".\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],\n color=[\'red\',\'green\'])\n\n show(p)\n\n'
|
@glyph_method(glyphs.MultiPolygons)
def multi_polygons(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"\n.. note::\n For this glyph, the data is not simply an array of scalars, it is a\n nested array.\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],\n ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],\n color=['red', 'green'])\n show(p)\n\n"
| 5,069,927,588,036,277,000
|
.. note::
For this glyph, the data is not simply an array of scalars, it is a
nested array.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],
ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],
color=['red', 'green'])
show(p)
|
bokeh/plotting/glyph_api.py
|
multi_polygons
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.MultiPolygons)
def multi_polygons(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"\n.. note::\n For this glyph, the data is not simply an array of scalars, it is a\n nested array.\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],\n ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],\n color=['red', 'green'])\n show(p)\n\n"
|
@glyph_method(glyphs.Oval)
def oval(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,\n angle=-0.7, color="#1D91C0")\n\n show(plot)\n\n'
| 9,096,103,592,002,765,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
angle=-0.7, color="#1D91C0")
show(plot)
|
bokeh/plotting/glyph_api.py
|
oval
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Oval)
def oval(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,\n angle=-0.7, color="#1D91C0")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Patch)
def patch(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")\n\n show(p)\n\n'
| -417,730,788,079,325,100
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
show(p)
|
bokeh/plotting/glyph_api.py
|
patch
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Patch)
def patch(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")\n\n show(p)\n\n'
|
@glyph_method(glyphs.Patches)
def patches(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n For this glyph, the data is not simply an array of scalars, it is an\n "array of arrays".\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],\n color=["#43a2ca", "#a8ddb5"])\n\n show(p)\n\n'
| 8,282,966,439,202,146,000
|
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
color=["#43a2ca", "#a8ddb5"])
show(p)
|
bokeh/plotting/glyph_api.py
|
patches
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Patches)
def patches(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n For this glyph, the data is not simply an array of scalars, it is an\n "array of arrays".\n\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n p = figure(width=300, height=300)\n p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],\n color=["#43a2ca", "#a8ddb5"])\n\n show(p)\n\n'
|
@marker_method()
def plus(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")\n\n show(plot)\n\n'
| 1,416,541,568,013,438,700
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
|
bokeh/plotting/glyph_api.py
|
plus
|
AzureTech/bokeh
|
python
|
@marker_method()
def plus(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Quad)
def quad(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],\n right=[1.2, 2.5, 3.7], color="#B3DE69")\n\n show(plot)\n\n'
| -4,907,198,913,454,473,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
right=[1.2, 2.5, 3.7], color="#B3DE69")
show(plot)
|
bokeh/plotting/glyph_api.py
|
quad
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Quad)
def quad(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],\n right=[1.2, 2.5, 3.7], color="#B3DE69")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Ray)
def ray(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",\n line_width=2)\n\n show(plot)\n\n'
| -6,554,476,667,526,731,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
ray
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Ray)
def ray(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",\n line_width=2)\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Rect)
def rect(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",\n width_units="screen", height_units="screen")\n\n show(plot)\n\n'
| 705,059,381,429,693,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
width_units="screen", height_units="screen")
show(plot)
|
bokeh/plotting/glyph_api.py
|
rect
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Rect)
def rect(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",\n width_units="screen", height_units="screen")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Step)
def step(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")\n\n show(plot)\n\n'
| -3,492,657,471,929,209,300
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")
show(plot)
|
bokeh/plotting/glyph_api.py
|
step
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Step)
def step(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Segment)
def segment(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],\n x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],\n color="#F4A582", line_width=3)\n\n show(plot)\n\n'
| -5,143,638,344,136,174,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
color="#F4A582", line_width=3)
show(plot)
|
bokeh/plotting/glyph_api.py
|
segment
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Segment)
def segment(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],\n x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],\n color="#F4A582", line_width=3)\n\n show(plot)\n\n'
|
@marker_method()
def square(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")\n\n show(plot)\n\n'
| 2,493,520,840,458,622,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
|
bokeh/plotting/glyph_api.py
|
square
|
AzureTech/bokeh
|
python
|
@marker_method()
def square(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")\n\n show(plot)\n\n'
|
@marker_method()
def square_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#7FC97F",fill_color=None, line_width=2)\n\n show(plot)\n\n'
| 7,120,505,516,620,560,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
square_cross
|
AzureTech/bokeh
|
python
|
@marker_method()
def square_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#7FC97F",fill_color=None, line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def square_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#7FC97F", fill_color=None)\n\n show(plot)\n\n'
| -5,974,181,200,857,512,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F", fill_color=None)
show(plot)
|
bokeh/plotting/glyph_api.py
|
square_dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def square_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#7FC97F", fill_color=None)\n\n show(plot)\n\n'
|
@marker_method()
def square_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#7FC97F",fill_color=None, line_width=2)\n\n show(plot)\n\n'
| 6,103,200,413,580,941,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
square_pin
|
AzureTech/bokeh
|
python
|
@marker_method()
def square_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#7FC97F",fill_color=None, line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def square_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#FDAE6B",fill_color=None, line_width=2)\n\n show(plot)\n\n'
| 6,320,477,579,178,854,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#FDAE6B",fill_color=None, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
square_x
|
AzureTech/bokeh
|
python
|
@marker_method()
def square_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#FDAE6B",fill_color=None, line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def star(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#1C9099", line_width=2)\n\n show(plot)\n\n'
| -1,999,413,690,404,477,700
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
star
|
AzureTech/bokeh
|
python
|
@marker_method()
def star(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#1C9099", line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def star_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#386CB0", fill_color=None, line_width=2)\n\n show(plot)\n\n'
| 344,974,706,167,149,630
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
star_dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def star_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,\n color="#386CB0", fill_color=None, line_width=2)\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Text)
def text(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n The location and angle of the text relative to the ``x``, ``y`` coordinates\n is indicated by the alignment and baseline text properties.\n\n'
| -893,788,023,180,619,900
|
.. note::
The location and angle of the text relative to the ``x``, ``y`` coordinates
is indicated by the alignment and baseline text properties.
|
bokeh/plotting/glyph_api.py
|
text
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Text)
def text(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\n.. note::\n The location and angle of the text relative to the ``x``, ``y`` coordinates\n is indicated by the alignment and baseline text properties.\n\n'
|
@marker_method()
def triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", line_width=2)\n\n show(plot)\n\n'
| 6,747,221,224,420,255,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
triangle
|
AzureTech/bokeh
|
python
|
@marker_method()
def triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", line_width=2)\n\n show(plot)\n\n'
|
@marker_method()
def triangle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", fill_color=None)\n\n show(plot)\n\n'
| 7,526,180,058,643,305,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", fill_color=None)
show(plot)
|
bokeh/plotting/glyph_api.py
|
triangle_dot
|
AzureTech/bokeh
|
python
|
@marker_method()
def triangle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", fill_color=None)\n\n show(plot)\n\n'
|
@marker_method()
def triangle_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", line_width=2)\n\n show(plot)\n\n'
| -4,137,067,794,809,148,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
|
bokeh/plotting/glyph_api.py
|
triangle_pin
|
AzureTech/bokeh
|
python
|
@marker_method()
def triangle_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],\n color="#99D594", line_width=2)\n\n show(plot)\n\n'
|
@glyph_method(glyphs.VArea)
def varea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],\n fill_color="#99D594")\n\n show(plot)\n\n'
| 8,477,394,530,676,108,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],
fill_color="#99D594")
show(plot)
|
bokeh/plotting/glyph_api.py
|
varea
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.VArea)
def varea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],\n fill_color="#99D594")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.VBar)
def vbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")\n\n show(plot)\n\n'
| -3,424,809,155,875,691,500
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
show(plot)
|
bokeh/plotting/glyph_api.py
|
vbar
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.VBar)
def vbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")\n\n show(plot)\n\n'
|
@glyph_method(glyphs.Wedge)
def wedge(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,\n end_angle=4.1, radius_units="screen", color="#2b8cbe")\n\n show(plot)\n\n'
| 188,312,062,759,754,080
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
end_angle=4.1, radius_units="screen", color="#2b8cbe")
show(plot)
|
bokeh/plotting/glyph_api.py
|
wedge
|
AzureTech/bokeh
|
python
|
@glyph_method(glyphs.Wedge)
def wedge(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,\n end_angle=4.1, radius_units="screen", color="#2b8cbe")\n\n show(plot)\n\n'
|
@marker_method()
def x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")\n\n show(plot)\n\n'
| -2,744,288,825,578,744,300
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
show(plot)
|
bokeh/plotting/glyph_api.py
|
x
|
AzureTech/bokeh
|
python
|
@marker_method()
def x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")\n\n show(plot)\n\n'
|
@marker_method()
def y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")\n\n show(plot)\n\n'
| 5,345,449,241,740,763,000
|
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
|
bokeh/plotting/glyph_api.py
|
y
|
AzureTech/bokeh
|
python
|
@marker_method()
def y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
'\nExamples:\n\n .. code-block:: python\n\n from bokeh.plotting import figure, output_file, show\n\n plot = figure(width=300, height=300)\n plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")\n\n show(plot)\n\n'
|
def scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
' Creates a scatter plot of the given x and y items.\n\n Args:\n x (str or seq[float]) : values or field names of center x coordinates\n\n y (str or seq[float]) : values or field names of center y coordinates\n\n size (str or list[float]) : values or field names of sizes in |screen units|\n\n marker (str, or list[str]): values or field names of marker types\n\n color (color value, optional): shorthand to set both fill and line color\n\n source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.\n An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`\n if needed. If none is supplied, one is created for the user automatically.\n\n **kwargs: |line properties| and |fill properties|\n\n Examples:\n\n >>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")\n >>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)\n\n .. note::\n When passing ``marker="circle"`` it is also possible to supply a\n ``radius`` value in |data units|. When configuring marker type\n from a data source column, *all* markers including circles may only\n be configured with ``size`` in |screen units|.\n\n '
marker_type = kwargs.pop('marker', 'circle')
if (isinstance(marker_type, str) and (marker_type in _MARKER_SHORTCUTS)):
marker_type = _MARKER_SHORTCUTS[marker_type]
if ((marker_type == 'circle') and ('radius' in kwargs)):
return self.circle(*args, **kwargs)
else:
return self._scatter(*args, marker=marker_type, **kwargs)
| 4,243,868,176,108,034,000
|
Creates a scatter plot of the given x and y items.
Args:
x (str or seq[float]) : values or field names of center x coordinates
y (str or seq[float]) : values or field names of center y coordinates
size (str or list[float]) : values or field names of sizes in |screen units|
marker (str, or list[str]): values or field names of marker types
color (color value, optional): shorthand to set both fill and line color
source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
if needed. If none is supplied, one is created for the user automatically.
**kwargs: |line properties| and |fill properties|
Examples:
>>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")
>>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)
.. note::
When passing ``marker="circle"`` it is also possible to supply a
``radius`` value in |data units|. When configuring marker type
from a data source column, *all* markers including circles may only
be configured with ``size`` in |screen units|.
|
bokeh/plotting/glyph_api.py
|
scatter
|
AzureTech/bokeh
|
python
|
def scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
' Creates a scatter plot of the given x and y items.\n\n Args:\n x (str or seq[float]) : values or field names of center x coordinates\n\n y (str or seq[float]) : values or field names of center y coordinates\n\n size (str or list[float]) : values or field names of sizes in |screen units|\n\n marker (str, or list[str]): values or field names of marker types\n\n color (color value, optional): shorthand to set both fill and line color\n\n source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.\n An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`\n if needed. If none is supplied, one is created for the user automatically.\n\n **kwargs: |line properties| and |fill properties|\n\n Examples:\n\n >>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")\n >>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)\n\n .. note::\n When passing ``marker="circle"`` it is also possible to supply a\n ``radius`` value in |data units|. When configuring marker type\n from a data source column, *all* markers including circles may only\n be configured with ``size`` in |screen units|.\n\n '
marker_type = kwargs.pop('marker', 'circle')
if (isinstance(marker_type, str) and (marker_type in _MARKER_SHORTCUTS)):
marker_type = _MARKER_SHORTCUTS[marker_type]
if ((marker_type == 'circle') and ('radius' in kwargs)):
return self.circle(*args, **kwargs)
else:
return self._scatter(*args, marker=marker_type, **kwargs)
|
def docutilize(obj):
"Convert Numpy or Google style docstring into reStructuredText format.\n\n Args:\n obj (str or object):\n Takes an object and changes it's docstrings to a reStructuredText\n format.\n Returns:\n str or object:\n A converted string or an object with replaced docstring depending\n on the type of the input.\n "
from inspect import cleandoc, getdoc
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if isinstance(obj, str):
doc = cleandoc(obj)
else:
doc = getdoc(obj)
doc = str(NumpyDocstring(doc))
doc = str(GoogleDocstring(doc))
doc = doc.replace(':exc:', '')
doc = doc.replace(':data:', '')
doc = doc.replace(':keyword', ':param')
doc = doc.replace(':kwtype', ':type')
if isinstance(obj, str):
return doc
obj.__doc__ = doc
return obj
| -4,612,370,075,998,829,000
|
Convert Numpy or Google style docstring into reStructuredText format.
Args:
obj (str or object):
Takes an object and changes it's docstrings to a reStructuredText
format.
Returns:
str or object:
A converted string or an object with replaced docstring depending
on the type of the input.
|
improver/cli/__init__.py
|
docutilize
|
anja-bom/improver
|
python
|
def docutilize(obj):
"Convert Numpy or Google style docstring into reStructuredText format.\n\n Args:\n obj (str or object):\n Takes an object and changes it's docstrings to a reStructuredText\n format.\n Returns:\n str or object:\n A converted string or an object with replaced docstring depending\n on the type of the input.\n "
from inspect import cleandoc, getdoc
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if isinstance(obj, str):
doc = cleandoc(obj)
else:
doc = getdoc(obj)
doc = str(NumpyDocstring(doc))
doc = str(GoogleDocstring(doc))
doc = doc.replace(':exc:', )
doc = doc.replace(':data:', )
doc = doc.replace(':keyword', ':param')
doc = doc.replace(':kwtype', ':type')
if isinstance(obj, str):
return doc
obj.__doc__ = doc
return obj
|
def maybe_coerce_with(converter, obj, **kwargs):
'Apply converter if str, pass through otherwise.'
obj = getattr(obj, 'original_object', obj)
return (converter(obj, **kwargs) if isinstance(obj, str) else obj)
| -8,289,763,171,914,728,000
|
Apply converter if str, pass through otherwise.
|
improver/cli/__init__.py
|
maybe_coerce_with
|
anja-bom/improver
|
python
|
def maybe_coerce_with(converter, obj, **kwargs):
obj = getattr(obj, 'original_object', obj)
return (converter(obj, **kwargs) if isinstance(obj, str) else obj)
|
@value_converter
def inputcube(to_convert):
'Loads cube from file or returns passed object.\n\n Args:\n to_convert (string or iris.cube.Cube):\n File name or Cube object.\n\n Returns:\n Loaded cube or passed object.\n\n '
from improver.utilities.load import load_cube
return maybe_coerce_with(load_cube, to_convert)
| 1,064,669,169,553,711,200
|
Loads cube from file or returns passed object.
Args:
to_convert (string or iris.cube.Cube):
File name or Cube object.
Returns:
Loaded cube or passed object.
|
improver/cli/__init__.py
|
inputcube
|
anja-bom/improver
|
python
|
@value_converter
def inputcube(to_convert):
'Loads cube from file or returns passed object.\n\n Args:\n to_convert (string or iris.cube.Cube):\n File name or Cube object.\n\n Returns:\n Loaded cube or passed object.\n\n '
from improver.utilities.load import load_cube
return maybe_coerce_with(load_cube, to_convert)
|
@value_converter
def inputcube_nolazy(to_convert):
'Loads cube from file or returns passed object.\n Where a load is performed, it will not have lazy data.\n Args:\n to_convert (string or iris.cube.Cube):\n File name or Cube object.\n Returns:\n Loaded cube or passed object.\n '
from improver.utilities.load import load_cube
if getattr(to_convert, 'has_lazy_data', False):
to_convert.data
return maybe_coerce_with(load_cube, to_convert, no_lazy_load=True)
| -8,033,249,565,026,459,000
|
Loads cube from file or returns passed object.
Where a load is performed, it will not have lazy data.
Args:
to_convert (string or iris.cube.Cube):
File name or Cube object.
Returns:
Loaded cube or passed object.
|
improver/cli/__init__.py
|
inputcube_nolazy
|
anja-bom/improver
|
python
|
@value_converter
def inputcube_nolazy(to_convert):
'Loads cube from file or returns passed object.\n Where a load is performed, it will not have lazy data.\n Args:\n to_convert (string or iris.cube.Cube):\n File name or Cube object.\n Returns:\n Loaded cube or passed object.\n '
from improver.utilities.load import load_cube
if getattr(to_convert, 'has_lazy_data', False):
to_convert.data
return maybe_coerce_with(load_cube, to_convert, no_lazy_load=True)
|
@value_converter
def inputcubelist(to_convert):
'Loads a cubelist from file or returns passed object.\n Args:\n to_convert (string or iris.cube.CubeList):\n File name or CubeList object.\n Returns:\n Loaded cubelist or passed object.\n '
from improver.utilities.load import load_cubelist
return maybe_coerce_with(load_cubelist, to_convert)
| -6,123,243,750,426,282,000
|
Loads a cubelist from file or returns passed object.
Args:
to_convert (string or iris.cube.CubeList):
File name or CubeList object.
Returns:
Loaded cubelist or passed object.
|
improver/cli/__init__.py
|
inputcubelist
|
anja-bom/improver
|
python
|
@value_converter
def inputcubelist(to_convert):
'Loads a cubelist from file or returns passed object.\n Args:\n to_convert (string or iris.cube.CubeList):\n File name or CubeList object.\n Returns:\n Loaded cubelist or passed object.\n '
from improver.utilities.load import load_cubelist
return maybe_coerce_with(load_cubelist, to_convert)
|
@value_converter
def inputjson(to_convert):
'Loads json from file or returns passed object.\n\n Args:\n to_convert (string or dict):\n File name or json dictionary.\n\n Returns:\n Loaded json dictionary or passed object.\n\n '
from improver.utilities.cli_utilities import load_json_or_none
return maybe_coerce_with(load_json_or_none, to_convert)
| -3,255,302,438,592,015,400
|
Loads json from file or returns passed object.
Args:
to_convert (string or dict):
File name or json dictionary.
Returns:
Loaded json dictionary or passed object.
|
improver/cli/__init__.py
|
inputjson
|
anja-bom/improver
|
python
|
@value_converter
def inputjson(to_convert):
'Loads json from file or returns passed object.\n\n Args:\n to_convert (string or dict):\n File name or json dictionary.\n\n Returns:\n Loaded json dictionary or passed object.\n\n '
from improver.utilities.cli_utilities import load_json_or_none
return maybe_coerce_with(load_json_or_none, to_convert)
|
@value_converter
def comma_separated_list(to_convert):
'Converts comma separated string to list or returns passed object.\n\n Args:\n to_convert (string or list)\n comma separated string or list\n\n Returns:\n list\n '
return maybe_coerce_with((lambda s: s.split(',')), to_convert)
| 3,608,620,111,620,679,000
|
Converts comma separated string to list or returns passed object.
Args:
to_convert (string or list)
comma separated string or list
Returns:
list
|
improver/cli/__init__.py
|
comma_separated_list
|
anja-bom/improver
|
python
|
@value_converter
def comma_separated_list(to_convert):
'Converts comma separated string to list or returns passed object.\n\n Args:\n to_convert (string or list)\n comma separated string or list\n\n Returns:\n list\n '
return maybe_coerce_with((lambda s: s.split(',')), to_convert)
|
@value_converter
def comma_separated_list_of_float(to_convert):
'Converts comma separated string to list of floats or returns passed object.\n\n Args:\n to_convert (string or list)\n comma separated string or list\n\n Returns:\n list\n '
return maybe_coerce_with((lambda string: [float(s) for s in string.split(',')]), to_convert)
| 4,034,757,157,358,138,400
|
Converts comma separated string to list of floats or returns passed object.
Args:
to_convert (string or list)
comma separated string or list
Returns:
list
|
improver/cli/__init__.py
|
comma_separated_list_of_float
|
anja-bom/improver
|
python
|
@value_converter
def comma_separated_list_of_float(to_convert):
'Converts comma separated string to list of floats or returns passed object.\n\n Args:\n to_convert (string or list)\n comma separated string or list\n\n Returns:\n list\n '
return maybe_coerce_with((lambda string: [float(s) for s in string.split(',')]), to_convert)
|
@value_converter
def inputpath(to_convert):
'Converts string paths to pathlib Path objects\n\n Args:\n to_convert (string or pathlib.Path):\n path represented as string\n\n Returns:\n (pathlib.Path): Path object\n\n '
return maybe_coerce_with(pathlib.Path, to_convert)
| 6,136,849,895,679,115,000
|
Converts string paths to pathlib Path objects
Args:
to_convert (string or pathlib.Path):
path represented as string
Returns:
(pathlib.Path): Path object
|
improver/cli/__init__.py
|
inputpath
|
anja-bom/improver
|
python
|
@value_converter
def inputpath(to_convert):
'Converts string paths to pathlib Path objects\n\n Args:\n to_convert (string or pathlib.Path):\n path represented as string\n\n Returns:\n (pathlib.Path): Path object\n\n '
return maybe_coerce_with(pathlib.Path, to_convert)
|
@value_converter
def inputdatetime(to_convert):
'Converts string to datetime or returns passed object.\n\n Args:\n to_convert (string or datetime):\n datetime represented as string of the format YYYYMMDDTHHMMZ\n\n Returns:\n (datetime): datetime object\n\n '
from improver.utilities.temporal import cycletime_to_datetime
return maybe_coerce_with(cycletime_to_datetime, to_convert)
| 1,230,513,173,964,127,500
|
Converts string to datetime or returns passed object.
Args:
to_convert (string or datetime):
datetime represented as string of the format YYYYMMDDTHHMMZ
Returns:
(datetime): datetime object
|
improver/cli/__init__.py
|
inputdatetime
|
anja-bom/improver
|
python
|
@value_converter
def inputdatetime(to_convert):
'Converts string to datetime or returns passed object.\n\n Args:\n to_convert (string or datetime):\n datetime represented as string of the format YYYYMMDDTHHMMZ\n\n Returns:\n (datetime): datetime object\n\n '
from improver.utilities.temporal import cycletime_to_datetime
return maybe_coerce_with(cycletime_to_datetime, to_convert)
|
def create_constrained_inputcubelist_converter(*constraints):
    """Build a @value_converter that extracts one cube per constraint.

    This avoids the IMPROVER ``load_cube`` (which merges cubes) and plain
    Iris load (which mishandles metadata): the input is loaded as a
    CubeList and each supplied constraint is used to extract exactly one
    cube, with the extracted cubes returned in constraint order.

    Args:
        *constraints (tuple of str or callable or iris.Constraint):
            Constraints used to extract the required cubes. Each must
            match exactly one cube. A constraint may be an
            iris.Constraint, or a callable / cube name from which one is
            constructed.

    Returns:
        callable: A converter function that applies the constraints.
    """

    @value_converter
    def constrained_inputcubelist_converter(to_convert):
        """Load ``to_convert`` and extract one cube per configured constraint.

        Args:
            to_convert (str or iris.cube.CubeList):
                A CubeList, or a filename to load into one.

        Returns:
            iris.cube.CubeList: The extracted cubes, in constraint order.
        """
        from iris import Constraint
        from iris.cube import CubeList
        from improver.utilities.load import load_cubelist

        cubelist = maybe_coerce_with(load_cubelist, to_convert)
        extracted = []
        for constraint in constraints:
            # Callables (e.g. cube_func predicates) are wrapped into a
            # Constraint; strings/Constraints are used as-is.
            if callable(constraint):
                constraint = Constraint(cube_func=constraint)
            extracted.append(cubelist.extract_cube(constraint))
        return CubeList(extracted)

    return constrained_inputcubelist_converter
| -340,979,575,987,960,260
|
Makes function that the input constraints are used in a loop.
The function is a @value_converter, this means it is used by clize to convert
strings into objects.
This is a way of not using the IMPROVER load_cube which will try to merge
cubes. Iris load on the other hand won't deal with meta data properly.
So an example is if you wanted to load an X cube and a Y cube from a cubelist
of 2. You call this function with a list of constraints.
These cubes get loaded and returned as a CubeList.
Args:
*constraints (tuple of str or callable or iris.Constraint):
Constraints to be used in extracting the required cubes.
Each constraint must match exactly one cube and extracted cubes
will be sorted to match their order.
A constraint can be an iris.Constraint object or a callable
or cube name that can be used to construct one.
Returns:
callable:
A function with the constraints used for a list comprehension.
|
improver/cli/__init__.py
|
create_constrained_inputcubelist_converter
|
anja-bom/improver
|
python
|
def create_constrained_inputcubelist_converter(*constraints):
    """Makes function that the input constraints are used in a loop.

    The function is a @value_converter, this means it is used by clize to
    convert strings into objects.
    This is a way of not using the IMPROVER load_cube which will try to
    merge cubes. Iris load on the other hand won't deal with meta data
    properly. So an example is if you wanted to load an X cube and a Y cube
    from a cubelist of 2, you call this function with a list of
    constraints. These cubes get loaded and returned as a CubeList.

    Args:
        *constraints (tuple of str or callable or iris.Constraint):
            Constraints to be used in extracting the required cubes.
            Each constraint must match exactly one cube and extracted
            cubes will be sorted to match their order.
            A constraint can be an iris.Constraint object or a callable
            or cube name that can be used to construct one.

    Returns:
        callable:
            A function with the constraints used for a list comprehension.
    """

    @value_converter
    def constrained_inputcubelist_converter(to_convert):
        """Passes the cube and constraints onto maybe_coerce_with.

        Args:
            to_convert (str or iris.cube.CubeList):
                A CubeList or a filename to be loaded into a CubeList.

        Returns:
            iris.cube.CubeList:
                The loaded cubelist of constrained cubes.
        """
        # Local imports keep module import time low for CLI start-up.
        from iris import Constraint
        from iris.cube import CubeList
        from improver.utilities.load import load_cubelist
        cubelist = maybe_coerce_with(load_cubelist, to_convert)
        # Callables become cube_func constraints; strings/Constraints are
        # used directly. extract_cube raises if a constraint does not
        # match exactly one cube.
        return CubeList((cubelist.extract_cube((Constraint(cube_func=constr) if callable(constr) else constr)) for constr in constraints))
    return constrained_inputcubelist_converter
|
@decorator
def with_output(wrapped, *args, output=None, compression_level=1, least_significant_digit: int = None, **kwargs):
    """Add ``output``, ``compression_level`` and ``least_significant_digit``
    keyword-only CLI options to *wrapped*.

    When ``output`` is supplied and *wrapped* produces a truthy result, the
    result is saved to that file and None is returned; otherwise the result
    is returned to the caller. ``compression_level`` selects netCDF
    compression (0 disables it). ``least_significant_digit``, if given,
    quantizes the saved data to a precision of
    10**(-least_significant_digit) — lossy when combined with compression.
    See
    http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
    for the quantization convention.

    Args:
        wrapped (obj):
            The function to be wrapped.
        output (str, optional):
            Output file name. If not supplied the result object is
            returned instead of being saved.
        compression_level (int):
            Compression level (1 to 9), or 0 to disable compression.
        least_significant_digit (int):
            Precision to which the saved data is truncated, if given.

    Returns:
        Result of calling *wrapped*, or None once it has been saved.
    """
    from improver.utilities.save import save_netcdf

    result = wrapped(*args, **kwargs)
    # Only save when there is both a destination and something to write;
    # otherwise hand the (possibly falsy) result back to the caller.
    if not (output and result):
        return result
    save_netcdf(result, output, compression_level, least_significant_digit)
    return None
| 1,128,665,921,701,529,000
|
Add `output` keyword only argument.
Add `compression_level` option.
Add `least_significant_digit` option.
This is used to add extra `output`, `compression_level` and `least_significant_digit` CLI
options. If `output` is provided, it saves the result of calling `wrapped` to file and returns
None, otherwise it returns the result. If `compression_level` is provided, it compresses the
data with the provided compression level (or not, if `compression_level` 0). If
`least_significant_digit` provided, it will quantize the data to a certain number of
significant figures.
Args:
wrapped (obj):
The function to be wrapped.
output (str, optional):
Output file name. If not supplied, the output object will be
printed instead.
compression_level (int):
Will set the compression level (1 to 9), or disable compression (0).
least_significant_digit (int):
If specified will truncate the data to a precision given by
10**(-least_significant_digit), e.g. if least_significant_digit=2, then the data will
be quantized to a precision of 0.01 (10**(-2)). See
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
for details. When used with `compression level`, this will result in lossy
compression.
Returns:
Result of calling `wrapped` or None if `output` is given.
|
improver/cli/__init__.py
|
with_output
|
anja-bom/improver
|
python
|
@decorator
def with_output(wrapped, *args, output=None, compression_level=1, least_significant_digit: int = None, **kwargs):
    """Add `output` keyword only argument.
    Add `compression_level` option.
    Add `least_significant_digit` option.

    This is used to add extra `output`, `compression_level` and
    `least_significant_digit` CLI options. If `output` is provided, it
    saves the result of calling `wrapped` to file and returns None,
    otherwise it returns the result. If `compression_level` is provided,
    it compresses the data with the provided compression level (or not, if
    `compression_level` is 0). If `least_significant_digit` is provided,
    it will quantize the data to a certain number of significant figures.

    Args:
        wrapped (obj):
            The function to be wrapped.
        output (str, optional):
            Output file name. If not supplied, the result object is
            returned to the caller instead of being saved.
        compression_level (int):
            Will set the compression level (1 to 9), or disable
            compression (0).
        least_significant_digit (int):
            If specified will truncate the data to a precision given by
            10**(-least_significant_digit), e.g. if
            least_significant_digit=2, then the data will be quantized to
            a precision of 0.01 (10**(-2)). See
            http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
            for details. When used with `compression_level`, this will
            result in lossy compression.

    Returns:
        Result of calling `wrapped` or None if `output` is given.
    """
    # Local import keeps CLI start-up cheap.
    from improver.utilities.save import save_netcdf
    result = wrapped(*args, **kwargs)
    # NOTE(review): a falsy result (e.g. None) is returned unsaved even
    # when `output` is given — presumably intentional; confirm.
    if (output and result):
        save_netcdf(result, output, compression_level, least_significant_digit)
        return
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.