body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def __decide_h_extension(self):
    """Decide which language 'owns' how many '.h' files.

    The '.h' extension is tallied for C, C++ and Objective-C alike, so the
    shared header count is redistributed proportionally to the number of
    non-header source files each language has.

    :returns: The report with the divided header-file counts
    """
    report = self.__report
    h_files = report['C']['.h']
    if h_files > 0:
        # Non-header file counts per language. '.h' (and '.c' for C++)
        # appear in several languages' extension maps, so they are
        # subtracted to avoid double counting -- TODO confirm against the
        # extension tables defined elsewhere in this class.
        c_files = sum(report['C'].values()) - h_files
        cpp_files = sum(report['C++'].values()) - h_files - report['C++']['.c']
        oc_files = sum(report['Objective-C'].values()) - h_files
        lang_files = c_files + cpp_files + oc_files  # fixed 'lang_fiels' typo
        if lang_files == 0:
            # Only headers were found: attribute them all to C.
            report['C']['.h'] = 1
            report['C++']['.h'] = 0
            report['Objective-C']['.h'] = 0
        else:
            # Proportional split; true division may yield float counts.
            report['C']['.h'] = (h_files * c_files) / lang_files
            report['C++']['.h'] = (h_files * cpp_files) / lang_files
            report['Objective-C']['.h'] = (h_files * oc_files) / lang_files
    return report
| 5,973,847,334,793,231,000
|
Decides which language 'owns' how many .h files
:returns: The report with divided header files
|
gitScrabber/scrabTasks/file/languageDetector.py
|
__decide_h_extension
|
Eyenseo/gitScrabber
|
python
|
def __decide_h_extension(self):
    """Decide which language 'owns' how many '.h' files.

    Redistributes the shared '.h' header count between C, C++ and
    Objective-C in proportion to each language's other source files.

    :returns: The report with the divided header-file counts
    """
    report = self.__report
    h_files = report['C']['.h']
    if (h_files > 0):
        # Non-header files per language; '.h' (and '.c' for C++) are shared
        # extensions and are subtracted to avoid double counting.
        c_files = (sum(report['C'].values()) - h_files)
        cpp_files = ((sum(report['C++'].values()) - h_files) - report['C++']['.c'])
        oc_files = (sum(report['Objective-C'].values()) - h_files)
        lang_fiels = ((c_files + cpp_files) + oc_files)  # NOTE(review): 'fiels' is a typo for 'files'
        if (lang_fiels == 0):
            # Only headers were found: attribute them all to C.
            report['C']['.h'] = 1
            report['C++']['.h'] = 0
            report['Objective-C']['.h'] = 0
        else:
            # Proportional split; true division may yield float counts.
            report['C']['.h'] = ((h_files * c_files) / lang_fiels)
            report['C++']['.h'] = ((h_files * cpp_files) / lang_fiels)
            report['Objective-C']['.h'] = ((h_files * oc_files) / lang_fiels)
    return report
|
def __calculate_main_language(self, report):
    """Return the language owning the most files (maximum of summed
    extension counts), or ``None`` when no language has any files.

    :param report: Per-language mapping of file extension -> count

    :returns: The main language name, or None.
    """
    totals = {language: sum(counts.values()) for language, counts in report.items()}
    # max() returns the first maximum in iteration order, which matches the
    # original strict-'<' loop's tie-breaking.
    leader = max(totals, key=totals.get, default=None)
    if leader is None or totals[leader] == 0:
        return None
    return leader
| 8,954,739,240,078,890,000
|
Calculates the main language (maximum of files extensions)
:param report: The report
:returns: The main language.
|
gitScrabber/scrabTasks/file/languageDetector.py
|
__calculate_main_language
|
Eyenseo/gitScrabber
|
python
|
def __calculate_main_language(self, report):
    """Calculate the main language (the one with the most counted files).

    :param report: Per-language mapping of file extension -> count

    :returns: The main language name, or None when no files were counted.
    """
    max_files = 0
    max_lang = None
    for language in report:
        lang_fiels = sum(report[language].values())
        # Strict '<' keeps the first language encountered on ties.
        if (max_files < lang_fiels):
            max_lang = language
            max_files = lang_fiels
    return max_lang
|
def __calculate_used_languages(self, report):
    """Collapse the report to the languages that own at least one file.

    The per-extension counts are discarded; only languages with a non-zero
    total appear in the result.

    :param report: Per-language mapping of file extension -> count

    :returns: Language names sorted by file count, descending.
    """
    totals = ((language, sum(extensions.values())) for language, extensions in report.items())
    used = {language: count for language, count in totals if count > 0}
    return sorted(used, key=used.get, reverse=True)
| 8,194,500,951,750,470,000
|
Calculates the used languages by throwing away the extension counts and
collapsing them to the language. Only languages that have at least one
file extension are kept and will appear in the report
:param report: The report
:returns: The used languages.
|
gitScrabber/scrabTasks/file/languageDetector.py
|
__calculate_used_languages
|
Eyenseo/gitScrabber
|
python
|
def __calculate_used_languages(self, report):
    """Collapse the report to the list of languages that own at least one
    file, discarding the per-extension counts.

    :param report: Per-language mapping of file extension -> count

    :returns: Language names sorted by file count, descending.
    """
    languages = {}
    for language in report:
        total_files = sum(report[language].values())
        # Keep only languages that actually matched at least one file.
        if (total_files > 0):
            languages[language] = total_files
    return sorted(languages, key=languages.get, reverse=True)
|
def scrab(self, project, filepath, file):
    """Tally this file under every language that claims its extension.

    :param project: The project that the scrab task shall analyse (unused)
    :param filepath: The filepath of the file that can be analysed
    :param file: The file contents as a string (unused)

    :returns: None -- the per-language report is updated in place, with
              each extension counter incremented by at most one per call.
    """
    _, extension = os.path.splitext(filepath)
    # An extension may belong to several languages; each one gets the tally.
    for language, known_extensions in self.__language_extensions.items():
        if extension in known_extensions:
            self.__report[language][extension] += 1
| 9,044,298,979,763,655,000
|
Counts the files that have an extension of one of the languages
:param project: The project that the scrab task shall analyse
:param filepath: The filepath to the file that can be analysed
:param file: The file as string that can be analysed
:returns: Report that contains the scrabbed information of *this* file
- the extensions have either a count of 0 or 1
|
gitScrabber/scrabTasks/file/languageDetector.py
|
scrab
|
Eyenseo/gitScrabber
|
python
|
def scrab(self, project, filepath, file):
    """Count this file toward every language that claims its extension.

    :param project: The project that the scrab task shall analyse (unused)
    :param filepath: The filepath of the file that can be analysed
    :param file: The file contents as a string (unused)

    :returns: None -- the per-language report is mutated in place; each
              extension counter grows by at most one per call.
    """
    (filename, file_extension) = os.path.splitext(filepath)
    # An extension may belong to several languages; each one gets the tally.
    for language in self.__language_extensions:
        if (file_extension in self.__language_extensions[language]):
            self.__report[language][file_extension] += 1
|
def report(self):
    """Build the final report.

    Divides '.h' ownership between the C-family languages, determines the
    main language and keeps only the collapsed language list, e.g.::

        LanguageDetector:
            main_language: C
            languages: [C, C++, Python]

    :returns: dict with 'main_language' and 'languages' keys.
    """
    divided = self.__decide_h_extension()
    return {
        'main_language': self.__calculate_main_language(divided),
        'languages': self.__calculate_used_languages(divided),
    }
| 3,744,189,683,670,182,400
|
Decides which headers files are (probable) from which language,
calculates the main language and removes redundant / unnecessary
detailed information from the report
:param report: The complete report this task created
:returns: Report that contains all scrabbed information
eg.:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
|
gitScrabber/scrabTasks/file/languageDetector.py
|
report
|
Eyenseo/gitScrabber
|
python
|
def report(self):
    """Build the final report: divide '.h' ownership, pick the main
    language and keep only the collapsed language list, e.g.::

        LanguageDetector:
            main_language: C
            languages: [C, C++, Python]

    :returns: dict with 'main_language' and 'languages' keys.
    """
    pre_report = self.__decide_h_extension()
    main_language = self.__calculate_main_language(pre_report)
    report = {}
    report['main_language'] = main_language
    report['languages'] = self.__calculate_used_languages(pre_report)
    return report
|
def main():
    """Hep Mortality Prediction App.

    Streamlit entry point: renders Home / Login / SignUp pages and, after a
    successful login, the data-visualisation and mortality-prediction
    activities.
    """
    st.markdown(html_temp.format('royalblue'), unsafe_allow_html=True)
    menu = ['Home', 'Login', 'SignUp']
    sub_menu = ['Plot', 'Prediction']
    choice = st.sidebar.selectbox('Menu', menu)
    if (choice == 'Home'):
        st.subheader('Home')
        st.markdown(descriptive_message_temp, unsafe_allow_html=True)
        st.image(load_image('hepimage.jpg'))
    elif (choice == 'Login'):
        username = st.sidebar.text_input('Username')
        password = st.sidebar.text_input('Password', type='password')
        if st.sidebar.checkbox('Login'):
            create_usertable()
            # Hash the typed password and verify it before the DB lookup.
            hashed_pswd = generate_hashes(password)
            result = login_user(username, verify_hashes(password, hashed_pswd))
            if result:
                st.success('Welcome {}'.format(username))
                activity = st.selectbox('Activity', sub_menu)
                if (activity == 'Plot'):
                    st.subheader('Data Vis Plot')
                    df = pd.read_csv('clean_hepatitis_dataset.csv')
                    st.dataframe(df)
                    df['class'].value_counts().plot(kind='bar')
                    # NOTE(review): bare st.pyplot() relies on the implicit
                    # global matplotlib figure (deprecated in newer Streamlit).
                    st.pyplot()
                    freq_df = pd.read_csv('freq_df_hepatitis_dataset.csv')
                    st.bar_chart(freq_df['count'])
                    if st.checkbox('Area Chart'):
                        all_columns = df.columns.to_list()
                        feat_choices = st.multiselect('Choose a Feature', all_columns)
                        new_df = df[feat_choices]
                        st.area_chart(new_df)
                elif (activity == 'Prediction'):
                    st.subheader('Predictive Analytics')
                    # Collect the 14 model features from the UI widgets.
                    age = st.number_input('Age', 7, 80)
                    sex = st.radio('Sex', tuple(gender_dict.keys()))
                    steroid = st.radio('Do You Take Steroids?', tuple(feature_dict.keys()))
                    antivirals = st.radio('Do You Take Antivirals?', tuple(feature_dict.keys()))
                    fatigue = st.radio('Do You Have Fatigue', tuple(feature_dict.keys()))
                    spiders = st.radio('Presence of Spider Naeve', tuple(feature_dict.keys()))
                    ascites = st.selectbox('Ascities', tuple(feature_dict.keys()))
                    varices = st.selectbox('Presence of Varices', tuple(feature_dict.keys()))
                    bilirubin = st.number_input('bilirubin Content', 0.0, 8.0)
                    alk_phosphate = st.number_input('Alkaline Phosphate Content', 0.0, 296.0)
                    sgot = st.number_input('Sgot', 0.0, 648.0)
                    albumin = st.number_input('Albumin', 0.0, 6.4)
                    protime = st.number_input('Prothrombin Time', 0.0, 100.0)
                    histology = st.selectbox('Histology', tuple(feature_dict.keys()))
                    feature_list = [age, get_value(sex, gender_dict), get_fvalue(steroid), get_fvalue(antivirals), get_fvalue(fatigue), get_fvalue(spiders), get_fvalue(ascites), get_fvalue(varices), bilirubin, alk_phosphate, sgot, albumin, int(protime), get_fvalue(histology)]
                    st.write(len(feature_list))
                    st.write(feature_list)
                    # NOTE(review): 'histolog' looks like a typo for
                    # 'histology' in the displayed JSON key.
                    pretty_result = {'age': age, 'sex': sex, 'steroid': steroid, 'antivirals': antivirals, 'fatigue': fatigue, 'spiders': spiders, 'ascites': ascites, 'varices': varices, 'bilirubin': bilirubin, 'alk_phosphate': alk_phosphate, 'sgot': sgot, 'albumin': albumin, 'protime': protime, 'histolog': histology}
                    st.json(pretty_result)
                    # Single sample as a 2-D row vector for sklearn predictors.
                    single_sample = np.array(feature_list).reshape(1, (- 1))
                    model_choice = st.selectbox('Select Model', ['LR', 'KNN', 'DecisionTree'])
                    if st.button('Predict'):
                        if (model_choice == 'KNN'):
                            loaded_model = load_model('knn_hepB_model.pkl')
                            prediction = loaded_model.predict(single_sample)
                            pred_prob = loaded_model.predict_proba(single_sample)
                        elif (model_choice == 'DecisionTree'):
                            loaded_model = load_model('decision_tree_clf_hepB_model.pkl')
                            prediction = loaded_model.predict(single_sample)
                            pred_prob = loaded_model.predict_proba(single_sample)
                        else:
                            # Default branch handles the 'LR' choice.
                            loaded_model = load_model('logistic_regression_hepB_model.pkl')
                            prediction = loaded_model.predict(single_sample)
                            pred_prob = loaded_model.predict_proba(single_sample)
                        # Presumably class 1 = dies, class 2 = lives -- TODO
                        # confirm against the dataset's label encoding.
                        if (prediction == 1):
                            st.warning('Patient Dies')
                            pred_probability_score = {'Die': (pred_prob[0][0] * 100), 'Live': (pred_prob[0][1] * 100)}
                            st.subheader('Prediction Probability Score using {}'.format(model_choice))
                            st.json(pred_probability_score)
                            st.subheader('Prescriptive Analytics')
                            st.markdown(prescriptive_message_temp, unsafe_allow_html=True)
                        else:
                            st.success('Patient Lives')
                            pred_probability_score = {'Die': (pred_prob[0][0] * 100), 'Live': (pred_prob[0][1] * 100)}
                            st.subheader('Prediction Probability Score using {}'.format(model_choice))
                            st.json(pred_probability_score)
                    if st.checkbox('Interpret'):
                        # Re-load the chosen model for the LIME explanation.
                        if (model_choice == 'KNN'):
                            loaded_model = load_model('knn_hepB_model.pkl')
                        elif (model_choice == 'DecisionTree'):
                            loaded_model = load_model('decision_tree_clf_hepB_model.pkl')
                        else:
                            loaded_model = load_model('logistic_regression_hepB_model.pkl')
                        df = pd.read_csv('clean_hepatitis_dataset.csv')
                        x = df[['age', 'sex', 'steroid', 'antivirals', 'fatigue', 'spiders', 'ascites', 'varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime', 'histology']]
                        feature_names = ['age', 'sex', 'steroid', 'antivirals', 'fatigue', 'spiders', 'ascites', 'varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime', 'histology']
                        class_names = ['Die(1)', 'Live(2)']
                        explainer = lime.lime_tabular.LimeTabularExplainer(x.values, feature_names=feature_names, class_names=class_names, discretize_continuous=True)
                        # Explain the current sample's predicted probabilities.
                        exp = explainer.explain_instance(np.array(feature_list), loaded_model.predict_proba, num_features=13, top_labels=1)
                        exp.show_in_notebook(show_table=True, show_all=False)
                        st.write(exp.as_list())
                        new_exp = exp.as_list()
                        label_limits = [i[0] for i in new_exp]
                        label_scores = [i[1] for i in new_exp]
                        plt.barh(label_limits, label_scores)
                        st.pyplot()
                        plt.figure(figsize=(20, 10))
                        fig = exp.as_pyplot_figure()
                        st.pyplot()
            else:
                st.warning('Incorrect Username/Password')
    elif (choice == 'SignUp'):
        new_username = st.text_input('User name')
        new_password = st.text_input('Password', type='password')
        confirm_password = st.text_input('Confirm Password', type='password')
        if (new_password == confirm_password):
            st.success('Password Confirmed')
        else:
            st.warning('Passwords not the same')
        if st.button('Submit'):
            create_usertable()
            hashed_new_password = generate_hashes(new_password)
            add_userdata(new_username, hashed_new_password)
            st.success('You have successfully created a new account')
            st.info('Login to Get Started')
| 1,479,472,205,569,399,300
|
Hep Mortality Prediction App
|
app.py
|
main
|
Let-Me-Code/Hepatitis-B-Mortality-Prediction
|
python
|
def main():
    """Hep Mortality Prediction App.

    Streamlit entry point: renders Home / Login / SignUp pages and, after a
    successful login, the data-visualisation and mortality-prediction
    activities.
    """
    st.markdown(html_temp.format('royalblue'), unsafe_allow_html=True)
    menu = ['Home', 'Login', 'SignUp']
    sub_menu = ['Plot', 'Prediction']
    choice = st.sidebar.selectbox('Menu', menu)
    if (choice == 'Home'):
        st.subheader('Home')
        st.markdown(descriptive_message_temp, unsafe_allow_html=True)
        st.image(load_image('hepimage.jpg'))
    elif (choice == 'Login'):
        username = st.sidebar.text_input('Username')
        password = st.sidebar.text_input('Password', type='password')
        if st.sidebar.checkbox('Login'):
            create_usertable()
            # Hash the typed password and verify it before the DB lookup.
            hashed_pswd = generate_hashes(password)
            result = login_user(username, verify_hashes(password, hashed_pswd))
            if result:
                st.success('Welcome {}'.format(username))
                activity = st.selectbox('Activity', sub_menu)
                if (activity == 'Plot'):
                    st.subheader('Data Vis Plot')
                    df = pd.read_csv('clean_hepatitis_dataset.csv')
                    st.dataframe(df)
                    df['class'].value_counts().plot(kind='bar')
                    # NOTE(review): bare st.pyplot() relies on the implicit
                    # global matplotlib figure (deprecated in newer Streamlit).
                    st.pyplot()
                    freq_df = pd.read_csv('freq_df_hepatitis_dataset.csv')
                    st.bar_chart(freq_df['count'])
                    if st.checkbox('Area Chart'):
                        all_columns = df.columns.to_list()
                        feat_choices = st.multiselect('Choose a Feature', all_columns)
                        new_df = df[feat_choices]
                        st.area_chart(new_df)
                elif (activity == 'Prediction'):
                    st.subheader('Predictive Analytics')
                    # Collect the 14 model features from the UI widgets.
                    age = st.number_input('Age', 7, 80)
                    sex = st.radio('Sex', tuple(gender_dict.keys()))
                    steroid = st.radio('Do You Take Steroids?', tuple(feature_dict.keys()))
                    antivirals = st.radio('Do You Take Antivirals?', tuple(feature_dict.keys()))
                    fatigue = st.radio('Do You Have Fatigue', tuple(feature_dict.keys()))
                    spiders = st.radio('Presence of Spider Naeve', tuple(feature_dict.keys()))
                    ascites = st.selectbox('Ascities', tuple(feature_dict.keys()))
                    varices = st.selectbox('Presence of Varices', tuple(feature_dict.keys()))
                    bilirubin = st.number_input('bilirubin Content', 0.0, 8.0)
                    alk_phosphate = st.number_input('Alkaline Phosphate Content', 0.0, 296.0)
                    sgot = st.number_input('Sgot', 0.0, 648.0)
                    albumin = st.number_input('Albumin', 0.0, 6.4)
                    protime = st.number_input('Prothrombin Time', 0.0, 100.0)
                    histology = st.selectbox('Histology', tuple(feature_dict.keys()))
                    feature_list = [age, get_value(sex, gender_dict), get_fvalue(steroid), get_fvalue(antivirals), get_fvalue(fatigue), get_fvalue(spiders), get_fvalue(ascites), get_fvalue(varices), bilirubin, alk_phosphate, sgot, albumin, int(protime), get_fvalue(histology)]
                    st.write(len(feature_list))
                    st.write(feature_list)
                    # NOTE(review): 'histolog' looks like a typo for
                    # 'histology' in the displayed JSON key.
                    pretty_result = {'age': age, 'sex': sex, 'steroid': steroid, 'antivirals': antivirals, 'fatigue': fatigue, 'spiders': spiders, 'ascites': ascites, 'varices': varices, 'bilirubin': bilirubin, 'alk_phosphate': alk_phosphate, 'sgot': sgot, 'albumin': albumin, 'protime': protime, 'histolog': histology}
                    st.json(pretty_result)
                    # Single sample as a 2-D row vector for sklearn predictors.
                    single_sample = np.array(feature_list).reshape(1, (- 1))
                    model_choice = st.selectbox('Select Model', ['LR', 'KNN', 'DecisionTree'])
                    if st.button('Predict'):
                        if (model_choice == 'KNN'):
                            loaded_model = load_model('knn_hepB_model.pkl')
                            prediction = loaded_model.predict(single_sample)
                            pred_prob = loaded_model.predict_proba(single_sample)
                        elif (model_choice == 'DecisionTree'):
                            loaded_model = load_model('decision_tree_clf_hepB_model.pkl')
                            prediction = loaded_model.predict(single_sample)
                            pred_prob = loaded_model.predict_proba(single_sample)
                        else:
                            # Default branch handles the 'LR' choice.
                            loaded_model = load_model('logistic_regression_hepB_model.pkl')
                            prediction = loaded_model.predict(single_sample)
                            pred_prob = loaded_model.predict_proba(single_sample)
                        # Presumably class 1 = dies, class 2 = lives -- TODO
                        # confirm against the dataset's label encoding.
                        if (prediction == 1):
                            st.warning('Patient Dies')
                            pred_probability_score = {'Die': (pred_prob[0][0] * 100), 'Live': (pred_prob[0][1] * 100)}
                            st.subheader('Prediction Probability Score using {}'.format(model_choice))
                            st.json(pred_probability_score)
                            st.subheader('Prescriptive Analytics')
                            st.markdown(prescriptive_message_temp, unsafe_allow_html=True)
                        else:
                            st.success('Patient Lives')
                            pred_probability_score = {'Die': (pred_prob[0][0] * 100), 'Live': (pred_prob[0][1] * 100)}
                            st.subheader('Prediction Probability Score using {}'.format(model_choice))
                            st.json(pred_probability_score)
                    if st.checkbox('Interpret'):
                        # Re-load the chosen model for the LIME explanation.
                        if (model_choice == 'KNN'):
                            loaded_model = load_model('knn_hepB_model.pkl')
                        elif (model_choice == 'DecisionTree'):
                            loaded_model = load_model('decision_tree_clf_hepB_model.pkl')
                        else:
                            loaded_model = load_model('logistic_regression_hepB_model.pkl')
                        df = pd.read_csv('clean_hepatitis_dataset.csv')
                        x = df[['age', 'sex', 'steroid', 'antivirals', 'fatigue', 'spiders', 'ascites', 'varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime', 'histology']]
                        feature_names = ['age', 'sex', 'steroid', 'antivirals', 'fatigue', 'spiders', 'ascites', 'varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime', 'histology']
                        class_names = ['Die(1)', 'Live(2)']
                        explainer = lime.lime_tabular.LimeTabularExplainer(x.values, feature_names=feature_names, class_names=class_names, discretize_continuous=True)
                        # Explain the current sample's predicted probabilities.
                        exp = explainer.explain_instance(np.array(feature_list), loaded_model.predict_proba, num_features=13, top_labels=1)
                        exp.show_in_notebook(show_table=True, show_all=False)
                        st.write(exp.as_list())
                        new_exp = exp.as_list()
                        label_limits = [i[0] for i in new_exp]
                        label_scores = [i[1] for i in new_exp]
                        plt.barh(label_limits, label_scores)
                        st.pyplot()
                        plt.figure(figsize=(20, 10))
                        fig = exp.as_pyplot_figure()
                        st.pyplot()
            else:
                st.warning('Incorrect Username/Password')
    elif (choice == 'SignUp'):
        new_username = st.text_input('User name')
        new_password = st.text_input('Password', type='password')
        confirm_password = st.text_input('Confirm Password', type='password')
        if (new_password == confirm_password):
            st.success('Password Confirmed')
        else:
            st.warning('Passwords not the same')
        if st.button('Submit'):
            create_usertable()
            hashed_new_password = generate_hashes(new_password)
            add_userdata(new_username, hashed_new_password)
            st.success('You have successfully created a new account')
            st.info('Login to Get Started')
|
@property
def num_preds(self):
    """int: the number of predictions in this assignment."""
    prediction_count = len(self.gt_inds)
    return prediction_count
| 7,780,834,999,563,918,000
|
int: the number of predictions in this assignment
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
num_preds
|
yangzilongdmgy/merge_monster_3d
|
python
|
@property
def num_preds(self):
    """int: the number of predictions in this assignment."""
    return len(self.gt_inds)
|
def set_extra_property(self, key, value):
    """Store a user-defined extra property under ``key``.

    The key must not collide with any key already exposed by ``self.info``.
    """
    assert key not in self.info
    self._extra_properties[key] = value
| 393,492,990,254,824,600
|
Set user-defined new property.
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
set_extra_property
|
yangzilongdmgy/merge_monster_3d
|
python
|
def set_extra_property(self, key, value):
    """Set user-defined new property.

    :param key: Property name; must not clash with a key already present
        in ``self.info``.
    :param value: Arbitrary value to store.
    """
    # NOTE(review): assert is stripped under ``python -O``; an explicit
    # raise would validate unconditionally.
    assert (key not in self.info)
    self._extra_properties[key] = value
|
def get_extra_property(self, key):
    """Return the user-defined property stored under ``key``.

    :returns: The stored value, or ``None`` when the key was never set.
    """
    return self._extra_properties.get(key)
| -7,626,049,926,330,966,000
|
Get user-defined property.
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
get_extra_property
|
yangzilongdmgy/merge_monster_3d
|
python
|
def get_extra_property(self, key):
    """Get user-defined property.

    :returns: The stored value, or ``None`` when the key was never set.
    """
    return self._extra_properties.get(key, None)
|
@property
def info(self):
    """dict: a dictionary of info about the object."""
    summary = dict(
        num_gts=self.num_gts,
        num_preds=self.num_preds,
        gt_inds=self.gt_inds,
        max_overlaps=self.max_overlaps,
        labels=self.labels,
    )
    # Merge in user-set extras (they may shadow the basic keys).
    summary.update(self._extra_properties)
    return summary
| 8,762,152,943,817,003,000
|
dict: a dictionary of info about the object
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
info
|
yangzilongdmgy/merge_monster_3d
|
python
|
@property
def info(self):
    """dict: a dictionary of info about the object."""
    basic_info = {'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels}
    # User-set extra properties are merged in (they may shadow basic keys).
    basic_info.update(self._extra_properties)
    return basic_info
|
def __nice__(self):
    """str: a "nice" summary string describing this assign result."""
    parts = [f'num_gts={self.num_gts!r}']
    # For each tensor attribute show its shape when set, else repr(None).
    for attr_name in ('gt_inds', 'max_overlaps', 'labels'):
        value = getattr(self, attr_name)
        if value is None:
            parts.append(f'{attr_name}={value!r}')
        else:
            parts.append(f'{attr_name}.shape={tuple(value.shape)!r}')
    return ', '.join(parts)
| -2,866,129,337,503,404,000
|
str: a "nice" summary string describing this assign result
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
__nice__
|
yangzilongdmgy/merge_monster_3d
|
python
|
def __nice__(self):
    """str: a "nice" summary string describing this assign result."""
    parts = []
    parts.append(f'num_gts={self.num_gts!r}')
    # For each tensor attribute show its shape when set, else repr(None).
    if (self.gt_inds is None):
        parts.append(f'gt_inds={self.gt_inds!r}')
    else:
        parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
    if (self.max_overlaps is None):
        parts.append(f'max_overlaps={self.max_overlaps!r}')
    else:
        parts.append(f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
    if (self.labels is None):
        parts.append(f'labels={self.labels!r}')
    else:
        parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
    return ', '.join(parts)
|
@classmethod
def random(cls, **kwargs):
    """Create random AssignResult for tests or debugging.

    Args:
        num_preds: number of predicted boxes
        num_gts: number of true boxes
        p_ignore (float): probability of a predicted box assigned to an
            ignored truth
        p_assigned (float): probability of a predicted box not being
            assigned
        p_use_label (float | bool): with labels or not
        num_classes (int): number of label classes (default 3)
        rng (None | int | numpy.random.RandomState): seed or state

    Returns:
        :obj:`AssignResult`: Randomly generated assign results.

    Example:
        >>> self = AssignResult.random()
        >>> print(self.info)
    """
    import numpy as np
    rng = kwargs.get('rng', None)
    num_gts = kwargs.get('num_gts', None)
    num_preds = kwargs.get('num_preds', None)
    p_ignore = kwargs.get('p_ignore', 0.3)
    p_assigned = kwargs.get('p_assigned', 0.7)
    p_use_label = kwargs.get('p_use_label', 0.5)
    # BUG FIX: this previously read kwargs.get('p_use_label', 3), so a
    # caller-supplied 'num_classes' was silently ignored.
    num_classes = kwargs.get('num_classes', 3)
    if rng is None:
        rng = np.random.mtrand._rand
    elif isinstance(rng, int):
        rng = np.random.RandomState(rng)
    if num_gts is None:
        num_gts = rng.randint(0, 8)
    if num_preds is None:
        num_preds = rng.randint(0, 16)
    if num_gts == 0:
        # No ground truth: every prediction is background.
        max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
        gt_inds = torch.zeros(num_preds, dtype=torch.int64)
        if (p_use_label is True) or (p_use_label < rng.rand()):
            labels = torch.zeros(num_preds, dtype=torch.int64)
        else:
            labels = None
    else:
        max_overlaps = torch.from_numpy(rng.rand(num_preds))
        # Choose exactly n_assigned predictions to carry an assignment.
        is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
        n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
        assigned_idxs = np.where(is_assigned)[0]
        rng.shuffle(assigned_idxs)
        assigned_idxs = assigned_idxs[0:n_assigned]
        assigned_idxs.sort()
        is_assigned[:] = 0
        is_assigned[assigned_idxs] = True
        is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
        # NOTE(review): the next five lines are immediately overwritten by
        # the rng.randint draw below (same as upstream mmdet); they are kept
        # because the shuffle consumes rng state, and removing it would
        # change the sampled values for a given seed.
        gt_inds = torch.zeros(num_preds, dtype=torch.int64)
        true_idxs = np.arange(num_gts)
        rng.shuffle(true_idxs)
        true_idxs = torch.from_numpy(true_idxs)
        gt_inds[is_assigned] = true_idxs[:n_assigned]
        gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
        gt_inds[is_ignore] = -1
        gt_inds[~is_assigned] = 0
        max_overlaps[~is_assigned] = 0
        if (p_use_label is True) or (p_use_label < rng.rand()):
            if num_classes == 0:
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
                labels[~is_assigned] = 0
        else:
            labels = None
    return cls(num_gts, gt_inds, max_overlaps, labels)
| 1,650,000,623,902,313,500
|
Create random AssignResult for tests or debugging.
Args:
num_preds: number of predicted boxes
num_gts: number of true boxes
p_ignore (float): probability of a predicted box assinged to an
ignored truth
p_assigned (float): probability of a predicted box not being
assigned
p_use_label (float | bool): with labels or not
rng (None | int | numpy.random.RandomState): seed or state
Returns:
:obj:`AssignResult`: Randomly generated assign results.
Example:
>>> from nanodet.model.head.assigner.assign_result import AssignResult
>>> self = AssignResult.random()
>>> print(self.info)
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
random
|
yangzilongdmgy/merge_monster_3d
|
python
|
@classmethod
def random(cls, **kwargs):
    """Create random AssignResult for tests or debugging.

    Args:
        num_preds: number of predicted boxes
        num_gts: number of true boxes
        p_ignore (float): probability of a predicted box assigned to an
            ignored truth
        p_assigned (float): probability of a predicted box not being
            assigned
        p_use_label (float | bool): with labels or not
        rng (None | int | numpy.random.RandomState): seed or state

    Returns:
        :obj:`AssignResult`: Randomly generated assign results.
    """
    rng = kwargs.get('rng', None)
    num_gts = kwargs.get('num_gts', None)
    num_preds = kwargs.get('num_preds', None)
    p_ignore = kwargs.get('p_ignore', 0.3)
    p_assigned = kwargs.get('p_assigned', 0.7)
    p_use_label = kwargs.get('p_use_label', 0.5)
    # NOTE(review): likely bug -- the key should be 'num_classes', not
    # 'p_use_label' (upstream mmdet reads kwargs.get('num_classes', 3)).
    num_classes = kwargs.get('p_use_label', 3)
    import numpy as np
    if (rng is None):
        rng = np.random.mtrand._rand
    elif isinstance(rng, int):
        rng = np.random.RandomState(rng)
    else:
        rng = rng
    if (num_gts is None):
        num_gts = rng.randint(0, 8)
    if (num_preds is None):
        num_preds = rng.randint(0, 16)
    if (num_gts == 0):
        # No ground truth: every prediction is background.
        max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
        gt_inds = torch.zeros(num_preds, dtype=torch.int64)
        if ((p_use_label is True) or (p_use_label < rng.rand())):
            labels = torch.zeros(num_preds, dtype=torch.int64)
        else:
            labels = None
    else:
        import numpy as np
        max_overlaps = torch.from_numpy(rng.rand(num_preds))
        # Choose exactly n_assigned predictions to carry an assignment.
        is_assigned = torch.from_numpy((rng.rand(num_preds) < p_assigned))
        n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
        assigned_idxs = np.where(is_assigned)[0]
        rng.shuffle(assigned_idxs)
        assigned_idxs = assigned_idxs[0:n_assigned]
        assigned_idxs.sort()
        is_assigned[:] = 0
        is_assigned[assigned_idxs] = True
        is_ignore = (torch.from_numpy((rng.rand(num_preds) < p_ignore)) & is_assigned)
        # NOTE(review): gt_inds built here is overwritten by the randint
        # draw below (also present upstream); the shuffle still consumes
        # rng state, so removing it would change seeded outputs.
        gt_inds = torch.zeros(num_preds, dtype=torch.int64)
        true_idxs = np.arange(num_gts)
        rng.shuffle(true_idxs)
        true_idxs = torch.from_numpy(true_idxs)
        gt_inds[is_assigned] = true_idxs[:n_assigned]
        gt_inds = torch.from_numpy(rng.randint(1, (num_gts + 1), size=num_preds))
        gt_inds[is_ignore] = (- 1)
        gt_inds[(~ is_assigned)] = 0
        max_overlaps[(~ is_assigned)] = 0
        if ((p_use_label is True) or (p_use_label < rng.rand())):
            if (num_classes == 0):
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
                labels[(~ is_assigned)] = 0
        else:
            labels = None
    self = cls(num_gts, gt_inds, max_overlaps, labels)
    return self
|
def add_gt_(self, gt_labels):
    """Prepend the ground-truth boxes to this assignment as self-assigned
    results.

    Args:
        gt_labels (torch.Tensor): Labels of gt boxes
    """
    num_gts = len(gt_labels)
    # Ground truth i is matched to itself, i.e. 1-based gt index i + 1.
    self_inds = torch.arange(1, num_gts + 1, dtype=torch.long, device=gt_labels.device)
    self.gt_inds = torch.cat((self_inds, self.gt_inds))
    # A gt box has perfect overlap (1.0) with itself.
    self.max_overlaps = torch.cat((self.max_overlaps.new_ones(num_gts), self.max_overlaps))
    if self.labels is not None:
        self.labels = torch.cat((gt_labels, self.labels))
| 6,881,752,854,443,798,000
|
Add ground truth as assigned results.
Args:
gt_labels (torch.Tensor): Labels of gt boxes
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
add_gt_
|
yangzilongdmgy/merge_monster_3d
|
python
|
def add_gt_(self, gt_labels):
    """Add ground truth as assigned results.

    Each gt box is prepended and assigned to itself (1-based gt index).

    Args:
        gt_labels (torch.Tensor): Labels of gt boxes
    """
    self_inds = torch.arange(1, (len(gt_labels) + 1), dtype=torch.long, device=gt_labels.device)
    self.gt_inds = torch.cat([self_inds, self.gt_inds])
    # A gt box has perfect overlap (1.0) with itself.
    self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
    if (self.labels is not None):
        self.labels = torch.cat([gt_labels, self.labels])
|
def glue(self, pos):
    """
    Behaves like a simple line port, but for a folded interface suggests
    connection to the middle point of the port.
    """
    if not self.is_folded():
        # Unfolded: nearest point on the line segment.
        dist, point = distance_line_point(self.start, self.end, pos)
        return point, dist
    # Folded: snap to the midpoint of the port segment.
    mid_x = (self.start.x + self.end.x) / 2
    mid_y = (self.start.y + self.end.y) / 2
    dist = distance_point_point((mid_x, mid_y), pos)
    return (mid_x, mid_y), dist
| 8,054,494,464,208,342,000
|
Behaves like simple line port, but for folded interface suggests
connection to the middle point of a port.
|
gaphor/diagram/classes/interface.py
|
glue
|
987Frogh/Makehuman
|
python
|
def glue(self, pos):
    """
    Behaves like simple line port, but for folded interface suggests
    connection to the middle point of a port.
    """
    if self.is_folded():
        # Folded: snap to the midpoint of the port segment.
        px = ((self.start.x + self.end.x) / 2)
        py = ((self.start.y + self.end.y) / 2)
        d = distance_point_point((px, py), pos)
        return ((px, py), d)
    else:
        # Unfolded: nearest point on the line segment.
        (d, pl) = distance_line_point(self.start, self.end, pos)
        return (pl, d)
|
def _set_folded(self, folded):
    """
    Set folded notation.

    :param folded: Folded state, see Folded.* enum.
    """
    if self._folded == folded:
        return
    self._folded = folded
    if folded == Folded.NONE:
        # Unfolded interface: a normal, resizable item.
        movable = True
    else:
        # Folded: a fixed-size icon (provided vs required radius).
        if self._folded == Folded.PROVIDED:
            diameter = 2 * self.RADIUS_PROVIDED
        else:
            diameter = 2 * self.RADIUS_REQUIRED
        self.min_width = self.min_height = diameter
        self.width = self.height = diameter
        # Pin the south-east handle relative to the north-west one so the
        # item keeps the icon's fixed dimensions.
        nw_handle = self._handles[NW]
        se_handle = self._handles[SE]
        se_handle.pos.x = nw_handle.pos.x + self.min_width
        se_handle.pos.y = nw_handle.pos.y + self.min_height
        movable = False
    for handle in self._handles:
        handle.movable = movable
    self.update_shapes()
| 614,404,046,103,405,400
|
Set folded notation.
:param folded: Folded state, see Folded.* enum.
|
gaphor/diagram/classes/interface.py
|
_set_folded
|
987Frogh/Makehuman
|
python
|
def _set_folded(self, folded):
'\n Set folded notation.\n\n :param folded: Folded state, see Folded.* enum.\n '
if (self._folded == folded):
return
self._folded = folded
if (folded == Folded.NONE):
movable = True
else:
if (self._folded == Folded.PROVIDED):
icon_size = (self.RADIUS_PROVIDED * 2)
else:
icon_size = (self.RADIUS_REQUIRED * 2)
(self.min_width, self.min_height) = (icon_size, icon_size)
(self.width, self.height) = (icon_size, icon_size)
h_nw = self._handles[NW]
h_se = self._handles[SE]
h_se.pos.x = (h_nw.pos.x + self.min_width)
h_se.pos.y = (h_nw.pos.y + self.min_height)
movable = False
for h in self._handles:
h.movable = movable
self.update_shapes()
|
def main(argv):
    """Entry point: parse command-line options from *argv* and run."""
    # options_script returns (result_dir, src_dir); forward both to run().
    run(*options_script(argv))
| 7,503,100,500,502,827,000
|
Main function.
|
Outils/TRIOXDATA/XTriou/Extract_xdata.py
|
main
|
cea-trust-platform/trust-code
|
python
|
def main(argv):
'\n \n '
(result_dir, src_dir) = options_script(argv)
run(result_dir, src_dir)
|
def assert_matches_stdout(actual, expected_stdout, normalize_fn=(lambda elem: elem), label=''):
    """Asserts a PCollection of strings matches the expected stdout elements.

    Args:
      actual (beam.PCollection): A PCollection.
      expected_stdout (List[str]): A list of stdout elements, one line per element.
      normalize_fn (Function[any]): A function to normalize elements before
          comparing them. Can be used to sort lists before comparing.
      label (str): [optional] Label to make transform names unique.
    """
    def _parse(line):
        # Interpret the line as a Python literal when possible; fall back to
        # the raw string otherwise.
        try:
            value = ast.literal_eval(line)
        except (SyntaxError, ValueError):
            value = line
        return normalize_fn(value)

    transformed = actual | label >> beam.Map(_parse)
    expected = [_parse(line) for line in expected_stdout]
    assert_that(transformed, equal_to(expected), 'assert ' + label)
| -4,696,306,568,593,374,000
|
Asserts a PCollection of strings matches the expected stdout elements.
Args:
actual (beam.PCollection): A PCollection.
expected (List[str]): A list of stdout elements, one line per element.
normalize_fn (Function[any]): A function to normalize elements before
comparing them. Can be used to sort lists before comparing.
label (str): [optional] Label to make transform names unique.
|
sdks/python/apache_beam/examples/snippets/util.py
|
assert_matches_stdout
|
DevangiDas/beam
|
python
|
def assert_matches_stdout(actual, expected_stdout, normalize_fn=(lambda elem: elem), label=''):
'Asserts a PCollection of strings matches the expected stdout elements.\n\n Args:\n actual (beam.PCollection): A PCollection.\n expected (List[str]): A list of stdout elements, one line per element.\n normalize_fn (Function[any]): A function to normalize elements before\n comparing them. Can be used to sort lists before comparing.\n label (str): [optional] Label to make transform names unique.\n '
def stdout_to_python_object(elem_str):
try:
elem = ast.literal_eval(elem_str)
except (SyntaxError, ValueError):
elem = elem_str
return normalize_fn(elem)
actual = (actual | (label >> beam.Map(stdout_to_python_object)))
expected = list(map(stdout_to_python_object, expected_stdout))
assert_that(actual, equal_to(expected), ('assert ' + label))
|
def run_shell_commands(commands, **kwargs):
    """Runs a list of Notebook-like shell commands.

    Lines starting with `#` are ignored as comments.
    Lines starting with `!` are run as commands.
    Variables like `{variable}` are substituted with **kwargs.

    Yields the argv list for each command (empty for comment-only lines).
    """
    for raw in commands:
        command = raw.strip().lstrip('!').format(**kwargs)
        # shlex honours `#` comments, so comment lines split to an empty list.
        argv = shlex.split(command, comments=True, posix=True)
        if argv:
            sp.call(argv)
        yield argv
| 8,061,451,941,788,008,000
|
Runs a list of Notebook-like shell commands.
Lines starting with `#` are ignored as comments.
Lines starting with `!` are run as commands.
Variables like `{variable}` are substituted with **kwargs.
|
sdks/python/apache_beam/examples/snippets/util.py
|
run_shell_commands
|
DevangiDas/beam
|
python
|
def run_shell_commands(commands, **kwargs):
'Runs a list of Notebook-like shell commands.\n\n Lines starting with `#` are ignored as comments.\n Lines starting with `!` are run as commands.\n Variables like `{variable}` are substituted with **kwargs.\n '
for cmd in commands:
cmd = cmd.strip().lstrip('!').format(**kwargs)
sp_cmd = shlex.split(cmd, comments=True, posix=True)
if sp_cmd:
sp.call(sp_cmd)
(yield sp_cmd)
|
def __init__(self, parnames=None, name=''):
    """Initialize the kernel, optionally aliasing its parameter names.

    :param parnames:
        A list of names of the kernel params, used to alias the intrinsic
        parameter names. This way different instances of the same kernel
        can have different parameter names. Must match the length of
        ``self.kernel_params``. Omitted/empty means "use the intrinsic
        names".

    :param name:
        Optional human-readable name for this kernel instance.
    """
    # Previously `parnames=[]` — a mutable default argument. It was never
    # mutated here, but `None` is the safe, idiomatic sentinel; an empty
    # list and None both fall back to the intrinsic names, so callers are
    # unaffected.
    if not parnames:
        parnames = self.kernel_params
    assert len(parnames) == len(self.kernel_params)
    self.param_alias = dict(zip(self.kernel_params, parnames))
    # Current parameter values, filled in later by `update()`.
    self.params = {}
    self.name = name
| 80,885,053,197,826,180
|
:param parnames:
A list of names of the kernel params, used to alias the intrinsic
parameter names. This way different instances of the same kernel
can have different parameter names.
|
prospect/likelihood/kernels.py
|
__init__
|
errai34/prospector
|
python
|
def __init__(self, parnames=[], name=''):
'\n :param parnames:\n A list of names of the kernel params, used to alias the intrinsic\n parameter names. This way different instances of the same kernel\n can have different parameter names.\n '
if (len(parnames) == 0):
parnames = self.kernel_params
assert (len(parnames) == len(self.kernel_params))
self.param_alias = dict(zip(self.kernel_params, parnames))
self.params = {}
self.name = name
|
def update(self, **kwargs):
    """Pick the properly named (aliased) kernel parameters out of *kwargs*
    and store them in the parameter state dictionary.
    """
    for param in self.kernel_params:
        self.params[param] = kwargs[self.param_alias[param]]
| -4,019,182,405,496,869,400
|
Take a dictionary of parameters, pick out the properly named
parameters according to the alias, and put them in the param state
dictionary.
|
prospect/likelihood/kernels.py
|
update
|
errai34/prospector
|
python
|
def update(self, **kwargs):
'Take a dictionary of parameters, pick out the properly named\n parameters according to the alias, and put them in the param state\n dictionary.\n '
for k in self.kernel_params:
self.params[k] = kwargs[self.param_alias[k]]
|
def __call__(self, metric, weights=None, ndim=2, **extras):
    """Return a covariance matrix for *metric*.

    Optionally multiply the output kernel by a weight function to induce
    non-stationarity.
    """
    kernel = self.construct_kernel(metric)
    # np.diag either extracts the diagonal or builds a diagonal matrix,
    # whichever brings the kernel to the requested dimensionality.
    if ndim != kernel.ndim:
        kernel = np.diag(kernel)
    if weights is None:
        return kernel
    if ndim == 2:
        return (weights[None, :] * kernel) * weights[:, None]
    return kernel * (weights ** 2)
| 1,769,017,840,861,649,200
|
Return a covariance matrix, given a metric. Optionally, multiply
the output kernel by a weight function to induce non-stationarity.
|
prospect/likelihood/kernels.py
|
__call__
|
errai34/prospector
|
python
|
def __call__(self, metric, weights=None, ndim=2, **extras):
'Return a covariance matrix, given a metric. Optionally, multiply\n the output kernel by a weight function to induce non-stationarity.\n '
k = self.construct_kernel(metric)
if (ndim != k.ndim):
k = np.diag(k)
if (weights is None):
return k
elif (ndim == 2):
Sigma = ((weights[None, :] * k) * weights[:, None])
else:
Sigma = (k * (weights ** 2))
return Sigma
|
def construct_kernel(self, metric):
    """Construct an exponential-squared (RBF) covariance matrix."""
    amplitude = self.params['amplitude']
    length = self.params['length']
    # Pairwise differences via broadcasting: delta[i, j] = metric[i] - metric[j].
    delta = metric[:, None] - metric[None, :]
    return amplitude ** 2 * np.exp(-delta ** 2 / (2 * length ** 2))
| -3,748,341,603,669,811,000
|
Construct an exponential squared covariance matrix.
|
prospect/likelihood/kernels.py
|
construct_kernel
|
errai34/prospector
|
python
|
def construct_kernel(self, metric):
'\n '
(a, l) = (self.params['amplitude'], self.params['length'])
Sigma = ((a ** 2) * np.exp(((- ((metric[:, None] - metric[None, :]) ** 2)) / (2 * (l ** 2)))))
return Sigma
|
def construct_kernel(self, metric):
    """Construct a Matern covariance matrix for nu = 3/2."""
    amplitude = self.params['amplitude']
    length = self.params['length']
    # Scaled pairwise distance: sqrt(3) * |d_ij| / length.
    scaled = np.sqrt(3) * np.abs(metric[:, None] - metric[None, :]) / length
    return amplitude ** 2 * (1 + scaled) * np.exp(-scaled)
| -2,407,672,587,236,184,600
|
Construct a Matern kernel covariance matrix, for
u=3/2.
|
prospect/likelihood/kernels.py
|
construct_kernel
|
errai34/prospector
|
python
|
def construct_kernel(self, metric):
'Construct a Matern kernel covariance matrix, for \nu=3/2.\n '
(a, l) = (self.params['amplitude'], self.params['length'])
Sigma = ((np.sqrt(3) * np.abs((metric[:, None] - metric[None, :]))) / l)
Sigma = (((a ** 2) * (1 + Sigma)) * np.exp((- Sigma)))
return Sigma
|
def print(self):
    """Print the person's data (name, age, phone).

    :return: None
    """
    info = 'Name: {}, age: {}, phone: {}'.format(self.name, self.age, self.phone)
    print(info)
| 2,257,337,300,328,433,200
|
Method prints person's data.
:return: None
|
person.py
|
print
|
jhsaraja/testiprojekti
|
python
|
def print(self):
" Method prints person's data.\n\n :return: None\n "
print('Name: {}, age: {}, phone: {}'.format(self.name, self.age, self.phone))
|
def set_name(self, name):
    """Store *name* as the person's new name.

    :param name: new name for the person, string
    :return: None
    """
    self.name = name
| -8,456,299,319,435,507,000
|
Method saves a new name for the person.
:param name: new name for the person, string
:return: None
|
person.py
|
set_name
|
jhsaraja/testiprojekti
|
python
|
def set_name(self, name):
' Method saves a new name for the person.\n\n :param name: new name for the person, string\n :return: None\n '
self.name = name
|
def get_name(self):
    """Return the person's name.

    :return: name, string
    """
    return self.name
| 8,722,847,781,120,407,000
|
Method returns the name of the person.
:return: name, string
|
person.py
|
get_name
|
jhsaraja/testiprojekti
|
python
|
def get_name(self):
' Method returns the name of the person.\n\n :return: name, string\n '
return self.name
|
def set_age(self, age):
    """Store *age* as the person's new age.

    Non-integer or negative values are rejected with a printed message
    and the current age is kept unchanged.

    :param age: new age for the person, integer
    :return: None
    """
    # NOTE: `type(age) == int` deliberately excludes bool (a subclass of int).
    if type(age) == int and age >= 0:
        self.age = age
    else:
        print('not valid age {}'.format(age))
| 2,367,029,125,253,940,000
|
Method saves a new age for the person.
:param age: new age for the person, integer
:return: None
|
person.py
|
set_age
|
jhsaraja/testiprojekti
|
python
|
def set_age(self, age):
' Method saves a new age for the person.\n\n :param age: new age for the person, integer\n :return: None\n '
if (type(age) != int):
print('not valid age {}'.format(age))
return
if (age >= 0):
self.age = age
else:
print('not valid age {}'.format(age))
|
def get_age(self):
    """Return the person's age.

    :return: age, integer
    """
    return self.age
| 5,929,410,324,352,048,000
|
Method returns the age of the person.
:return: age, integer
|
person.py
|
get_age
|
jhsaraja/testiprojekti
|
python
|
def get_age(self):
' Method returns the age of the person.\n\n :return: age, integer\n '
return self.age
|
def set_phone(self, phone):
    """Store *phone* as the person's new phone number.

    :param phone: new phone for the person, string
    :return: None
    """
    self.phone = phone
| 8,880,604,806,047,877,000
|
Method saves a new phone for the person.
:param phone: new phone for the person, string
:return: None
|
person.py
|
set_phone
|
jhsaraja/testiprojekti
|
python
|
def set_phone(self, phone):
' Method saves a new phone for the person.\n\n :param phone: new phone for the person, string\n :return: None\n '
self.phone = phone
|
def get_phone(self):
    """Return the person's phone number.

    :return: phone, string
    """
    return self.phone
| -1,529,533,477,153,461,500
|
Method returns the phone of the person.
:return: phone, string
|
person.py
|
get_phone
|
jhsaraja/testiprojekti
|
python
|
def get_phone(self):
' Method returns the phone of the person.\n\n :return: phone, string\n '
return self.phone
|
def get_title(self):
    """Return the person's title.

    :return: title, string
    """
    return self.title
| 7,125,931,693,280,901,000
|
Method returns the title of the person.
:return: title, string
|
person.py
|
get_title
|
jhsaraja/testiprojekti
|
python
|
def get_title(self):
' Method returns the title of the person.\n\n :return: title, string\n '
return self.title
|
def set_title(self, title):
    """Store *title* as the person's new title.

    :param title: new title for the person, string
    :return: None
    """
    self.title = title
| -5,331,485,032,930,876,000
|
Method saves a new title for the person.
:param title: new title for the person, string
:return: None
|
person.py
|
set_title
|
jhsaraja/testiprojekti
|
python
|
def set_title(self, title):
' Method saves a new title for the person.\n\n :param title: new title for the person, string\n :return: None\n '
self.title = title
|
def get_salary(self):
    """Return the person's salary.

    :return: salary, number
    """
    return self.salary
| -3,578,107,366,643,422,000
|
Method returns the salary of the person.
:return: salary, string
|
person.py
|
get_salary
|
jhsaraja/testiprojekti
|
python
|
def get_salary(self):
' Method returns the salary of the person.\n\n :return: salary, string\n '
return self.salary
|
def set_salary(self, salary):
    """Store *salary* as the person's new salary.

    Negative values are silently ignored.

    :param salary: new salary for the person, non-negative number
    :return: None
    """
    if salary >= 0:
        self.salary = salary
| 4,689,736,759,264,431,000
|
Method saves a new salary for the person.
:param salary: new salary for the person, string
:return: None
|
person.py
|
set_salary
|
jhsaraja/testiprojekti
|
python
|
def set_salary(self, salary):
' Method saves a new salary for the person.\n\n :param salary: new salary for the person, string\n :return: None\n '
if (salary >= 0):
self.salary = salary
|
def get_location(self):
    """Return the person's location.

    :return: location, string
    """
    return self.location
| 1,266,652,687,538,883,800
|
Method returns the location of the person.
:return: location, string
|
person.py
|
get_location
|
jhsaraja/testiprojekti
|
python
|
def get_location(self):
' Method returns the location of the person.\n\n :return: location, string\n '
return self.location
|
def set_location(self, location):
    """Store *location* as the person's new location.

    :param location: new location for the person, string
    :return: None
    """
    self.location = location
| 5,467,453,087,817,736,000
|
Method saves a new location for the person.
:param location: new location for the person, string
:return: None
|
person.py
|
set_location
|
jhsaraja/testiprojekti
|
python
|
def set_location(self, location):
' Method saves a new location for the person.\n\n :param location: new location for the person, string\n :return: None\n '
self.location = location
|
def print_businesscard(self):
    """Print the person's business-card information (name, title, phone).

    :return: None
    """
    card = ' Name: {}\n Title: {}\n Phone: {}'.format(self.name, self.title, self.phone)
    print(card)
| -6,489,935,535,142,710,000
|
Method prints a business card information.
:return: None
|
person.py
|
print_businesscard
|
jhsaraja/testiprojekti
|
python
|
def print_businesscard(self):
' Method prints a business card information.\n\n :return: None\n '
print(' Name: {}\n Title: {}\n Phone: {}'.format(self.name, self.title, self.phone))
|
def get_defaults(lang):
    """Get the language-specific defaults, if available in spaCy.

    This allows using lexical attribute getters that depend on static
    language data, e.g. Token.like_num, Token.is_stop, Doc.noun_chunks etc.

    lang (unicode): The language code.
    RETURNS (Language.Defaults): The language defaults.
    """
    try:
        return get_lang_class(lang).Defaults
    except ImportError:
        # Unknown language: fall back to the generic defaults.
        return Language.Defaults
| -7,850,812,653,197,558,000
|
Get the language-specific defaults, if available in spaCy. This allows
using lexical attribute getters that depend on static language data, e.g.
Token.like_num, Token.is_stop, Doc.noun_chunks etc.
lang (unicode): The language code.
RETURNS (Language.Defaults): The language defaults.
|
spacy_stanfordnlp/language.py
|
get_defaults
|
mehmetilker/spacy-stanfordnlp
|
python
|
def get_defaults(lang):
'Get the language-specific defaults, if available in spaCy. This allows\n using lexical attribute getters that depend on static language data, e.g.\n Token.like_num, Token.is_stop, Doc.noun_chunks etc.\n\n lang (unicode): The language code.\n RETURNS (Language.Defaults): The language defaults.\n '
try:
lang_cls = get_lang_class(lang)
return lang_cls.Defaults
except ImportError:
return Language.Defaults
|
def __init__(self, snlp, meta=None, **kwargs):
    """Initialize the Language class.

    Instead of "en" etc. the language is called "stanfordnlp_en" to avoid
    conflicts with spaCy's built-in languages. Using entry points, this
    also allows serializing and deserializing the language class, and
    "lang": "stanfordnlp_en" in the meta.json will automatically
    instantiate this class if this package is available.

    snlp (stanfordnlp.Pipeline): The loaded StanfordNLP pipeline.
    kwargs: Optional config parameters.
    RETURNS (spacy.language.Language): The nlp object.
    """
    lang = snlp.config['lang']
    self.lang = 'stanfordnlp_' + lang
    self.Defaults = get_defaults(lang)
    self.vocab = self.Defaults.create_vocab()
    self.tokenizer = Tokenizer(snlp, self.vocab)
    self.pipeline = []
    self.max_length = kwargs.get('max_length', 10 ** 6)
    if meta is None:
        self._meta = {'lang': self.lang, 'stanfordnlp': snlp.config}
    else:
        self._meta = dict(meta)
    self._path = None
    self._optimizer = None
| -5,133,790,172,121,754,000
|
Initialize the Language class.
Instead of "en" etc. we call the language "stanfordnlp_en" to not
cause conflicts with spaCy's built-in languages. Using entry points,
this also allows serializing and deserializing the language class
and "lang": "stanfordnlp_en" in the meta.json will automatically
instantiate this class if this package is available.
snlp (stanfordnlp.Pipeline): The loaded StanfordNLP pipeline.
kwargs: Optional config parameters.
RETURNS (spacy.language.Language): The nlp object.
|
spacy_stanfordnlp/language.py
|
__init__
|
mehmetilker/spacy-stanfordnlp
|
python
|
def __init__(self, snlp, meta=None, **kwargs):
'Initialize the Language class.\n\n Instead of "en" etc. we call the language "stanfordnlp_en" to not\n cause conflicts with spaCy\'s built-in languages. Using entry points,\n this also allows serializing and deserializing the language class\n and "lang": "stanfordnlp_en" in the meta.json will automatically\n instantiate this class if this package is available.\n\n snlp (stanfordnlp.Pipeline): The loaded StanfordNLP pipeline.\n kwargs: Optional config parameters.\n RETURNS (spacy.language.Language): The nlp object.\n '
lang = snlp.config['lang']
self.lang = ('stanfordnlp_' + lang)
self.Defaults = get_defaults(lang)
self.vocab = self.Defaults.create_vocab()
self.tokenizer = Tokenizer(snlp, self.vocab)
self.pipeline = []
self.max_length = kwargs.get('max_length', (10 ** 6))
self._meta = ({'lang': self.lang, 'stanfordnlp': snlp.config} if (meta is None) else dict(meta))
self._path = None
self._optimizer = None
|
def __init__(self, snlp, vocab):
    """Initialize the tokenizer.

    snlp (stanfordnlp.Pipeline): The initialized StanfordNLP pipeline.
    vocab (spacy.vocab.Vocab): The vocabulary to use.
    RETURNS (Tokenizer): The custom tokenizer.
    """
    self.vocab = vocab
    self.snlp = snlp
| -2,122,144,844,259,570,200
|
Initialize the tokenizer.
snlp (stanfordnlp.Pipeline): The initialized StanfordNLP pipeline.
vocab (spacy.vocab.Vocab): The vocabulary to use.
RETURNS (Tokenizer): The custom tokenizer.
|
spacy_stanfordnlp/language.py
|
__init__
|
mehmetilker/spacy-stanfordnlp
|
python
|
def __init__(self, snlp, vocab):
'Initialize the tokenizer.\n\n snlp (stanfordnlp.Pipeline): The initialized StanfordNLP pipeline.\n vocab (spacy.vocab.Vocab): The vocabulary to use.\n RETURNS (Tokenizer): The custom tokenizer.\n '
self.snlp = snlp
self.vocab = vocab
|
def __call__(self, text):
    'Convert a StanfordNLP Doc to a spaCy Doc.\n\n text (unicode): The text to process.\n RETURNS (spacy.tokens.Doc): The spaCy Doc object.\n '
    snlp_doc = self.snlp(text)
    # Use StanfordNLP's view of the text so offsets line up with its tokens.
    text = snlp_doc.text
    (tokens, heads) = self.get_tokens_with_heads(snlp_doc)
    if (not len(tokens)):
        raise ValueError('No tokens available.')
    words = []
    spaces = []
    pos = []
    tags = []
    deps = []
    lemmas = []
    offset = 0
    is_aligned = self.check_aligned(text, tokens)
    for (i, token) in enumerate(tokens):
        span = text[offset:]
        if (not len(span)):
            break
        # Advance past any whitespace to find this token's start offset.
        while (len(span) and span[0].isspace()):
            offset += 1
            span = text[offset:]
        words.append(token.text)
        # Intern annotation strings in the vocab; empty string when missing.
        pos.append(self.vocab.strings.add((token.upos or '')))
        tags.append(self.vocab.strings.add((token.xpos or '')))
        deps.append(self.vocab.strings.add((token.dependency_relation or '')))
        lemmas.append(self.vocab.strings.add((token.lemma or '')))
        offset += len(token.text)
        span = text[offset:]
        if (i == (len(tokens) - 1)):
            # The last token is never followed by a space.
            spaces.append(False)
        elif (not is_aligned):
            # Tokenization does not line up with the raw text; assume spaces.
            spaces.append(True)
        else:
            # A space follows unless the next token starts immediately.
            next_token = tokens[(i + 1)]
            spaces.append((not span.startswith(next_token.text)))
    attrs = [POS, TAG, DEP, HEAD]
    array = numpy.array(list(zip(pos, tags, deps, heads)), dtype='uint64')
    doc = Doc(self.vocab, words=words, spaces=spaces).from_array(attrs, array)
    # Lemmas are applied in a second pass with their own attribute array.
    lemma_array = numpy.array([[lemma] for lemma in lemmas], dtype='uint64')
    doc.from_array([LEMMA], lemma_array)
    # Mark the doc as tagged/parsed only when annotations are present.
    if (any(pos) and any(tags)):
        doc.is_tagged = True
    if any(deps):
        doc.is_parsed = True
    return doc
| 621,916,058,141,886,700
|
Convert a StanfordNLP Doc to a spaCy Doc.
text (unicode): The text to process.
RETURNS (spacy.tokens.Doc): The spaCy Doc object.
|
spacy_stanfordnlp/language.py
|
__call__
|
mehmetilker/spacy-stanfordnlp
|
python
|
def __call__(self, text):
'Convert a StanfordNLP Doc to a spaCy Doc.\n\n text (unicode): The text to process.\n RETURNS (spacy.tokens.Doc): The spaCy Doc object.\n '
snlp_doc = self.snlp(text)
text = snlp_doc.text
(tokens, heads) = self.get_tokens_with_heads(snlp_doc)
if (not len(tokens)):
raise ValueError('No tokens available.')
words = []
spaces = []
pos = []
tags = []
deps = []
lemmas = []
offset = 0
is_aligned = self.check_aligned(text, tokens)
for (i, token) in enumerate(tokens):
span = text[offset:]
if (not len(span)):
break
while (len(span) and span[0].isspace()):
offset += 1
span = text[offset:]
words.append(token.text)
        pos.append(self.vocab.strings.add((token.upos or '')))
        tags.append(self.vocab.strings.add((token.xpos or '')))
        deps.append(self.vocab.strings.add((token.dependency_relation or '')))
        lemmas.append(self.vocab.strings.add((token.lemma or '')))
offset += len(token.text)
span = text[offset:]
if (i == (len(tokens) - 1)):
spaces.append(False)
elif (not is_aligned):
spaces.append(True)
else:
next_token = tokens[(i + 1)]
spaces.append((not span.startswith(next_token.text)))
attrs = [POS, TAG, DEP, HEAD]
array = numpy.array(list(zip(pos, tags, deps, heads)), dtype='uint64')
doc = Doc(self.vocab, words=words, spaces=spaces).from_array(attrs, array)
lemma_array = numpy.array([[lemma] for lemma in lemmas], dtype='uint64')
doc.from_array([LEMMA], lemma_array)
if (any(pos) and any(tags)):
doc.is_tagged = True
if any(deps):
doc.is_parsed = True
return doc
|
def get_tokens_with_heads(self, snlp_doc):
    """Flatten the words in the StanfordNLP Doc and compute relative heads.

    snlp_doc (stanfordnlp.Document): The processed StanfordNLP doc.
    RETURNS (tuple): The flat list of words and their relative head offsets.
    """
    tokens = []
    heads = []
    offset = 0
    for sentence in snlp_doc.sentences:
        for token in sentence.tokens:
            for word in token.words:
                if word.governor:
                    # Governor indices are 1-based and sentence-local —
                    # TODO confirm; convert to an offset relative to the
                    # current flat position.
                    relative = word.governor + offset - len(tokens) - 1
                else:
                    # Root word points to itself (offset 0).
                    relative = 0
                heads.append(relative)
                tokens.append(word)
        offset += sum(len(token.words) for token in sentence.tokens)
    return tokens, heads
| -4,882,335,766,437,383,000
|
Flatten the tokens in the StanfordNLP Doc and extract the token indices
of the sentence start tokens to set is_sent_start.
snlp_doc (stanfordnlp.Document): The processed StanfordNLP doc.
RETURNS (list): The tokens (words).
|
spacy_stanfordnlp/language.py
|
get_tokens_with_heads
|
mehmetilker/spacy-stanfordnlp
|
python
|
def get_tokens_with_heads(self, snlp_doc):
'Flatten the tokens in the StanfordNLP Doc and extract the token indices\n of the sentence start tokens to set is_sent_start.\n\n snlp_doc (stanfordnlp.Document): The processed StanfordNLP doc.\n RETURNS (list): The tokens (words).\n '
tokens = []
heads = []
offset = 0
for sentence in snlp_doc.sentences:
for token in sentence.tokens:
for word in token.words:
if word.governor:
head = (((word.governor + offset) - len(tokens)) - 1)
else:
head = 0
heads.append(head)
tokens.append(word)
offset += sum((len(token.words) for token in sentence.tokens))
return (tokens, heads)
|
def create_socket_pair(self):
    """Create a server and a client TCP socket, registering both for cleanup.

    Note: the sockets are only created here — binding/listening is left to
    the caller.

    :return: ``(server, client)`` socket pair.
    """
    pair = []
    for _ in range(2):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        pair.append(sock)
    server, client = pair
    return (server, client)
| -8,949,072,660,331,688,000
|
Creates a local socket listening on a random port.
|
tests/test_ws2_32/test_events.py
|
create_socket_pair
|
opalmer/pycffiwin32
|
python
|
def create_socket_pair(self):
'\n \n '
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(server.close)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(client.close)
return (server, client)
|
def check_config_status(self):
    """Check this subframe's configuration status.

    By default, incorrectly configured subframes in the database are not
    returned from :any:`Frame.mux_subframes` because they cannot be used
    in the bus communication. You can change this behavior by setting
    :any:`Database.show_invalid_from_open` to `True`. When a subframe
    configuration status becomes invalid after the database is opened,
    the subframe still is returned from :any:`Frame.mux_subframes`
    even if :any:`Database.show_invalid_from_open` is `False`.

    Raises:
        :any:`XnetError`: The subframe is incorrectly configured.
    """
    _errors.check_for_error(_props.get_subframe_config_status(self._handle))
| -6,809,806,336,883,870,000
|
Check this subframe's configuration status.
By default, incorrectly configured subframes in the database are not returned from
:any:`Frame.mux_subframes` because they cannot be used in the bus communication.
You can change this behavior by setting :any:`Database.show_invalid_from_open` to `True`.
When a subframe configuration status becomes invalid after the database is opened,
the subframe still is returned from :any:`Frame.mux_subframes`
even if :any:`Database.show_invalid_from_open` is `False`.
Raises:
:any:`XnetError`: The subframe is incorrectly configured.
|
nixnet/database/_subframe.py
|
check_config_status
|
bigoulours/nixnet-python
|
python
|
def check_config_status(self):
"Check this subframe's configuration status.\n\n\n By default, incorrectly configured subframes in the database are not returned from\n :any:`Frame.mux_subframes` because they cannot be used in the bus communication.\n You can change this behavior by setting :any:`Database.show_invalid_from_open` to `True`.\n When a subframe configuration status becomes invalid after the database is opened,\n the subframe still is returned from :any:`Frame.mux_subframes`\n even if :any:`Database.show_invalid_from_open` is `False`.\n\n Raises:\n :any:`XnetError`: The subframe is incorrectly configured.\n "
status_code = _props.get_subframe_config_status(self._handle)
_errors.check_for_error(status_code)
|
def find(self, object_class, object_name):
    """Find a database object relative to this parent object.

    This object may also be a grandparent or great-grandparent. If it is
    a direct parent (for example, :any:`Frame<_frame.Frame>` for
    :any:`Signal<_signal.Signal>`), *object_name* can be short and the
    search proceeds quickly. Otherwise *object_name* must be qualified so
    that it is unique within this object's scope (e.g.
    ``myFrameA.mySignal``).

    Supported ``object_class`` values: :any:`Cluster`,
    :any:`Frame<_frame.Frame>`, :any:`Pdu`, :any:`Signal<_signal.Signal>`,
    :any:`SubFrame`, :any:`Ecu`, :any:`LinSched`, :any:`LinSchedEntry`.

    Args:
        object_class(``DatabaseObject``): The class of the object to find.
        object_name(str): The name of the object to find.
    Returns:
        An instance of the found object.
    Raises:
        ValueError: Unsupported value provided for argument ``object_class``.
        :any:`XnetError`: The object is not found.
    """
    handle = self._handle
    return _find_object.find_object(handle, object_class, object_name)
| 2,359,012,860,875,746,000
|
Finds an object in the database.
This function finds a database object relative to this parent object.
This object may be a grandparent or great-grandparent.
If this object is a direct parent
(for example, :any:`Frame<_frame.Frame>` for :any:`Signal<_signal.Signal>`),
the ``object_name`` to search for can be short, and the search proceeds quickly.
If this object is not a direct parent
(for example, :any:`Database` for :any:`Signal<_signal.Signal>`),
the ``object_name`` to search for must be qualified such
that it is unique within the scope of this object.
For example, if the class of this object is :any:`Cluster`,
and ``object_class`` is :any:`Signal<_signal.Signal>`,
you can specify ``object_name`` of ``mySignal``,
assuming that signal name is unique to the cluster.
If not, you must include the :any:`Frame<_frame.Frame>` name as a prefix,
such as ``myFrameA.mySignal``.
NI-XNET supports the following subclasses of ``DatabaseObject`` as arguments for ``object_class``:
* :any:`nixnet.database.Cluster<Cluster>`
* :any:`nixnet.database.Frame<_frame.Frame>`
* :any:`nixnet.database.Pdu<Pdu>`
* :any:`nixnet.database.Signal<_signal.Signal>`
* :any:`nixnet.database.SubFrame<SubFrame>`
* :any:`nixnet.database.Ecu<Ecu>`
* :any:`nixnet.database.LinSched<LinSched>`
* :any:`nixnet.database.LinSchedEntry<LinSchedEntry>`
Args:
object_class(``DatabaseObject``): The class of the object to find.
object_name(str): The name of the object to find.
Returns:
An instance of the found object.
Raises:
ValueError: Unsupported value provided for argument ``object_class``.
:any:`XnetError`: The object is not found.
|
nixnet/database/_subframe.py
|
find
|
bigoulours/nixnet-python
|
python
|
def find(self, object_class, object_name):
'Finds an object in the database.\n\n This function finds a database object relative to this parent object.\n This object may be a grandparent or great-grandparent.\n\n If this object is a direct parent\n (for example, :any:`Frame<_frame.Frame>` for :any:`Signal<_signal.Signal>`),\n the ``object_name`` to search for can be short, and the search proceeds quickly.\n\n If this object is not a direct parent\n (for example, :any:`Database` for :any:`Signal<_signal.Signal>`),\n the ``object_name`` to search for must be qualified such\n that it is unique within the scope of this object.\n\n For example, if the class of this object is :any:`Cluster`,\n and ``object_class`` is :any:`Signal<_signal.Signal>`,\n you can specify ``object_name`` of ``mySignal``,\n assuming that signal name is unique to the cluster.\n If not, you must include the :any:`Frame<_frame.Frame>` name as a prefix,\n such as ``myFrameA.mySignal``.\n\n NI-XNET supports the following subclasses of ``DatabaseObject`` as arguments for ``object_class``:\n\n * :any:`nixnet.database.Cluster<Cluster>`\n * :any:`nixnet.database.Frame<_frame.Frame>`\n * :any:`nixnet.database.Pdu<Pdu>`\n * :any:`nixnet.database.Signal<_signal.Signal>`\n * :any:`nixnet.database.SubFrame<SubFrame>`\n * :any:`nixnet.database.Ecu<Ecu>`\n * :any:`nixnet.database.LinSched<LinSched>`\n * :any:`nixnet.database.LinSchedEntry<LinSchedEntry>`\n\n Args:\n object_class(``DatabaseObject``): The class of the object to find.\n object_name(str): The name of the object to find.\n Returns:\n An instance of the found object.\n Raises:\n ValueError: Unsupported value provided for argument ``object_class``.\n :any:`XnetError`: The object is not found.\n '
return _find_object.find_object(self._handle, object_class, object_name)
|
@property
def dyn_signals(self):
':any:`DbCollection`: Returns a collection of dynamic :any:`Signal<_signal.Signal>` objects in the subframe.\n\n Those signals are transmitted when the multiplexer signal\n in the frame has the multiplexer value defined in the subframe.\n '
return self._dyn_signals
| -7,876,208,852,592,375,000
|
:any:`DbCollection`: Returns a collection of dynamic :any:`Signal<_signal.Signal>` objects in the subframe.
Those signals are transmitted when the multiplexer signal
in the frame has the multiplexer value defined in the subframe.
|
nixnet/database/_subframe.py
|
dyn_signals
|
bigoulours/nixnet-python
|
python
|
@property
def dyn_signals(self):
':any:`DbCollection`: Returns a collection of dynamic :any:`Signal<_signal.Signal>` objects in the subframe.\n\n Those signals are transmitted when the multiplexer signal\n in the frame has the multiplexer value defined in the subframe.\n '
return self._dyn_signals
|
@property
def frm(self):
':any:`Frame<_frame.Frame>`: Returns the reference to the parent frame.\n\n The parent frame is defined when the subframe is created,\n and you cannot change it afterwards.\n '
handle = _props.get_subframe_frm_ref(self._handle)
return _frame.Frame(_handle=handle)
| 747,096,797,756,296,700
|
:any:`Frame<_frame.Frame>`: Returns the reference to the parent frame.
The parent frame is defined when the subframe is created,
and you cannot change it afterwards.
|
nixnet/database/_subframe.py
|
frm
|
bigoulours/nixnet-python
|
python
|
@property
def frm(self):
':any:`Frame<_frame.Frame>`: Returns the reference to the parent frame.\n\n The parent frame is defined when the subframe is created,\n and you cannot change it afterwards.\n '
handle = _props.get_subframe_frm_ref(self._handle)
return _frame.Frame(_handle=handle)
|
@property
def mux_value(self):
'int: Get or set the multiplexer value for this subframe.\n\n This property specifies the multiplexer signal value used when the\n dynamic signals in this subframe are transmitted in the frame.\n Only one subframe is transmitted at a time in the frame.\n\n There also is a multiplexer value for a signal object as a read-only property.\n It reflects the value set on the parent subframe object.\n\n This property is required. If the property does not contain a valid value,\n and you create an XNET session that uses this subframe,\n the session returns an error.\n To ensure that the property contains a valid value,\n you can do one of the following:\n\n * Use a database file (or alias) to create the session.\n\n The file formats require a valid value in the text for this property.\n\n * Set a value at runtime using this property.\n\n This is needed when you create your own in-memory database (*:memory:*) rather than use a file.\n The property does not contain a default in this case,\n so you must set a valid value prior to creating a session.\n '
return _props.get_subframe_mux_value(self._handle)
| -2,052,745,770,387,338,800
|
int: Get or set the multiplexer value for this subframe.
This property specifies the multiplexer signal value used when the
dynamic signals in this subframe are transmitted in the frame.
Only one subframe is transmitted at a time in the frame.
There also is a multiplexer value for a signal object as a read-only property.
It reflects the value set on the parent subframe object.
This property is required. If the property does not contain a valid value,
and you create an XNET session that uses this subframe,
the session returns an error.
To ensure that the property contains a valid value,
you can do one of the following:
* Use a database file (or alias) to create the session.
The file formats require a valid value in the text for this property.
* Set a value at runtime using this property.
This is needed when you create your own in-memory database (*:memory:*) rather than use a file.
The property does not contain a default in this case,
so you must set a valid value prior to creating a session.
|
nixnet/database/_subframe.py
|
mux_value
|
bigoulours/nixnet-python
|
python
|
@property
def mux_value(self):
'int: Get or set the multiplexer value for this subframe.\n\n This property specifies the multiplexer signal value used when the\n dynamic signals in this subframe are transmitted in the frame.\n Only one subframe is transmitted at a time in the frame.\n\n There also is a multiplexer value for a signal object as a read-only property.\n It reflects the value set on the parent subframe object.\n\n This property is required. If the property does not contain a valid value,\n and you create an XNET session that uses this subframe,\n the session returns an error.\n To ensure that the property contains a valid value,\n you can do one of the following:\n\n * Use a database file (or alias) to create the session.\n\n The file formats require a valid value in the text for this property.\n\n * Set a value at runtime using this property.\n\n This is needed when you create your own in-memory database (*:memory:*) rather than use a file.\n The property does not contain a default in this case,\n so you must set a valid value prior to creating a session.\n '
return _props.get_subframe_mux_value(self._handle)
|
@property
def name(self):
'str: Get or set the name of the subframe object.\n\n Lowercase letters, uppercase letters, numbers,\n and the underscore (_) are valid characters for the short name.\n The space ( ), period (.), and other special characters are not supported within the name.\n The short name must begin with a letter (uppercase or lowercase) or underscore, and not a number.\n The short name is limited to 128 characters.\n\n A subframe name must be unique for all subframes in a frame.\n\n This short name does not include qualifiers to ensure that it is unique,\n such as the database, cluster, and frame name. It is for display purposes.\n '
return _props.get_subframe_name(self._handle)
| -6,174,104,265,428,616,000
|
str: Get or set the name of the subframe object.
Lowercase letters, uppercase letters, numbers,
and the underscore (_) are valid characters for the short name.
The space ( ), period (.), and other special characters are not supported within the name.
The short name must begin with a letter (uppercase or lowercase) or underscore, and not a number.
The short name is limited to 128 characters.
A subframe name must be unique for all subframes in a frame.
This short name does not include qualifiers to ensure that it is unique,
such as the database, cluster, and frame name. It is for display purposes.
|
nixnet/database/_subframe.py
|
name
|
bigoulours/nixnet-python
|
python
|
@property
def name(self):
'str: Get or set the name of the subframe object.\n\n Lowercase letters, uppercase letters, numbers,\n and the underscore (_) are valid characters for the short name.\n The space ( ), period (.), and other special characters are not supported within the name.\n The short name must begin with a letter (uppercase or lowercase) or underscore, and not a number.\n The short name is limited to 128 characters.\n\n A subframe name must be unique for all subframes in a frame.\n\n This short name does not include qualifiers to ensure that it is unique,\n such as the database, cluster, and frame name. It is for display purposes.\n '
return _props.get_subframe_name(self._handle)
|
@property
def pdu(self):
":any:`Pdu`: Returns the subframe's parent PDU.\n\n This property returns the reference to the subframe's parent PDU.\n The parent PDU is defined when the subframe object is created.\n You cannot change it afterwards.\n "
from nixnet.database import _pdu
handle = _props.get_subframe_pdu_ref(self._handle)
return _pdu.Pdu(_handle=handle)
| 4,860,405,005,379,393,000
|
:any:`Pdu`: Returns the subframe's parent PDU.
This property returns the reference to the subframe's parent PDU.
The parent PDU is defined when the subframe object is created.
You cannot change it afterwards.
|
nixnet/database/_subframe.py
|
pdu
|
bigoulours/nixnet-python
|
python
|
@property
def pdu(self):
":any:`Pdu`: Returns the subframe's parent PDU.\n\n This property returns the reference to the subframe's parent PDU.\n The parent PDU is defined when the subframe object is created.\n You cannot change it afterwards.\n "
from nixnet.database import _pdu
handle = _props.get_subframe_pdu_ref(self._handle)
return _pdu.Pdu(_handle=handle)
|
@property
def name_unique_to_cluster(self):
'str: Returns a subframe name unique to the cluster that contains the subframe.\n\n If the single name is not unique within the cluster, the name is <frame-name>.<subframe-name>.\n\n You can pass the name to the `find` function to retrieve the reference to the object,\n while the single name is not guaranteed success in `find`\n because it may be not unique in the cluster.\n '
return _props.get_subframe_name_unique_to_cluster(self._handle)
| 9,096,425,762,100,041,000
|
str: Returns a subframe name unique to the cluster that contains the subframe.
If the single name is not unique within the cluster, the name is <frame-name>.<subframe-name>.
You can pass the name to the `find` function to retrieve the reference to the object,
while the single name is not guaranteed success in `find`
because it may be not unique in the cluster.
|
nixnet/database/_subframe.py
|
name_unique_to_cluster
|
bigoulours/nixnet-python
|
python
|
@property
def name_unique_to_cluster(self):
'str: Returns a subframe name unique to the cluster that contains the subframe.\n\n If the single name is not unique within the cluster, the name is <frame-name>.<subframe-name>.\n\n You can pass the name to the `find` function to retrieve the reference to the object,\n while the single name is not guaranteed success in `find`\n because it may be not unique in the cluster.\n '
return _props.get_subframe_name_unique_to_cluster(self._handle)
|
def log_gaussian(x, mean, sigma):
'\n Computes the log-probability of X=x for a Gaussian of mean=mean and sigma=sigma\n Parameters\n ----------\n x\n mean\n sigma\n\n Returns\n -------\n\n '
log_pdf = ((- ((x - mean) ** 2)) / (2 * (sigma ** 2)))
log_pdf = (log_pdf - np.log((np.sqrt((2 * np.pi)) * sigma)))
return log_pdf
| -581,951,873,479,949,700
|
Computes the log-probability of X=x for a Gaussian of mean=mean and sigma=sigma
Parameters
----------
x
mean
sigma
Returns
-------
|
lstchain/image/pdf.py
|
log_gaussian
|
calispac/cta-lstchain
|
python
|
def log_gaussian(x, mean, sigma):
'\n Computes the log-probability of X=x for a Gaussian of mean=mean and sigma=sigma\n Parameters\n ----------\n x\n mean\n sigma\n\n Returns\n -------\n\n '
log_pdf = ((- ((x - mean) ** 2)) / (2 * (sigma ** 2)))
log_pdf = (log_pdf - np.log((np.sqrt((2 * np.pi)) * sigma)))
return log_pdf
|
def set_seed(seed: int):
'\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if\n installed).\n\n Args:\n seed (:obj:`int`): The seed to set.\n '
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if is_tf_available():
import tensorflow as tf
tf.random.set_seed(seed)
| 1,569,534,815,772,305,700
|
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
|
machine-learning/nlp/bert-text-classification/train.py
|
set_seed
|
AJuneSlop/pythoncode-tutorials
|
python
|
def set_seed(seed: int):
'\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if\n installed).\n\n Args:\n seed (:obj:`int`): The seed to set.\n '
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if is_tf_available():
import tensorflow as tf
tf.random.set_seed(seed)
|
def main():
'\n Unit tests\n '
max_depth = 4.0
numFrames = 10
height_ratio = 0.5
sub_sample = 1
reduce_to = 'middle_lower'
print('Program settings:')
print(('\tmax_depth: ' + str(max_depth)))
print(('\tnumFrames: ' + str(numFrames)))
print(('\theight_ratio: ' + str(height_ratio)))
print(('\tsub_sample: ' + str(sub_sample)))
print(('\treduce_to: ' + reduce_to))
cam = Camera(max_depth=max_depth)
cam.connect()
time.sleep(2.5)
t1 = time.time()
d = cam.getFrames(numFrames)
t2 = time.time()
printStmt = ('Time to get {0} frames: ' + str((t2 - t1)))
print(printStmt.format(numFrames))
d_small = cam.reduceFrame(d, height_ratio=height_ratio, sub_sample=sub_sample, reduce_to=reduce_to)
plt.figure(figsize=(6, 7))
ax2 = plt.subplot(2, 1, 2)
plt.imshow(d_small, cmap='gist_rainbow')
plt.colorbar()
plt.title('Scaled (height_ratio = {0}, sub_sample = {1})'.format(height_ratio, sub_sample))
plt.grid()
plt.subplot(2, 1, 1)
plt.imshow(d, cmap='gist_rainbow')
plt.colorbar()
plt.title('Original')
plt.grid()
plt.subplots_adjust(hspace=0.3)
plt.show()
cam.disconnect()
| -4,685,269,840,475,023,000
|
Unit tests
|
Camera/camera.py
|
main
|
marioliu/AutonomousQuadblade
|
python
|
def main():
'\n \n '
max_depth = 4.0
numFrames = 10
height_ratio = 0.5
sub_sample = 1
reduce_to = 'middle_lower'
print('Program settings:')
print(('\tmax_depth: ' + str(max_depth)))
print(('\tnumFrames: ' + str(numFrames)))
print(('\theight_ratio: ' + str(height_ratio)))
print(('\tsub_sample: ' + str(sub_sample)))
print(('\treduce_to: ' + reduce_to))
cam = Camera(max_depth=max_depth)
cam.connect()
time.sleep(2.5)
t1 = time.time()
d = cam.getFrames(numFrames)
t2 = time.time()
printStmt = ('Time to get {0} frames: ' + str((t2 - t1)))
print(printStmt.format(numFrames))
d_small = cam.reduceFrame(d, height_ratio=height_ratio, sub_sample=sub_sample, reduce_to=reduce_to)
plt.figure(figsize=(6, 7))
ax2 = plt.subplot(2, 1, 2)
plt.imshow(d_small, cmap='gist_rainbow')
plt.colorbar()
plt.title('Scaled (height_ratio = {0}, sub_sample = {1})'.format(height_ratio, sub_sample))
plt.grid()
plt.subplot(2, 1, 1)
plt.imshow(d, cmap='gist_rainbow')
plt.colorbar()
plt.title('Original')
plt.grid()
plt.subplots_adjust(hspace=0.3)
plt.show()
cam.disconnect()
|
def __init__(self, max_depth=4.0, save_images=False, t_buffer=5, output_dir='./Trials/'):
'\n Intitalizes Camera object \n '
self.max_depth = max_depth
self.save_images = save_images
self.clock = time.time()
self.t_buffer = t_buffer
self.output_dir = output_dir
self.data_dir = path.join(self.output_dir, '{}'.format(time.strftime('%d_%b_%Y_%H:%M', time.localtime())))
if self.save_images:
ensureDir(self.data_dir)
pass
np.warnings.filterwarnings('ignore')
| 559,276,931,801,889,150
|
Intitalizes Camera object
|
Camera/camera.py
|
__init__
|
marioliu/AutonomousQuadblade
|
python
|
def __init__(self, max_depth=4.0, save_images=False, t_buffer=5, output_dir='./Trials/'):
'\n \n '
self.max_depth = max_depth
self.save_images = save_images
self.clock = time.time()
self.t_buffer = t_buffer
self.output_dir = output_dir
self.data_dir = path.join(self.output_dir, '{}'.format(time.strftime('%d_%b_%Y_%H:%M', time.localtime())))
if self.save_images:
ensureDir(self.data_dir)
pass
np.warnings.filterwarnings('ignore')
|
def connect(self):
'\n Establishes connection to R200 camera\n '
logging.info('Cam.py: connecting components')
self.serv = pyrs.Service()
self.dev = self.serv.Device(device_id=0, streams=[pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])
| 8,347,761,966,569,549,000
|
Establishes connection to R200 camera
|
Camera/camera.py
|
connect
|
marioliu/AutonomousQuadblade
|
python
|
def connect(self):
'\n \n '
logging.info('Cam.py: connecting components')
self.serv = pyrs.Service()
self.dev = self.serv.Device(device_id=0, streams=[pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])
|
def disconnect(self):
'\n Disconnects from R200 camera\n '
self.dev.stop()
self.serv.stop()
logging.info('Cam.py: camera disconnected')
| 4,568,159,224,948,574,000
|
Disconnects from R200 camera
|
Camera/camera.py
|
disconnect
|
marioliu/AutonomousQuadblade
|
python
|
def disconnect(self):
'\n \n '
self.dev.stop()
self.serv.stop()
logging.info('Cam.py: camera disconnected')
|
def getFrames(self, frames=5, rgb=False):
'\n Retrieves depth frames (and RGB if true) from R200 input, cleans and averages depth images\n '
self.dev.wait_for_frames()
depth = (self.dev.depth * self.dev.depth_scale)
col = self.dev.color
if (self.save_images and ((time.time() - self.clock) > self.t_buffer)):
np.save(path.join(self.data_dir, (str(time.time()) + '_d')), depth)
np.save(path.join(self.data_dir, (str(time.time()) + '_c')), col)
self.clock = time.time()
for _ in range((frames - 1)):
self.dev.wait_for_frames()
curr = (self.dev.depth * self.dev.depth_scale)
depth = np.dstack((depth, curr))
if (frames != 1):
depth = np.nanmean(depth, 2)
depth[(depth <= 0)] = np.nan
depth[(depth > self.max_depth)] = np.nan
if rgb:
return (depth, col)
return depth
| -6,442,358,346,834,681,000
|
Retrieves depth frames (and RGB if true) from R200 input, cleans and averages depth images
|
Camera/camera.py
|
getFrames
|
marioliu/AutonomousQuadblade
|
python
|
def getFrames(self, frames=5, rgb=False):
'\n \n '
self.dev.wait_for_frames()
depth = (self.dev.depth * self.dev.depth_scale)
col = self.dev.color
if (self.save_images and ((time.time() - self.clock) > self.t_buffer)):
np.save(path.join(self.data_dir, (str(time.time()) + '_d')), depth)
np.save(path.join(self.data_dir, (str(time.time()) + '_c')), col)
self.clock = time.time()
for _ in range((frames - 1)):
self.dev.wait_for_frames()
curr = (self.dev.depth * self.dev.depth_scale)
depth = np.dstack((depth, curr))
if (frames != 1):
depth = np.nanmean(depth, 2)
depth[(depth <= 0)] = np.nan
depth[(depth > self.max_depth)] = np.nan
if rgb:
return (depth, col)
return depth
|
def reduceFrame(self, depth, height_ratio=0.5, sub_sample=0.3, reduce_to='lower'):
'\n Takes in a depth image and rescales it\n\n Args:\n height_ratio: Determines fraction of rows to keep\n sub_sample: Scaling factor for image\n '
if ((height_ratio > 1.0) or (height_ratio < 0.0) or (sub_sample > 1.0) or (sub_sample < 0.0)):
print('height_ratio and sub_sample must be between 0 and 1')
exit(1)
depth_copy = depth.copy()
height = depth_copy.shape[0]
h = int((height_ratio * height))
cols_to_cut = 0
if (height_ratio == 1):
d_short = depth_copy
elif (reduce_to == 'lower'):
d_short = depth_copy[(height - h):, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'middle_lower'):
upper_brdr = int(((3 * (height / 4.0)) - (h / 2)))
lower_brdr = (upper_brdr + h)
d_short = depth_copy[upper_brdr:lower_brdr, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'middle'):
upper_brdr = int(((height - h) / 2.0))
lower_brdr = (upper_brdr + h)
d_short = depth_copy[upper_brdr:lower_brdr, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'middle_upper'):
upper_brdr = int(((height / 4.0) - (h / 2)))
lower_brdr = (upper_brdr + h)
d_short = depth_copy[upper_brdr:lower_brdr, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'upper'):
d_short = depth_copy[:h, cols_to_cut:(- (cols_to_cut + 1))]
d_short[(d_short <= 0)] = np.nan
d_short[(d_short > self.max_depth)] = np.nan
rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)
return rescaled
| -3,983,497,567,927,168,500
|
Takes in a depth image and rescales it
Args:
height_ratio: Determines fraction of rows to keep
sub_sample: Scaling factor for image
|
Camera/camera.py
|
reduceFrame
|
marioliu/AutonomousQuadblade
|
python
|
def reduceFrame(self, depth, height_ratio=0.5, sub_sample=0.3, reduce_to='lower'):
'\n Takes in a depth image and rescales it\n\n Args:\n height_ratio: Determines fraction of rows to keep\n sub_sample: Scaling factor for image\n '
if ((height_ratio > 1.0) or (height_ratio < 0.0) or (sub_sample > 1.0) or (sub_sample < 0.0)):
print('height_ratio and sub_sample must be between 0 and 1')
exit(1)
depth_copy = depth.copy()
height = depth_copy.shape[0]
h = int((height_ratio * height))
cols_to_cut = 0
if (height_ratio == 1):
d_short = depth_copy
elif (reduce_to == 'lower'):
d_short = depth_copy[(height - h):, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'middle_lower'):
upper_brdr = int(((3 * (height / 4.0)) - (h / 2)))
lower_brdr = (upper_brdr + h)
d_short = depth_copy[upper_brdr:lower_brdr, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'middle'):
upper_brdr = int(((height - h) / 2.0))
lower_brdr = (upper_brdr + h)
d_short = depth_copy[upper_brdr:lower_brdr, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'middle_upper'):
upper_brdr = int(((height / 4.0) - (h / 2)))
lower_brdr = (upper_brdr + h)
d_short = depth_copy[upper_brdr:lower_brdr, cols_to_cut:(- (cols_to_cut + 1))]
elif (reduce_to == 'upper'):
d_short = depth_copy[:h, cols_to_cut:(- (cols_to_cut + 1))]
d_short[(d_short <= 0)] = np.nan
d_short[(d_short > self.max_depth)] = np.nan
rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)
return rescaled
|
def parse_options():
'process command line options.'
parser = optparse.OptionParser('usage: %prog [options]')
parser.add_option('--verbose', action='store_true', help='List lock files found and deleted')
(options, args) = parser.parse_args()
return (options, args)
| 4,582,584,220,910,883,000
|
process command line options.
|
tools/clean_file_locks.py
|
parse_options
|
bopopescu/extra-specs-1
|
python
|
def parse_options():
parser = optparse.OptionParser('usage: %prog [options]')
parser.add_option('--verbose', action='store_true', help='List lock files found and deleted')
(options, args) = parser.parse_args()
return (options, args)
|
def main():
'Main loop.'
(options, args) = parse_options()
verbose = options.verbose
if verbose:
LOG.logger.setLevel(logging.DEBUG)
else:
LOG.logger.setLevel(logging.INFO)
LOG.info(('Cleaning stale locks from %s' % FLAGS.lock_path))
utils.cleanup_file_locks()
LOG.info('Finished')
| 9,010,034,981,460,363,000
|
Main loop.
|
tools/clean_file_locks.py
|
main
|
bopopescu/extra-specs-1
|
python
|
def main():
(options, args) = parse_options()
verbose = options.verbose
if verbose:
LOG.logger.setLevel(logging.DEBUG)
else:
LOG.logger.setLevel(logging.INFO)
LOG.info(('Cleaning stale locks from %s' % FLAGS.lock_path))
utils.cleanup_file_locks()
LOG.info('Finished')
|
def cli(self, interface='', output=None):
'parsing mechanism: cli\n\n Function cli() defines the cli type output parsing mechanism which\n typically contains 3 steps: exe\n cuting, transforming, returning\n '
parsed_dict = {}
if (output is None):
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
if out:
res = parsergen.oper_fill_tabular(device_output=out, device_os='iosxe', table_terminal_pattern='^\\n', header_fields=['Interface', 'IP-Address', 'OK\\?', 'Method', 'Status', 'Protocol'], label_fields=['Interface', 'ip_address', 'interface_is_ok', 'method', 'status', 'protocol'], index=[0])
if res.entries:
for (intf, intf_dict) in res.entries.items():
intf = Common.convert_intf_name(intf)
del intf_dict['Interface']
parsed_dict.setdefault('interface', {}).update({intf: intf_dict})
return parsed_dict
| -608,989,924,387,144,300
|
parsing mechanism: cli
Function cli() defines the cli type output parsing mechanism which
typically contains 3 steps: exe
cuting, transforming, returning
|
src/genie/libs/parser/iosxe/show_interface.py
|
cli
|
Tristou27/genieparser
|
python
|
def cli(self, interface=, output=None):
'parsing mechanism: cli\n\n Function cli() defines the cli type output parsing mechanism which\n typically contains 3 steps: exe\n cuting, transforming, returning\n '
parsed_dict = {}
if (output is None):
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
if out:
res = parsergen.oper_fill_tabular(device_output=out, device_os='iosxe', table_terminal_pattern='^\\n', header_fields=['Interface', 'IP-Address', 'OK\\?', 'Method', 'Status', 'Protocol'], label_fields=['Interface', 'ip_address', 'interface_is_ok', 'method', 'status', 'protocol'], index=[0])
if res.entries:
for (intf, intf_dict) in res.entries.items():
intf = Common.convert_intf_name(intf)
del intf_dict['Interface']
parsed_dict.setdefault('interface', {}).update({intf: intf_dict})
return parsed_dict
|
def yang(self):
' parsing mechanism: yang\n\n Function yang() defines the yang type output parsing mechanism which\n typically contains 3 steps: executing, transforming, returning\n '
pass
| 116,842,370,709,678,300
|
parsing mechanism: yang
Function yang() defines the yang type output parsing mechanism which
typically contains 3 steps: executing, transforming, returning
|
src/genie/libs/parser/iosxe/show_interface.py
|
yang
|
Tristou27/genieparser
|
python
|
def yang(self):
' parsing mechanism: yang\n\n Function yang() defines the yang type output parsing mechanism which\n typically contains 3 steps: executing, transforming, returning\n '
pass
|
def yang(self):
'parsing mechanism: yang\n\n Function yang() defines the yang type output parsing mechanism which\n typically contains 3 steps: executing, transforming, returning\n '
ret = {}
cmd = '<native><interface><Vlan/></interface></native>'
output = self.device.get(('subtree', cmd))
for data in output.data:
for native in data:
for interface in native:
vlan_id = None
interface_name = None
ip_address = None
ip_mask = None
for vlan in interface:
text = vlan.tag[(vlan.tag.find('}') + 1):]
if (text == 'name'):
vlan_id = vlan.text
interface_name = ('Vlan' + str(vlan_id))
continue
if (text == 'ip'):
for ip in vlan:
text = ip.tag[(ip.tag.find('}') + 1):]
if (text == 'address'):
for address in ip:
text = address.tag[(address.tag.find('}') + 1):]
if (text == 'primary'):
for primary in address:
text = primary.tag[(primary.tag.find('}') + 1):]
if (text == 'address'):
ip_address = primary.text
continue
if (text == 'mask'):
ip_mask = primary.text
continue
if ('interface' not in ret):
ret['interface'] = {}
if (interface_name is not None):
ret['interface'][interface_name] = {}
if (vlan_id is not None):
ret['interface'][interface_name]['vlan_id'] = {}
ret['interface'][interface_name]['vlan_id'][vlan_id] = {}
if (ip_address is not None):
ret['interface'][interface_name]['vlan_id'][vlan_id]['ip_address'] = ip_address
else:
ret['interface'][interface_name]['vlan_id'][vlan_id]['ip_address'] = 'unassigned'
return ret
| 4,722,897,063,672,879,000
|
parsing mechanism: yang
Function yang() defines the yang type output parsing mechanism which
typically contains 3 steps: executing, transforming, returning
|
src/genie/libs/parser/iosxe/show_interface.py
|
yang
|
Tristou27/genieparser
|
python
|
def yang(self):
'parsing mechanism: yang\n\n Function yang() defines the yang type output parsing mechanism which\n typically contains 3 steps: executing, transforming, returning\n '
ret = {}
cmd = '<native><interface><Vlan/></interface></native>'
output = self.device.get(('subtree', cmd))
for data in output.data:
for native in data:
for interface in native:
vlan_id = None
interface_name = None
ip_address = None
ip_mask = None
for vlan in interface:
text = vlan.tag[(vlan.tag.find('}') + 1):]
if (text == 'name'):
vlan_id = vlan.text
interface_name = ('Vlan' + str(vlan_id))
continue
if (text == 'ip'):
for ip in vlan:
text = ip.tag[(ip.tag.find('}') + 1):]
if (text == 'address'):
for address in ip:
text = address.tag[(address.tag.find('}') + 1):]
if (text == 'primary'):
for primary in address:
text = primary.tag[(primary.tag.find('}') + 1):]
if (text == 'address'):
ip_address = primary.text
continue
if (text == 'mask'):
ip_mask = primary.text
continue
if ('interface' not in ret):
ret['interface'] = {}
if (interface_name is not None):
ret['interface'][interface_name] = {}
if (vlan_id is not None):
ret['interface'][interface_name]['vlan_id'] = {}
ret['interface'][interface_name]['vlan_id'][vlan_id] = {}
if (ip_address is not None):
ret['interface'][interface_name]['vlan_id'][vlan_id]['ip_address'] = ip_address
else:
ret['interface'][interface_name]['vlan_id'][vlan_id]['ip_address'] = 'unassigned'
return ret
|
def _gather(params, indices, validate_indices=None, axis=None, batch_dims=0, name=None):
'gather.'
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if (validate_indices is not None):
raise NotImplementedError('Argument `validate_indices != None` is currently unimplemented.')
if (batch_dims < 0):
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if (axis is None):
axis = batch_dims
if (axis < 0):
axis = (axis + len(params.shape))
if JAX_MODE:
take = (lambda params, indices: np.take(params, indices, axis=(axis - batch_dims)))
take = functools.reduce((lambda g, f: f(g)), ([jax.vmap] * int(batch_dims)), take)
return take(params, indices)
params = ops.convert_to_tensor(params)
res = np.array([np.take(params[i], indices[i], axis=(axis - batch_dims)) for i in np.ndindex(*params.shape[:batch_dims])])
return np.reshape(res, ((params.shape[:axis] + indices.shape[batch_dims:]) + params.shape[(axis + 1):]))
| 1,254,113,188,679,910,000
|
gather.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_gather
|
michalbrys/probability
|
python
|
def _gather(params, indices, validate_indices=None, axis=None, batch_dims=0, name=None):
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if (validate_indices is not None):
raise NotImplementedError('Argument `validate_indices != None` is currently unimplemented.')
if (batch_dims < 0):
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if (axis is None):
axis = batch_dims
if (axis < 0):
axis = (axis + len(params.shape))
if JAX_MODE:
take = (lambda params, indices: np.take(params, indices, axis=(axis - batch_dims)))
take = functools.reduce((lambda g, f: f(g)), ([jax.vmap] * int(batch_dims)), take)
return take(params, indices)
params = ops.convert_to_tensor(params)
res = np.array([np.take(params[i], indices[i], axis=(axis - batch_dims)) for i in np.ndindex(*params.shape[:batch_dims])])
return np.reshape(res, ((params.shape[:axis] + indices.shape[batch_dims:]) + params.shape[(axis + 1):]))
|
def _args_to_matching_arrays(args_list, dtype_hint=None):
'Converts a list to array using the first element for dtype.\n\n This method is used to match the behavior of `tf.concat`.\n\n Args:\n args_list: A list or tuple of arguments.\n dtype_hint: An optional hint used when converting the args to tensors.\n Returns:\n A list of tensors.\n '
dtype = None
for arg in args_list:
if ops.is_tensor(arg):
dtype = arg.dtype
break
if (dtype is None):
ret = []
for arg in args_list:
ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))
if (dtype is None):
dtype = ret[(- 1)].dtype
else:
ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]
return ret
| 5,353,915,506,816,408,000
|
Converts a list to array using the first element for dtype.
This method is used to match the behavior of `tf.concat`.
Args:
args_list: A list or tuple of arguments.
dtype_hint: An optional hint used when converting the args to tensors.
Returns:
A list of tensors.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_args_to_matching_arrays
|
michalbrys/probability
|
python
|
def _args_to_matching_arrays(args_list, dtype_hint=None):
'Converts a list to array using the first element for dtype.\n\n This method is used to match the behavior of `tf.concat`.\n\n Args:\n args_list: A list or tuple of arguments.\n dtype_hint: An optional hint used when converting the args to tensors.\n Returns:\n A list of tensors.\n '
dtype = None
for arg in args_list:
if ops.is_tensor(arg):
dtype = arg.dtype
break
if (dtype is None):
ret = []
for arg in args_list:
ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))
if (dtype is None):
dtype = ret[(- 1)].dtype
else:
ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]
return ret
|
def _gather_nd(params, indices, batch_dims=0, name=None):
'gather_nd.'
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if (batch_dims < 0):
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if ((not JAX_MODE) and (batch_dims > 0)):
raise NotImplementedError('`batch_dims > 0` currently unsupported in NumPy backend.')
gather_nd_ = _gather_nd_single
if JAX_MODE:
gather_nd_ = functools.reduce((lambda g, f: f(g)), ([jax.vmap] * int(batch_dims)), gather_nd_)
return gather_nd_(params, indices)
| -6,853,863,073,649,036,000
|
gather_nd.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_gather_nd
|
michalbrys/probability
|
python
|
def _gather_nd(params, indices, batch_dims=0, name=None):
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if (batch_dims < 0):
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if ((not JAX_MODE) and (batch_dims > 0)):
raise NotImplementedError('`batch_dims > 0` currently unsupported in NumPy backend.')
gather_nd_ = _gather_nd_single
if JAX_MODE:
gather_nd_ = functools.reduce((lambda g, f: f(g)), ([jax.vmap] * int(batch_dims)), gather_nd_)
return gather_nd_(params, indices)
|
def _linspace(start, stop, num, name=None, axis=0):
'Match TF behavior with np.linspace.'
start = ops.convert_to_tensor(start)
if np.issubdtype(start.dtype, np.integer):
start = start.astype(np.float64)
stop = ops.convert_to_tensor(stop, dtype=start.dtype)
num = ops.convert_to_tensor(num, dtype_hint=np.int32)
if (not np.issubdtype(num.dtype, np.integer)):
raise TypeError('`num` must be an integer but got {}'.format(num.dtype))
num = num.astype(np.int32)
return np.linspace(start, stop, num, axis=axis).astype(start.dtype)
| -7,066,821,717,063,519,000
|
Match TF behavior with np.linspace.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_linspace
|
michalbrys/probability
|
python
|
def _linspace(start, stop, num, name=None, axis=0):
start = ops.convert_to_tensor(start)
if np.issubdtype(start.dtype, np.integer):
start = start.astype(np.float64)
stop = ops.convert_to_tensor(stop, dtype=start.dtype)
num = ops.convert_to_tensor(num, dtype_hint=np.int32)
if (not np.issubdtype(num.dtype, np.integer)):
raise TypeError('`num` must be an integer but got {}'.format(num.dtype))
num = num.astype(np.int32)
return np.linspace(start, stop, num, axis=axis).astype(start.dtype)
|
def _one_hot(indices, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None):
'One hot.'
if (on_value is None):
on_value = 1
if (off_value is None):
off_value = 0
if (dtype is None):
dtype = utils.common_dtype([on_value, off_value], np.float32)
indices = np.array(indices)
depth = np.array(depth)
pred = (abs((np.arange(depth, dtype=indices.dtype) - indices[(..., np.newaxis)])) > 0)
y_out = np.where(pred, np.array(off_value, dtype), np.array(on_value, dtype))
if (axis is not None):
y_out = np.moveaxis(y_out, (- 1), axis)
return y_out
| 1,411,797,174,937,303,300
|
One hot.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_one_hot
|
michalbrys/probability
|
python
|
def _one_hot(indices, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None):
if (on_value is None):
on_value = 1
if (off_value is None):
off_value = 0
if (dtype is None):
dtype = utils.common_dtype([on_value, off_value], np.float32)
indices = np.array(indices)
depth = np.array(depth)
pred = (abs((np.arange(depth, dtype=indices.dtype) - indices[(..., np.newaxis)])) > 0)
y_out = np.where(pred, np.array(off_value, dtype), np.array(on_value, dtype))
if (axis is not None):
y_out = np.moveaxis(y_out, (- 1), axis)
return y_out
|
def _range(start, limit=None, delta=1, dtype=None, name='range'):
'Emulates tf.range.'
dtype = utils.numpy_dtype(dtype)
start = ops.convert_to_tensor(start, dtype=dtype)
limit = (None if (limit is None) else ops.convert_to_tensor(limit, dtype=dtype))
delta = ops.convert_to_tensor(delta, dtype=dtype)
if (dtype is None):
dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]
inferred_dtype = max([arg.dtype for arg in [start, limit, delta] if (arg is not None)], key=dtype_hierarchy.index)
else:
inferred_dtype = dtype
return np.arange(start, limit, delta).astype(inferred_dtype)
| 5,974,374,142,208,092,000
|
Emulates tf.range.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_range
|
michalbrys/probability
|
python
|
def _range(start, limit=None, delta=1, dtype=None, name='range'):
dtype = utils.numpy_dtype(dtype)
start = ops.convert_to_tensor(start, dtype=dtype)
limit = (None if (limit is None) else ops.convert_to_tensor(limit, dtype=dtype))
delta = ops.convert_to_tensor(delta, dtype=dtype)
if (dtype is None):
dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]
inferred_dtype = max([arg.dtype for arg in [start, limit, delta] if (arg is not None)], key=dtype_hierarchy.index)
else:
inferred_dtype = dtype
return np.arange(start, limit, delta).astype(inferred_dtype)
|
def _searchsorted(sorted_sequence, values, side='left', out_type=np.int32, name=None):
'Find indices for insertion for list to remain sorted.'
if JAX_MODE:
try:
func = _searchsorted_vmap_sides[side]
except KeyError:
raise ValueError(("'%s' is an invalid value for keyword 'side'" % side))
sorted_sequence_2d = np.reshape(sorted_sequence, ((- 1), sorted_sequence.shape[(- 1)]))
values_2d = np.reshape(values, ((- 1), values.shape[(- 1)]))
if (sorted_sequence_2d.shape[0] != values_2d.shape[0]):
raise ValueError('Leading dim_size of both tensors must match.')
return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type), values.shape)
sorted_sequence = sorted_sequence[..., np.newaxis, :]
values = values[..., :, np.newaxis]
if (side == 'left'):
is_in_right_location = (sorted_sequence < values)
elif (side == 'right'):
is_in_right_location = (sorted_sequence <= values)
return np.sum(is_in_right_location, axis=(- 1)).astype(out_type)
| -3,334,490,459,446,652,000
|
Find indices for insertion for list to remain sorted.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_searchsorted
|
michalbrys/probability
|
python
|
def _searchsorted(sorted_sequence, values, side='left', out_type=np.int32, name=None):
if JAX_MODE:
try:
func = _searchsorted_vmap_sides[side]
except KeyError:
raise ValueError(("'%s' is an invalid value for keyword 'side'" % side))
sorted_sequence_2d = np.reshape(sorted_sequence, ((- 1), sorted_sequence.shape[(- 1)]))
values_2d = np.reshape(values, ((- 1), values.shape[(- 1)]))
if (sorted_sequence_2d.shape[0] != values_2d.shape[0]):
raise ValueError('Leading dim_size of both tensors must match.')
return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type), values.shape)
sorted_sequence = sorted_sequence[..., np.newaxis, :]
values = values[..., :, np.newaxis]
if (side == 'left'):
is_in_right_location = (sorted_sequence < values)
elif (side == 'right'):
is_in_right_location = (sorted_sequence <= values)
return np.sum(is_in_right_location, axis=(- 1)).astype(out_type)
|
def _split(value, num_or_size_splits, axis=0, num=None, name='split'):
'Map tf.split -> np.split.'
indices_or_sections = np.array(num_or_size_splits)
if (indices_or_sections.ndim == 1):
if any(((idx == (- 1)) for idx in indices_or_sections)):
total_splits = sum((idx for idx in indices_or_sections if (idx != (- 1))))
remainder = int(max(0, (np.array(value).shape[axis] - total_splits)))
indices_or_sections = [(idx if (idx != (- 1)) else remainder) for idx in indices_or_sections]
indices_or_sections = np.cumsum(np.array(indices_or_sections))[:(- 1)]
return np.split(value, indices_or_sections, axis)
| 6,905,526,348,121,598,000
|
Map tf.split -> np.split.
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
_split
|
michalbrys/probability
|
python
|
def _split(value, num_or_size_splits, axis=0, num=None, name='split'):
indices_or_sections = np.array(num_or_size_splits)
if (indices_or_sections.ndim == 1):
if any(((idx == (- 1)) for idx in indices_or_sections)):
total_splits = sum((idx for idx in indices_or_sections if (idx != (- 1))))
remainder = int(max(0, (np.array(value).shape[axis] - total_splits)))
indices_or_sections = [(idx if (idx != (- 1)) else remainder) for idx in indices_or_sections]
indices_or_sections = np.cumsum(np.array(indices_or_sections))[:(- 1)]
return np.split(value, indices_or_sections, axis)
|
@staticmethod
def run(path, code=None, params=None, **meta):
'Check code with pycodestyle.\n\n :return list: List of errors.\n '
parser = get_parser()
for option in parser.option_list:
if (option.dest and (option.dest in params)):
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ['filename', 'exclude', 'select', 'ignore']:
if ((key in params) and isinstance(params[key], str)):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines())
| 7,664,685,662,998,880,000
|
Check code with pycodestyle.
:return list: List of errors.
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/lint/pylama_pycodestyle.py
|
run
|
BHills15/vimrc
|
python
|
@staticmethod
def run(path, code=None, params=None, **meta):
'Check code with pycodestyle.\n\n :return list: List of errors.\n '
parser = get_parser()
for option in parser.option_list:
if (option.dest and (option.dest in params)):
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ['filename', 'exclude', 'select', 'ignore']:
if ((key in params) and isinstance(params[key], str)):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines())
|
def init_file(self, filename, lines, expected, line_offset):
'Prepare storage for errors.'
super(_PycodestyleReport, self).init_file(filename, lines, expected, line_offset)
self.errors = []
| -8,009,620,792,537,842,000
|
Prepare storage for errors.
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/lint/pylama_pycodestyle.py
|
init_file
|
BHills15/vimrc
|
python
|
def init_file(self, filename, lines, expected, line_offset):
super(_PycodestyleReport, self).init_file(filename, lines, expected, line_offset)
self.errors = []
|
def error(self, line_number, offset, text, check):
'Save errors.'
code = super(_PycodestyleReport, self).error(line_number, offset, text, check)
if code:
self.errors.append(dict(text=text, type=code.replace('E', 'C'), col=(offset + 1), lnum=line_number))
| -7,287,559,401,521,401,000
|
Save errors.
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/lint/pylama_pycodestyle.py
|
error
|
BHills15/vimrc
|
python
|
def error(self, line_number, offset, text, check):
code = super(_PycodestyleReport, self).error(line_number, offset, text, check)
if code:
self.errors.append(dict(text=text, type=code.replace('E', 'C'), col=(offset + 1), lnum=line_number))
|
def get_file_results(self):
'Get errors.\n\n :return list: List of errors.\n\n '
return self.errors
| 6,514,165,612,194,767,000
|
Get errors.
:return list: List of errors.
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/lint/pylama_pycodestyle.py
|
get_file_results
|
BHills15/vimrc
|
python
|
def get_file_results(self):
'Get errors.\n\n :return list: List of errors.\n\n '
return self.errors
|
def load_conf_from_file(conf_file_path, conf=__conf__):
'\n Load conf file from: conf_file_path\n '
if (os.path.isfile(conf_file_path) == False):
raise AgentConfigError('Missing configuration in {0}'.format(conf_file_path))
try:
content = fileutil.read_file(conf_file_path)
conf.load(content)
except IOError as err:
raise AgentConfigError('Failed to load conf file:{0}, {1}'.format(conf_file_path, err))
| -1,907,993,394,519,222,800
|
Load conf file from: conf_file_path
|
azurelinuxagent/common/conf.py
|
load_conf_from_file
|
vittyvk/WALinuxAgent
|
python
|
def load_conf_from_file(conf_file_path, conf=__conf__):
'\n \n '
if (os.path.isfile(conf_file_path) == False):
raise AgentConfigError('Missing configuration in {0}'.format(conf_file_path))
try:
content = fileutil.read_file(conf_file_path)
conf.load(content)
except IOError as err:
raise AgentConfigError('Failed to load conf file:{0}, {1}'.format(conf_file_path, err))
|
@tf_export('block_lstm')
def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
"Computes the LSTM cell forward propagation for all the time steps.\n\n This is equivalent to applying LSTMBlockCell in a loop, like so:\n\n ```python\n for x1 in unpack(x):\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\n cs_prev = cs1\n h_prev = h1\n i.append(i1)\n cs.append(cs1)\n f.append(f1)\n o.append(o1)\n ci.append(ci1)\n co.append(co1)\n h.append(h1)\n return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)\n ```\n\n Args:\n seq_len_max: A `Tensor` of type `int64`.\n Maximum time length actually used by this input. Outputs are padded\n with zeros beyond this length.\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\n The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).\n cs_prev: A `Tensor`. Must have the same type as `x`.\n Value of the initial cell state.\n h_prev: A `Tensor`. Must have the same type as `x`.\n Initial output of cell (to be used for peephole).\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\n wci: A `Tensor`. Must have the same type as `x`.\n The weight matrix for input gate peephole connection.\n wcf: A `Tensor`. Must have the same type as `x`.\n The weight matrix for forget gate peephole connection.\n wco: A `Tensor`. Must have the same type as `x`.\n The weight matrix for output gate peephole connection.\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\n forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.\n cell_clip: An optional `float`. Defaults to `3`.\n Value to clip the 'cs' value to.\n use_peephole: An optional `bool`. Defaults to `False`.\n Whether to use peephole weights.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\n\n i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.\n cs: A `Tensor`. Has the same type as `x`. 
The cell state before the tanh over the whole time sequence.\n f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.\n o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.\n ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.\n co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.\n h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.\n "
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (forget_bias is None):
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, 'forget_bias')
if (cell_clip is None):
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, 'cell_clip')
if (use_peephole is None):
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_, _, _op) = _op_def_lib._apply_op_helper('BlockLSTM', seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('forget_bias', _op.get_attr('forget_bias'), 'cell_clip', _op.get_attr('cell_clip'), 'use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
_execute.record_gradient('BlockLSTM', _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BlockLSTM', name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, 'forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole)
_result = _BlockLSTMOutput._make(_result)
return _result
except _core._FallbackException:
return block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| -259,147,601,963,394,300
|
Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
```python
for x1 in unpack(x):
i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
x1, cs_prev, h_prev, w, wci, wcf, wco, b)
cs_prev = cs1
h_prev = h1
i.append(i1)
cs.append(cs1)
f.append(f1)
o.append(o1)
ci.append(ci1)
co.append(co1)
h.append(h1)
return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)
```
Args:
seq_len_max: A `Tensor` of type `int64`.
Maximum time length actually used by this input. Outputs are padded
with zeros beyond this length.
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the initial cell state.
h_prev: A `Tensor`. Must have the same type as `x`.
Initial output of cell (to be used for peephole).
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh over the whole time sequence.
f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.
o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.
ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.
h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
block_lstm
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
@tf_export('block_lstm')
def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
"Computes the LSTM cell forward propagation for all the time steps.\n\n This is equivalent to applying LSTMBlockCell in a loop, like so:\n\n ```python\n for x1 in unpack(x):\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\n cs_prev = cs1\n h_prev = h1\n i.append(i1)\n cs.append(cs1)\n f.append(f1)\n o.append(o1)\n ci.append(ci1)\n co.append(co1)\n h.append(h1)\n return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)\n ```\n\n Args:\n seq_len_max: A `Tensor` of type `int64`.\n Maximum time length actually used by this input. Outputs are padded\n with zeros beyond this length.\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\n The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).\n cs_prev: A `Tensor`. Must have the same type as `x`.\n Value of the initial cell state.\n h_prev: A `Tensor`. Must have the same type as `x`.\n Initial output of cell (to be used for peephole).\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\n wci: A `Tensor`. Must have the same type as `x`.\n The weight matrix for input gate peephole connection.\n wcf: A `Tensor`. Must have the same type as `x`.\n The weight matrix for forget gate peephole connection.\n wco: A `Tensor`. Must have the same type as `x`.\n The weight matrix for output gate peephole connection.\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\n forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.\n cell_clip: An optional `float`. Defaults to `3`.\n Value to clip the 'cs' value to.\n use_peephole: An optional `bool`. Defaults to `False`.\n Whether to use peephole weights.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\n\n i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.\n cs: A `Tensor`. Has the same type as `x`. 
The cell state before the tanh over the whole time sequence.\n f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.\n o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.\n ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.\n co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.\n h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.\n "
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (forget_bias is None):
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, 'forget_bias')
if (cell_clip is None):
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, 'cell_clip')
if (use_peephole is None):
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_, _, _op) = _op_def_lib._apply_op_helper('BlockLSTM', seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('forget_bias', _op.get_attr('forget_bias'), 'cell_clip', _op.get_attr('cell_clip'), 'use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
_execute.record_gradient('BlockLSTM', _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BlockLSTM', name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, 'forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole)
_result = _BlockLSTMOutput._make(_result)
return _result
except _core._FallbackException:
return block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function block_lstm\n '
_ctx = (ctx if ctx else _context.context())
if (forget_bias is None):
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, 'forget_bias')
if (cell_clip is None):
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, 'cell_clip')
if (use_peephole is None):
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
_inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
_attrs = ('forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole, 'T', _attr_T)
_result = _execute.execute(b'BlockLSTM', 7, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BlockLSTM', _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
| -8,994,183,612,343,482,000
|
This is the slowpath function for Eager mode.
This is for function block_lstm
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
block_lstm_eager_fallback
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function block_lstm\n '
_ctx = (ctx if ctx else _context.context())
if (forget_bias is None):
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, 'forget_bias')
if (cell_clip is None):
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, 'cell_clip')
if (use_peephole is None):
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
_inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
_attrs = ('forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole, 'T', _attr_T)
_result = _execute.execute(b'BlockLSTM', 7, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BlockLSTM', _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
|
@tf_export('block_lstm_grad')
def block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
    """Computes the LSTM cell backward propagation for the entire time sequence.

    This implementation is to be used in conjunction of LSTMBlock.

    Args:
      seq_len_max: A `Tensor` of type `int64`. Maximum time length actually
        used by this input; outputs are padded with zeros beyond this length.
      x: A `Tensor` of type `half` or `float32`; the sequence input to the
        LSTM, shape (timelen, batch_size, num_inputs).
      cs_prev: Value of the initial cell state (same type as `x`).
      h_prev: Initial output of cell, used for peephole (same type as `x`).
      w: The weight matrix (same type as `x`).
      wci, wcf, wco: Peephole weight matrices for the input, forget and
        output gates (same type as `x`).
      b: The bias vector (same type as `x`).
      i, cs, f, o, ci, co, h: Forward-pass activations over the whole time
        sequence (input gate, pre-tanh cell state, forget gate, output gate,
        cell input, post-tanh cell, and output h), all the same type as `x`.
      cs_grad: The current gradient of cs (same type as `x`).
      h_grad: The gradient of the h vector (same type as `x`).
      use_peephole: A `bool`; whether to use peephole weights.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad,
      wci_grad, wcf_grad, wco_grad, b_grad), each the same type as `x`.
    """
    # Generated op wrapper: dispatches to graph mode or the eager fast path.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: record the op in the current graph.
        use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
        (_, _, _op) = _op_def_lib._apply_op_helper('BlockLSTMGrad', seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
        _execute.record_gradient('BlockLSTMGrad', _inputs_flat, _attrs, _result, name)
        _result = _BlockLSTMGradOutput._make(_result)
        return _result
    else:
        try:
            # Eager fast path implemented in C.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BlockLSTMGrad', name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, 'use_peephole', use_peephole)
            _result = _BlockLSTMGradOutput._make(_result)
            return _result
        except _core._FallbackException:
            # Fast path unavailable for these inputs; use the Python slow path.
            return block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C++ status into the matching Python exception,
            # suppressing the original exception context.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
| -8,757,221,502,674,187,000
|
Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction of LSTMBlock.
Args:
seq_len_max: A `Tensor` of type `int64`.
Maximum time length actually used by this input. Outputs are padded
with zeros beyond this length.
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the initial cell state.
h_prev: A `Tensor`. Must have the same type as `x`.
Initial output of cell (to be used for peephole).
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
i: A `Tensor`. Must have the same type as `x`.
The input gate over the whole time sequence.
cs: A `Tensor`. Must have the same type as `x`.
The cell state before the tanh over the whole time sequence.
f: A `Tensor`. Must have the same type as `x`.
The forget gate over the whole time sequence.
o: A `Tensor`. Must have the same type as `x`.
The output gate over the whole time sequence.
ci: A `Tensor`. Must have the same type as `x`.
The cell input over the whole time sequence.
co: A `Tensor`. Must have the same type as `x`.
The cell after the tanh over the whole time sequence.
h: A `Tensor`. Must have the same type as `x`.
The output h vector over the whole time sequence.
cs_grad: A `Tensor`. Must have the same type as `x`.
The current gradient of cs.
h_grad: A `Tensor`. Must have the same type as `x`.
The gradient of h vector.
use_peephole: A `bool`. Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad).
x_grad: A `Tensor`. Has the same type as `x`. The gradient of x to be back-propped.
cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs_prev to be back-propped.
h_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of h_prev to be back-propped.
w_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.
wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.
wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.
wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.
b_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
block_lstm_grad
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
@tf_export('block_lstm_grad')
def block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
    """Computes the LSTM cell backward propagation for the entire time sequence.

    This implementation is to be used in conjunction of LSTMBlock.

    Args:
      seq_len_max: A `Tensor` of type `int64`. Maximum time length actually
        used by this input; outputs are padded with zeros beyond this length.
      x: A `Tensor` of type `half` or `float32`; the sequence input to the
        LSTM, shape (timelen, batch_size, num_inputs).
      cs_prev: Value of the initial cell state (same type as `x`).
      h_prev: Initial output of cell, used for peephole (same type as `x`).
      w: The weight matrix (same type as `x`).
      wci, wcf, wco: Peephole weight matrices for the input, forget and
        output gates (same type as `x`).
      b: The bias vector (same type as `x`).
      i, cs, f, o, ci, co, h: Forward-pass activations over the whole time
        sequence (input gate, pre-tanh cell state, forget gate, output gate,
        cell input, post-tanh cell, and output h), all the same type as `x`.
      cs_grad: The current gradient of cs (same type as `x`).
      h_grad: The gradient of the h vector (same type as `x`).
      use_peephole: A `bool`; whether to use peephole weights.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad,
      wci_grad, wcf_grad, wco_grad, b_grad), each the same type as `x`.
    """
    # Generated op wrapper: dispatches to graph mode or the eager fast path.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: record the op in the current graph.
        use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
        (_, _, _op) = _op_def_lib._apply_op_helper('BlockLSTMGrad', seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
        _execute.record_gradient('BlockLSTMGrad', _inputs_flat, _attrs, _result, name)
        _result = _BlockLSTMGradOutput._make(_result)
        return _result
    else:
        try:
            # Eager fast path implemented in C.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BlockLSTMGrad', name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, 'use_peephole', use_peephole)
            _result = _BlockLSTMGradOutput._make(_result)
            return _result
        except _core._FallbackException:
            # Fast path unavailable for these inputs; use the Python slow path.
            return block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C++ status into the matching Python exception,
            # suppressing the original exception context.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None, ctx=None):
    """This is the slowpath function for Eager mode.

    This is for function block_lstm_grad.
    """
    # Taken when the eager fast path raises _FallbackException: build the
    # flat input list and attr tuple by hand and execute the op directly.
    _ctx = (ctx if ctx else _context.context())
    use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
    # Coerce every floating-point input to a single matching dtype T.
    (_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], _ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T
    # seq_len_max is always int64, independent of T.
    seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
    _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]
    _attrs = ('use_peephole', use_peephole, 'T', _attr_T)
    # BlockLSTMGrad produces 8 outputs (x_grad ... b_grad).
    _result = _execute.execute(b'BlockLSTMGrad', 8, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BlockLSTMGrad', _inputs_flat, _attrs, _result, name)
    _result = _BlockLSTMGradOutput._make(_result)
    return _result
| 1,323,251,434,971,964,000
|
This is the slowpath function for Eager mode.
This is for function block_lstm_grad
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
block_lstm_grad_eager_fallback
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None, ctx=None):
    """This is the slowpath function for Eager mode.

    This is for function block_lstm_grad.
    """
    # Taken when the eager fast path raises _FallbackException: build the
    # flat input list and attr tuple by hand and execute the op directly.
    _ctx = (ctx if ctx else _context.context())
    use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
    # Coerce every floating-point input to a single matching dtype T.
    (_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], _ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T
    # seq_len_max is always int64, independent of T.
    seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
    _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]
    _attrs = ('use_peephole', use_peephole, 'T', _attr_T)
    # BlockLSTMGrad produces 8 outputs (x_grad ... b_grad).
    _result = _execute.execute(b'BlockLSTMGrad', 8, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BlockLSTMGrad', _inputs_flat, _attrs, _result, name)
    _result = _BlockLSTMGradOutput._make(_result)
    return _result
|
@tf_export('lstm_block_cell')
def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
    """Computes the LSTM cell forward propagation for 1 time step.

    This implementation uses 1 weight matrix and 1 bias vector, and there's
    an optional peephole connection.  It implements:

    ```python
    xh = [x, h_prev]
    [i, f, ci, o] = xh * w + b
    f = f + forget_bias
    if not use_peephole:
      wci = wcf = wco = 0
    i = sigmoid(cs_prev * wci + i)
    f = sigmoid(cs_prev * wcf + f)
    ci = tanh(ci)
    cs = ci .* i + cs_prev .* f
    cs = clip(cs, cell_clip)
    o = sigmoid(cs * wco + o)
    co = tanh(cs)
    h = co .* o
    ```

    Args:
      x: A `Tensor` of type `half` or `float32`; the input to the LSTM cell,
        shape (batch_size, num_inputs).
      cs_prev: Cell state at the previous time step (same type as `x`).
      h_prev: Output of the cell at the previous time step (same type as `x`).
      w: The weight matrix (same type as `x`).
      wci, wcf, wco: Peephole weight matrices for the input, forget and
        output gates (same type as `x`).
      b: The bias vector (same type as `x`).
      forget_bias: An optional `float`, defaults to `1`; the forget gate bias.
      cell_clip: An optional `float`, defaults to `3`; value to clip `cs` to.
      use_peephole: An optional `bool`, defaults to `False`; whether to use
        peephole weights.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (i, cs, f, o, ci, co, h), each the same
      type as `x`.
    """
    # Generated op wrapper: dispatches to graph mode or the eager fast path.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: normalize attrs and record the op in the current graph.
        if (forget_bias is None):
            forget_bias = 1
        forget_bias = _execute.make_float(forget_bias, 'forget_bias')
        if (cell_clip is None):
            cell_clip = 3
        cell_clip = _execute.make_float(cell_clip, 'cell_clip')
        if (use_peephole is None):
            use_peephole = False
        use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
        (_, _, _op) = _op_def_lib._apply_op_helper('LSTMBlockCell', x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('forget_bias', _op.get_attr('forget_bias'), 'cell_clip', _op.get_attr('cell_clip'), 'use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
        _execute.record_gradient('LSTMBlockCell', _inputs_flat, _attrs, _result, name)
        _result = _LSTMBlockCellOutput._make(_result)
        return _result
    else:
        try:
            # Eager fast path implemented in C.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'LSTMBlockCell', name, _ctx._post_execution_callbacks, x, cs_prev, h_prev, w, wci, wcf, wco, b, 'forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole)
            _result = _LSTMBlockCellOutput._make(_result)
            return _result
        except _core._FallbackException:
            # Fast path unavailable for these inputs; use the Python slow path.
            return lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C++ status into the matching Python exception,
            # suppressing the original exception context.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
| -5,341,519,731,373,629,000
|
Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, f, ci, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
lstm_block_cell
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
@tf_export('lstm_block_cell')
def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
    """Computes the LSTM cell forward propagation for 1 time step.

    This implementation uses 1 weight matrix and 1 bias vector, and there's
    an optional peephole connection.  It implements:

    ```python
    xh = [x, h_prev]
    [i, f, ci, o] = xh * w + b
    f = f + forget_bias
    if not use_peephole:
      wci = wcf = wco = 0
    i = sigmoid(cs_prev * wci + i)
    f = sigmoid(cs_prev * wcf + f)
    ci = tanh(ci)
    cs = ci .* i + cs_prev .* f
    cs = clip(cs, cell_clip)
    o = sigmoid(cs * wco + o)
    co = tanh(cs)
    h = co .* o
    ```

    Args:
      x: A `Tensor` of type `half` or `float32`; the input to the LSTM cell,
        shape (batch_size, num_inputs).
      cs_prev: Cell state at the previous time step (same type as `x`).
      h_prev: Output of the cell at the previous time step (same type as `x`).
      w: The weight matrix (same type as `x`).
      wci, wcf, wco: Peephole weight matrices for the input, forget and
        output gates (same type as `x`).
      b: The bias vector (same type as `x`).
      forget_bias: An optional `float`, defaults to `1`; the forget gate bias.
      cell_clip: An optional `float`, defaults to `3`; value to clip `cs` to.
      use_peephole: An optional `bool`, defaults to `False`; whether to use
        peephole weights.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (i, cs, f, o, ci, co, h), each the same
      type as `x`.
    """
    # Generated op wrapper: dispatches to graph mode or the eager fast path.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: normalize attrs and record the op in the current graph.
        if (forget_bias is None):
            forget_bias = 1
        forget_bias = _execute.make_float(forget_bias, 'forget_bias')
        if (cell_clip is None):
            cell_clip = 3
        cell_clip = _execute.make_float(cell_clip, 'cell_clip')
        if (use_peephole is None):
            use_peephole = False
        use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
        (_, _, _op) = _op_def_lib._apply_op_helper('LSTMBlockCell', x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('forget_bias', _op.get_attr('forget_bias'), 'cell_clip', _op.get_attr('cell_clip'), 'use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
        _execute.record_gradient('LSTMBlockCell', _inputs_flat, _attrs, _result, name)
        _result = _LSTMBlockCellOutput._make(_result)
        return _result
    else:
        try:
            # Eager fast path implemented in C.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'LSTMBlockCell', name, _ctx._post_execution_callbacks, x, cs_prev, h_prev, w, wci, wcf, wco, b, 'forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole)
            _result = _LSTMBlockCellOutput._make(_result)
            return _result
        except _core._FallbackException:
            # Fast path unavailable for these inputs; use the Python slow path.
            return lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C++ status into the matching Python exception,
            # suppressing the original exception context.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
    """This is the slowpath function for Eager mode.

    This is for function lstm_block_cell.
    """
    # Taken when the eager fast path raises _FallbackException: build the
    # flat input list and attr tuple by hand and execute the op directly.
    _ctx = (ctx if ctx else _context.context())
    # Normalize optional attrs to their registered defaults.
    if (forget_bias is None):
        forget_bias = 1
    forget_bias = _execute.make_float(forget_bias, 'forget_bias')
    if (cell_clip is None):
        cell_clip = 3
    cell_clip = _execute.make_float(cell_clip, 'cell_clip')
    if (use_peephole is None):
        use_peephole = False
    use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
    # Coerce every floating-point input to a single matching dtype T.
    (_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
    _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
    _attrs = ('forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole, 'T', _attr_T)
    # LSTMBlockCell produces 7 outputs: (i, cs, f, o, ci, co, h).
    _result = _execute.execute(b'LSTMBlockCell', 7, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('LSTMBlockCell', _inputs_flat, _attrs, _result, name)
    _result = _LSTMBlockCellOutput._make(_result)
    return _result
| 5,588,209,277,027,045,000
|
This is the slowpath function for Eager mode.
This is for function lstm_block_cell
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
lstm_block_cell_eager_fallback
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
    """This is the slowpath function for Eager mode.

    This is for function lstm_block_cell.
    """
    # Taken when the eager fast path raises _FallbackException: build the
    # flat input list and attr tuple by hand and execute the op directly.
    _ctx = (ctx if ctx else _context.context())
    # Normalize optional attrs to their registered defaults.
    if (forget_bias is None):
        forget_bias = 1
    forget_bias = _execute.make_float(forget_bias, 'forget_bias')
    if (cell_clip is None):
        cell_clip = 3
    cell_clip = _execute.make_float(cell_clip, 'cell_clip')
    if (use_peephole is None):
        use_peephole = False
    use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
    # Coerce every floating-point input to a single matching dtype T.
    (_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
    _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
    _attrs = ('forget_bias', forget_bias, 'cell_clip', cell_clip, 'use_peephole', use_peephole, 'T', _attr_T)
    # LSTMBlockCell produces 7 outputs: (i, cs, f, o, ci, co, h).
    _result = _execute.execute(b'LSTMBlockCell', 7, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('LSTMBlockCell', _inputs_flat, _attrs, _result, name)
    _result = _LSTMBlockCellOutput._make(_result)
    return _result
|
@tf_export('lstm_block_cell_grad')
def lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):
    """Computes the LSTM cell backward propagation for 1 timestep.

    This implementation is to be used in conjunction of LSTMBlockCell.

    Args:
      x: A `Tensor` of type `half` or `float32`; the input to the LSTM cell,
        shape (batch_size, num_inputs).
      cs_prev: The previous cell state (same type as `x`).
      h_prev: The previous h state (same type as `x`).
      w: The weight matrix (same type as `x`).
      wci, wcf, wco: Peephole weight matrices for the input, forget and
        output gates (same type as `x`).
      b: The bias vector (same type as `x`).
      i, cs, f, o, ci, co: Forward-pass activations for this step (input
        gate, pre-tanh cell state, forget gate, output gate, cell input and
        post-tanh cell), all the same type as `x`.
      cs_grad: The current gradient of cs (same type as `x`).
      h_grad: The gradient of the h vector (same type as `x`).
      use_peephole: A `bool`; whether the cell uses peephole connections.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad,
      wco_grad), each the same type as `x`; `dicfo` is the derivative wrt
      [i, cs, f, o].
    """
    # Generated op wrapper: dispatches to graph mode or the eager fast path.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: record the op in the current graph.
        use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
        (_, _, _op) = _op_def_lib._apply_op_helper('LSTMBlockCellGrad', x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co, cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
        _execute.record_gradient('LSTMBlockCellGrad', _inputs_flat, _attrs, _result, name)
        _result = _LSTMBlockCellGradOutput._make(_result)
        return _result
    else:
        try:
            # Eager fast path implemented in C.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'LSTMBlockCellGrad', name, _ctx._post_execution_callbacks, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, 'use_peephole', use_peephole)
            _result = _LSTMBlockCellGradOutput._make(_result)
            return _result
        except _core._FallbackException:
            # Fast path unavailable for these inputs; use the Python slow path.
            return lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C++ status into the matching Python exception,
            # suppressing the original exception context.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
| 5,906,303,798,717,116,000
|
Computes the LSTM cell backward propagation for 1 timestep.
This implementation is to be used in conjunction of LSTMBlockCell.
Args:
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
The previous cell state.
h_prev: A `Tensor`. Must have the same type as `x`. The previous h state.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
i: A `Tensor`. Must have the same type as `x`. The input gate.
cs: A `Tensor`. Must have the same type as `x`.
The cell state before the tanh.
f: A `Tensor`. Must have the same type as `x`. The forget gate.
o: A `Tensor`. Must have the same type as `x`. The output gate.
ci: A `Tensor`. Must have the same type as `x`. The cell input.
co: A `Tensor`. Must have the same type as `x`. The cell after the tanh.
cs_grad: A `Tensor`. Must have the same type as `x`.
The current gradient of cs.
h_grad: A `Tensor`. Must have the same type as `x`.
The gradient of h vector.
use_peephole: A `bool`. Whether the cell uses peephole connections.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad).
cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs to be back-propped.
dicfo: A `Tensor`. Has the same type as `x`. The derivative wrt to [i, cs, f, o].
wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.
wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.
wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
lstm_block_cell_grad
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
@tf_export('lstm_block_cell_grad')
def lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):
'Computes the LSTM cell backward propagation for 1 timestep.\n\n This implementation is to be used in conjunction of LSTMBlockCell.\n\n Args:\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\n The input to the LSTM cell, shape (batch_size, num_inputs).\n cs_prev: A `Tensor`. Must have the same type as `x`.\n The previous cell state.\n h_prev: A `Tensor`. Must have the same type as `x`. The previous h state.\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\n wci: A `Tensor`. Must have the same type as `x`.\n The weight matrix for input gate peephole connection.\n wcf: A `Tensor`. Must have the same type as `x`.\n The weight matrix for forget gate peephole connection.\n wco: A `Tensor`. Must have the same type as `x`.\n The weight matrix for output gate peephole connection.\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\n i: A `Tensor`. Must have the same type as `x`. The input gate.\n cs: A `Tensor`. Must have the same type as `x`.\n The cell state before the tanh.\n f: A `Tensor`. Must have the same type as `x`. The forget gate.\n o: A `Tensor`. Must have the same type as `x`. The output gate.\n ci: A `Tensor`. Must have the same type as `x`. The cell input.\n co: A `Tensor`. Must have the same type as `x`. The cell after the tanh.\n cs_grad: A `Tensor`. Must have the same type as `x`.\n The current gradient of cs.\n h_grad: A `Tensor`. Must have the same type as `x`.\n The gradient of h vector.\n use_peephole: A `bool`. Whether the cell uses peephole connections.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad).\n\n cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs to be back-propped.\n dicfo: A `Tensor`. Has the same type as `x`. The derivative wrt to [i, cs, f, o].\n wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.\n wcf_grad: A `Tensor`. 
Has the same type as `x`. The gradient for wcf to be back-propped.\n wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_, _, _op) = _op_def_lib._apply_op_helper('LSTMBlockCellGrad', x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co, cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('use_peephole', _op.get_attr('use_peephole'), 'T', _op.get_attr('T'))
_execute.record_gradient('LSTMBlockCellGrad', _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'LSTMBlockCellGrad', name, _ctx._post_execution_callbacks, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, 'use_peephole', use_peephole)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
except _core._FallbackException:
return lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
def lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function lstm_block_cell_grad\n '
_ctx = (ctx if ctx else _context.context())
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad) = _inputs_T
_inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad]
_attrs = ('use_peephole', use_peephole, 'T', _attr_T)
_result = _execute.execute(b'LSTMBlockCellGrad', 5, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('LSTMBlockCellGrad', _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
| -1,909,998,256,456,007,400
|
This is the slowpath function for Eager mode.
This is for function lstm_block_cell_grad
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
lstm_block_cell_grad_eager_fallback
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function lstm_block_cell_grad\n '
_ctx = (ctx if ctx else _context.context())
use_peephole = _execute.make_bool(use_peephole, 'use_peephole')
(_attr_T, _inputs_T) = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad) = _inputs_T
_inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad]
_attrs = ('use_peephole', use_peephole, 'T', _attr_T)
_result = _execute.execute(b'LSTMBlockCellGrad', 5, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('LSTMBlockCellGrad', _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
|
def get_or_create_session_key(self):
'\n Get or create the session key from the request object.\n\n When not present yet, this initializes the session for the user.\n As a result, the request then returns session cookie to the user\n via session middleware.\n '
session_key = self.request.session.session_key
if (session_key is None):
self.request.session.create()
session_key = self.request.session.session_key
return session_key
| 9,179,766,036,658,682,000
|
Get or create the session key from the request object.
When not present yet, this initializes the session for the user.
As a result, the request then returns session cookie to the user
via session middleware.
|
sqrl/sqrl.py
|
get_or_create_session_key
|
JamesonNetworks/django-sqrl
|
python
|
def get_or_create_session_key(self):
'\n Get or create the session key from the request object.\n\n When not present yet, this initializes the session for the user.\n As a result, the request then returns session cookie to the user\n via session middleware.\n '
session_key = self.request.session.session_key
if (session_key is None):
self.request.session.create()
session_key = self.request.session.session_key
return session_key
|
@property
def nut(self):
'\n Cached property for getting :obj:`.models.SQRLNut`.\n\n When accessed for the first time, this property either replaces or creates\n new :obj:`.models.SQRLNut` by using :meth:`.managers.SQRLNutManager.replace_or_create`.\n All the data for the creation of the nut is created by using :meth:`.generate_nut_kwargs`.\n '
if hasattr(self, '_nut'):
return self._nut
self._nut = SQRLNut.objects.replace_or_create(**self.generate_nut_kwargs())
return self._nut
| 3,638,656,951,657,241,000
|
Cached property for getting :obj:`.models.SQRLNut`.
When accessed for the first time, this property either replaces or creates
new :obj:`.models.SQRLNut` by using :meth:`.managers.SQRLNutManager.replace_or_create`.
All the data for the creation of the nut is created by using :meth:`.generate_nut_kwargs`.
|
sqrl/sqrl.py
|
nut
|
JamesonNetworks/django-sqrl
|
python
|
@property
def nut(self):
'\n Cached property for getting :obj:`.models.SQRLNut`.\n\n When accessed for the first time, this property either replaces or creates\n new :obj:`.models.SQRLNut` by using :meth:`.managers.SQRLNutManager.replace_or_create`.\n All the data for the creation of the nut is created by using :meth:`.generate_nut_kwargs`.\n '
if hasattr(self, '_nut'):
return self._nut
self._nut = SQRLNut.objects.replace_or_create(**self.generate_nut_kwargs())
return self._nut
|
def generate_nut_kwargs(self):
'\n Generate kwargs which can be used to create new :obj:`.models.SQRLNut`.\n\n Returns\n -------\n dict\n All required kwargs to instantiate and create :obj:`.models.SQRLNut`.\n '
randomness = generate_randomness(64)
l = (len(randomness) // 2)
return {'session_key': self.get_or_create_session_key(), 'nonce': randomness[:l], 'transaction_nonce': randomness[l:], 'is_transaction_complete': False, 'ip_address': get_user_ip(self.request)}
| 2,031,614,310,051,996,000
|
Generate kwargs which can be used to create new :obj:`.models.SQRLNut`.
Returns
-------
dict
All required kwargs to instantiate and create :obj:`.models.SQRLNut`.
|
sqrl/sqrl.py
|
generate_nut_kwargs
|
JamesonNetworks/django-sqrl
|
python
|
def generate_nut_kwargs(self):
'\n Generate kwargs which can be used to create new :obj:`.models.SQRLNut`.\n\n Returns\n -------\n dict\n All required kwargs to instantiate and create :obj:`.models.SQRLNut`.\n '
randomness = generate_randomness(64)
l = (len(randomness) // 2)
return {'session_key': self.get_or_create_session_key(), 'nonce': randomness[:l], 'transaction_nonce': randomness[l:], 'is_transaction_complete': False, 'ip_address': get_user_ip(self.request)}
|
def get_sqrl_url(self):
'\n Get the server URL of where SQRL client will make first request.\n\n This method should be customized when a custom namespace should be used\n by the SQRL client when generating on the fly per-site public-private keypair.\n For example this can be used when a web site is a SAAS in which different\n "sub-sites" are determined tenant within a URL path - ``mysaas.com/<tenant>``.\n In that case the returned SQRL auth url should be something like -\n ``mysaas.com/mytenant:sqrl/auth/?nut=<nut value>``.\n By using ``:`` within the path will let SQRL client know that up until\n that point full domain name should be used to generate public-private keypair.\n '
return reverse('sqrl:auth')
| 3,748,251,528,209,340,400
|
Get the server URL of where SQRL client will make first request.
This method should be customized when a custom namespace should be used
by the SQRL client when generating on the fly per-site public-private keypair.
For example this can be used when a web site is a SAAS in which different
"sub-sites" are determined tenant within a URL path - ``mysaas.com/<tenant>``.
In that case the returned SQRL auth url should be something like -
``mysaas.com/mytenant:sqrl/auth/?nut=<nut value>``.
By using ``:`` within the path will let SQRL client know that up until
that point full domain name should be used to generate public-private keypair.
|
sqrl/sqrl.py
|
get_sqrl_url
|
JamesonNetworks/django-sqrl
|
python
|
def get_sqrl_url(self):
'\n Get the server URL of where SQRL client will make first request.\n\n This method should be customized when a custom namespace should be used\n by the SQRL client when generating on the fly per-site public-private keypair.\n For example this can be used when a web site is a SAAS in which different\n "sub-sites" are determined tenant within a URL path - ``mysaas.com/<tenant>``.\n In that case the returned SQRL auth url should be something like -\n ``mysaas.com/mytenant:sqrl/auth/?nut=<nut value>``.\n By using ``:`` within the path will let SQRL client know that up until\n that point full domain name should be used to generate public-private keypair.\n '
return reverse('sqrl:auth')
|
def get_sqrl_url_params(self):
'\n Get SQRL url params to be added as querystring params in the SQRL url.\n\n By default this only adds ``nut=<nut>``.\n\n Returns\n -------\n str\n URLEncoded querystring params\n '
qd = QueryDict('', mutable=True)
qd.update({'nut': self.nut.nonce})
return qd.urlencode()
| 8,559,213,639,511,226,000
|
Get SQRL url params to be added as querystring params in the SQRL url.
By default this only adds ``nut=<nut>``.
Returns
-------
str
URLEncoded querystring params
|
sqrl/sqrl.py
|
get_sqrl_url_params
|
JamesonNetworks/django-sqrl
|
python
|
def get_sqrl_url_params(self):
'\n Get SQRL url params to be added as querystring params in the SQRL url.\n\n By default this only adds ``nut=<nut>``.\n\n Returns\n -------\n str\n URLEncoded querystring params\n '
qd = QueryDict(, mutable=True)
qd.update({'nut': self.nut.nonce})
return qd.urlencode()
|
@property
def url(self):
'\n Property for getting only server-side SQRL auth view URL.\n\n This does not include the full domain within the URL.\n The URL is always relative to the current domain of the site.\n '
return '{url}?{params}'.format(url=self.get_sqrl_url(), params=self.get_sqrl_url_params())
| -2,513,625,284,204,591,000
|
Property for getting only server-side SQRL auth view URL.
This does not include the full domain within the URL.
The URL is always relative to the current domain of the site.
|
sqrl/sqrl.py
|
url
|
JamesonNetworks/django-sqrl
|
python
|
@property
def url(self):
'\n Property for getting only server-side SQRL auth view URL.\n\n This does not include the full domain within the URL.\n The URL is always relative to the current domain of the site.\n '
return '{url}?{params}'.format(url=self.get_sqrl_url(), params=self.get_sqrl_url_params())
|
@property
def sqrl_url(self):
'\n Property for getting full SQRL auth view URL including SQRL scheme and full domain with port.\n '
return '{scheme}://{host}{url}'.format(scheme=('sqrl' if self.request.is_secure() else 'qrl'), host=self.request.get_host(), url=self.url)
| -4,712,704,604,675,991,000
|
Property for getting full SQRL auth view URL including SQRL scheme and full domain with port.
|
sqrl/sqrl.py
|
sqrl_url
|
JamesonNetworks/django-sqrl
|
python
|
@property
def sqrl_url(self):
'\n \n '
return '{scheme}://{host}{url}'.format(scheme=('sqrl' if self.request.is_secure() else 'qrl'), host=self.request.get_host(), url=self.url)
|
def count_flops_attn(model, _x, y):
'\n A counter for the `thop` package to count the operations in an\n attention operation.\n Meant to be used like:\n macs, params = thop.profile(\n model,\n inputs=(inputs, timestamps),\n custom_ops={QKVAttention: QKVAttention.count_flops},\n )\n '
(b, c, *spatial) = y[0].shape
num_spatial = int(np.prod(spatial))
matmul_ops = (((2 * b) * (num_spatial ** 2)) * c)
model.total_ops += th.DoubleTensor([matmul_ops])
| 5,236,202,715,761,533,000
|
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
|
diff_dalle/unet.py
|
count_flops_attn
|
AranKomat/Diff-DALLE
|
python
|
def count_flops_attn(model, _x, y):
'\n A counter for the `thop` package to count the operations in an\n attention operation.\n Meant to be used like:\n macs, params = thop.profile(\n model,\n inputs=(inputs, timestamps),\n custom_ops={QKVAttention: QKVAttention.count_flops},\n )\n '
(b, c, *spatial) = y[0].shape
num_spatial = int(np.prod(spatial))
matmul_ops = (((2 * b) * (num_spatial ** 2)) * c)
model.total_ops += th.DoubleTensor([matmul_ops])
|
@abstractmethod
def forward(self, x, emb):
'\n Apply the module to `x` given `emb` timestep embeddings.\n '
| 774,829,112,089,547,400
|
Apply the module to `x` given `emb` timestep embeddings.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
@abstractmethod
def forward(self, x, emb):
'\n \n '
|
@abstractmethod
def forward(self, x, y):
'\n Apply the module to `x` given `y`.\n '
| -9,143,765,492,867,446,000
|
Apply the module to `x` given `y`.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
@abstractmethod
def forward(self, x, y):
'\n \n '
|
def forward(self, x, emb):
'\n Apply the block to a Tensor, conditioned on a timestep embedding.\n\n :param x: an [N x C x ...] Tensor of features.\n :param emb: an [N x emb_channels] Tensor of timestep embeddings.\n :return: an [N x C x ...] Tensor of outputs.\n '
return checkpoint(self._forward, (x, emb), self.parameters(), self.use_checkpoint)
| 8,049,035,836,621,033,000
|
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
def forward(self, x, emb):
'\n Apply the block to a Tensor, conditioned on a timestep embedding.\n\n :param x: an [N x C x ...] Tensor of features.\n :param emb: an [N x emb_channels] Tensor of timestep embeddings.\n :return: an [N x C x ...] Tensor of outputs.\n '
return checkpoint(self._forward, (x, emb), self.parameters(), self.use_checkpoint)
|
def forward(self, qkv, y):
'\n Apply QKV attention.\n\n :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n '
(bs, width, length) = qkv.shape
if (y is None):
assert ((width % (3 * self.n_heads)) == 0)
ch = (width // (3 * self.n_heads))
(q, k, v) = qkv.reshape((bs * self.n_heads), (ch * 3), length).split(ch, dim=1)
else:
assert ((width % self.n_heads) == 0)
ch = (width // self.n_heads)
q = qkv.reshape((bs * self.n_heads), ch, length)
k = v = y.reshape((bs * self.n_heads), ch, (- 1))
scale = (1 / math.sqrt(math.sqrt(ch)))
weight = th.einsum('bct,bcs->bts', (q * scale), (k * scale))
weight = self.dropout(th.softmax(weight.float(), dim=(- 1)).type(weight.dtype))
a = th.einsum('bts,bcs->bct', weight, v)
return a.reshape(bs, (- 1), length)
| -546,739,622,385,842,200
|
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
def forward(self, qkv, y):
'\n Apply QKV attention.\n\n :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n '
(bs, width, length) = qkv.shape
if (y is None):
assert ((width % (3 * self.n_heads)) == 0)
ch = (width // (3 * self.n_heads))
(q, k, v) = qkv.reshape((bs * self.n_heads), (ch * 3), length).split(ch, dim=1)
else:
assert ((width % self.n_heads) == 0)
ch = (width // self.n_heads)
q = qkv.reshape((bs * self.n_heads), ch, length)
k = v = y.reshape((bs * self.n_heads), ch, (- 1))
scale = (1 / math.sqrt(math.sqrt(ch)))
weight = th.einsum('bct,bcs->bts', (q * scale), (k * scale))
weight = self.dropout(th.softmax(weight.float(), dim=(- 1)).type(weight.dtype))
a = th.einsum('bts,bcs->bct', weight, v)
return a.reshape(bs, (- 1), length)
|
def forward(self, qkv):
'\n Apply QKV attention.\n\n :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n '
(bs, width, length) = qkv.shape
assert ((width % (3 * self.n_heads)) == 0)
ch = (width // (3 * self.n_heads))
(q, k, v) = qkv.chunk(3, dim=1)
scale = (1 / math.sqrt(math.sqrt(ch)))
weight = th.einsum('bct,bcs->bts', (q * scale).view((bs * self.n_heads), ch, length), (k * scale).view((bs * self.n_heads), ch, length))
weight = th.softmax(weight.float(), dim=(- 1)).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v.reshape((bs * self.n_heads), ch, length))
return a.reshape(bs, (- 1), length)
| -4,963,406,732,217,807,000
|
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
def forward(self, qkv):
'\n Apply QKV attention.\n\n :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n '
(bs, width, length) = qkv.shape
assert ((width % (3 * self.n_heads)) == 0)
ch = (width // (3 * self.n_heads))
(q, k, v) = qkv.chunk(3, dim=1)
scale = (1 / math.sqrt(math.sqrt(ch)))
weight = th.einsum('bct,bcs->bts', (q * scale).view((bs * self.n_heads), ch, length), (k * scale).view((bs * self.n_heads), ch, length))
weight = th.softmax(weight.float(), dim=(- 1)).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v.reshape((bs * self.n_heads), ch, length))
return a.reshape(bs, (- 1), length)
|
def convert_to_fp16(self):
'\n Convert the torso of the model to float16.\n '
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
if hasattr(self, 'text_encoder'):
self.text_encoder.apply(convert_module_to_f16_2)
| -6,390,348,050,961,245,000
|
Convert the torso of the model to float16.
|
diff_dalle/unet.py
|
convert_to_fp16
|
AranKomat/Diff-DALLE
|
python
|
def convert_to_fp16(self):
'\n \n '
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
if hasattr(self, 'text_encoder'):
self.text_encoder.apply(convert_module_to_f16_2)
|
def convert_to_fp32(self):
'\n Convert the torso of the model to float32.\n '
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
if hasattr(self, 'text_encoder'):
self.text_encoder.apply(convert_module_to_f32)
| -1,808,874,455,012,511,700
|
Convert the torso of the model to float32.
|
diff_dalle/unet.py
|
convert_to_fp32
|
AranKomat/Diff-DALLE
|
python
|
def convert_to_fp32(self):
'\n \n '
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
if hasattr(self, 'text_encoder'):
self.text_encoder.apply(convert_module_to_f32)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.