body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
b0b3a495a1561af28d8f5f64bff4eb3760c26fdce0abe64d952540fcb5b7bc9c | def broadcast(self, data):
'Takes a list of message strings and writes a broadcast message to scratch'
if isinstance(data, list):
message = 'broadcast'
for mess in data:
message += (' "%s"' % mess)
self._send(message)
else:
self._send(('broadcast "%s"' % data)) | Takes a list of message strings and writes a broadcast message to scratch | scratch/__init__.py | broadcast | qihboy/py-scratch | 2 | python | def broadcast(self, data):
if isinstance(data, list):
message = 'broadcast'
for mess in data:
message += (' "%s"' % mess)
self._send(message)
else:
self._send(('broadcast "%s"' % data)) | def broadcast(self, data):
if isinstance(data, list):
message = 'broadcast'
for mess in data:
message += (' "%s"' % mess)
self._send(message)
else:
self._send(('broadcast "%s"' % data))<|docstring|>Takes a list of message strings and writes a broadcast message to scratch<|endoftext|> |
d871c09474f98a429c33705d32e47973f987ab6fb9310763068420db90de84d7 | def add_click_for_argument(argument: Argument, user: User) -> bool:
'\n Increases clicks of a given argument.\n\n :param argument: Argument from User\n :param user: User\n :return:\n '
if (user.nickname == nick_of_anonymous_user):
LOG.debug('User is anonymous, not counting clicks')
return False
LOG.debug('Increasing vote for argument %s ', argument.uid)
if (argument.argument_uid is None):
LOG.debug('Undercut depth 0')
__add_click_for_argument(user, argument)
else:
db_undercuted_arg_step_1: Argument = argument.attacks
if (db_undercuted_arg_step_1.argument_uid is None):
LOG.debug('Undercut depth 1')
__add_click_for_undercut_step_1(argument, db_undercuted_arg_step_1, user)
else:
LOG.debug('Undercut depth 2')
__add_click_for_undercut_step_2(argument, db_undercuted_arg_step_1, user)
return True | Increases clicks of a given argument.
:param argument: Argument from User
:param user: User
:return: | dbas/handler/voting.py | add_click_for_argument | hhucn/dbas | 23 | python | def add_click_for_argument(argument: Argument, user: User) -> bool:
'\n Increases clicks of a given argument.\n\n :param argument: Argument from User\n :param user: User\n :return:\n '
if (user.nickname == nick_of_anonymous_user):
LOG.debug('User is anonymous, not counting clicks')
return False
LOG.debug('Increasing vote for argument %s ', argument.uid)
if (argument.argument_uid is None):
LOG.debug('Undercut depth 0')
__add_click_for_argument(user, argument)
else:
db_undercuted_arg_step_1: Argument = argument.attacks
if (db_undercuted_arg_step_1.argument_uid is None):
LOG.debug('Undercut depth 1')
__add_click_for_undercut_step_1(argument, db_undercuted_arg_step_1, user)
else:
LOG.debug('Undercut depth 2')
__add_click_for_undercut_step_2(argument, db_undercuted_arg_step_1, user)
return True | def add_click_for_argument(argument: Argument, user: User) -> bool:
'\n Increases clicks of a given argument.\n\n :param argument: Argument from User\n :param user: User\n :return:\n '
if (user.nickname == nick_of_anonymous_user):
LOG.debug('User is anonymous, not counting clicks')
return False
LOG.debug('Increasing vote for argument %s ', argument.uid)
if (argument.argument_uid is None):
LOG.debug('Undercut depth 0')
__add_click_for_argument(user, argument)
else:
db_undercuted_arg_step_1: Argument = argument.attacks
if (db_undercuted_arg_step_1.argument_uid is None):
LOG.debug('Undercut depth 1')
__add_click_for_undercut_step_1(argument, db_undercuted_arg_step_1, user)
else:
LOG.debug('Undercut depth 2')
__add_click_for_undercut_step_2(argument, db_undercuted_arg_step_1, user)
return True<|docstring|>Increases clicks of a given argument.
:param argument: Argument from User
:param user: User
:return:<|endoftext|> |
4c6930d77050597f2106cae9cee9e1c583848d516f431270fba047bc46e33aa4 | def __add_click_for_argument(user: User, argument: Argument):
'\n Add click for a specific argument\n\n :param user: User\n :param argument: Argument\n :return: None\n '
conclusion = argument.conclusion
__click_argument(argument, user, True)
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_statement(conclusion, user, argument.is_supportive)
__argument_seen_by_user(user, argument) | Add click for a specific argument
:param user: User
:param argument: Argument
:return: None | dbas/handler/voting.py | __add_click_for_argument | hhucn/dbas | 23 | python | def __add_click_for_argument(user: User, argument: Argument):
'\n Add click for a specific argument\n\n :param user: User\n :param argument: Argument\n :return: None\n '
conclusion = argument.conclusion
__click_argument(argument, user, True)
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_statement(conclusion, user, argument.is_supportive)
__argument_seen_by_user(user, argument) | def __add_click_for_argument(user: User, argument: Argument):
'\n Add click for a specific argument\n\n :param user: User\n :param argument: Argument\n :return: None\n '
conclusion = argument.conclusion
__click_argument(argument, user, True)
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_statement(conclusion, user, argument.is_supportive)
__argument_seen_by_user(user, argument)<|docstring|>Add click for a specific argument
:param user: User
:param argument: Argument
:return: None<|endoftext|> |
e35c831c479898e68edcd0811d728df4d489f1526dd4e5c2dca6d39219f5d05b | def __add_click_for_undercut_step_1(argument: Argument, undercuted_arg_step_1: Argument, user: User):
'\n Add clicks for an first order undercut\n\n :param argument: Argument\n :param undercuted_arg_step_1: Argument\n :param user: User\n :return: None\n '
undercuted_arg_step_1_concl: Statement = undercuted_arg_step_1.conclusion
__click_argument(argument, user, True)
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_argument(undercuted_arg_step_1, user, argument.is_supportive)
__vote_premisesgroup(undercuted_arg_step_1.premisegroup_uid, user, True)
__click_statement(undercuted_arg_step_1_concl, user, (not argument.is_supportive))
__argument_seen_by_user(user, argument)
__argument_seen_by_user(user, undercuted_arg_step_1) | Add clicks for an first order undercut
:param argument: Argument
:param undercuted_arg_step_1: Argument
:param user: User
:return: None | dbas/handler/voting.py | __add_click_for_undercut_step_1 | hhucn/dbas | 23 | python | def __add_click_for_undercut_step_1(argument: Argument, undercuted_arg_step_1: Argument, user: User):
'\n Add clicks for an first order undercut\n\n :param argument: Argument\n :param undercuted_arg_step_1: Argument\n :param user: User\n :return: None\n '
undercuted_arg_step_1_concl: Statement = undercuted_arg_step_1.conclusion
__click_argument(argument, user, True)
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_argument(undercuted_arg_step_1, user, argument.is_supportive)
__vote_premisesgroup(undercuted_arg_step_1.premisegroup_uid, user, True)
__click_statement(undercuted_arg_step_1_concl, user, (not argument.is_supportive))
__argument_seen_by_user(user, argument)
__argument_seen_by_user(user, undercuted_arg_step_1) | def __add_click_for_undercut_step_1(argument: Argument, undercuted_arg_step_1: Argument, user: User):
'\n Add clicks for an first order undercut\n\n :param argument: Argument\n :param undercuted_arg_step_1: Argument\n :param user: User\n :return: None\n '
undercuted_arg_step_1_concl: Statement = undercuted_arg_step_1.conclusion
__click_argument(argument, user, True)
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_argument(undercuted_arg_step_1, user, argument.is_supportive)
__vote_premisesgroup(undercuted_arg_step_1.premisegroup_uid, user, True)
__click_statement(undercuted_arg_step_1_concl, user, (not argument.is_supportive))
__argument_seen_by_user(user, argument)
__argument_seen_by_user(user, undercuted_arg_step_1)<|docstring|>Add clicks for an first order undercut
:param argument: Argument
:param undercuted_arg_step_1: Argument
:param user: User
:return: None<|endoftext|> |
57626b2da3a90e0b4d3ede70b92fd8c95886a6cc8734201d04b14ef1a8e0de65 | def __add_click_for_undercut_step_2(argument: Argument, undercuted_arg_step_1: Argument, user: User):
'\n Add clicks for an second order undercut\n\n :param argument: Argument\n :param undercuted_arg_step_1: Argument\n :param user: User\n :return: None\n '
undercuted_arg_step_2: Argument = undercuted_arg_step_1.attacks
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_argument(argument, user, True)
__click_argument(undercuted_arg_step_1, user, False)
__vote_premisesgroup(undercuted_arg_step_1.premisegroup_uid, user, False)
__argument_seen_by_user(user, argument)
__argument_seen_by_user(user, undercuted_arg_step_1)
__argument_seen_by_user(user, undercuted_arg_step_2) | Add clicks for an second order undercut
:param argument: Argument
:param undercuted_arg_step_1: Argument
:param user: User
:return: None | dbas/handler/voting.py | __add_click_for_undercut_step_2 | hhucn/dbas | 23 | python | def __add_click_for_undercut_step_2(argument: Argument, undercuted_arg_step_1: Argument, user: User):
'\n Add clicks for an second order undercut\n\n :param argument: Argument\n :param undercuted_arg_step_1: Argument\n :param user: User\n :return: None\n '
undercuted_arg_step_2: Argument = undercuted_arg_step_1.attacks
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_argument(argument, user, True)
__click_argument(undercuted_arg_step_1, user, False)
__vote_premisesgroup(undercuted_arg_step_1.premisegroup_uid, user, False)
__argument_seen_by_user(user, argument)
__argument_seen_by_user(user, undercuted_arg_step_1)
__argument_seen_by_user(user, undercuted_arg_step_2) | def __add_click_for_undercut_step_2(argument: Argument, undercuted_arg_step_1: Argument, user: User):
'\n Add clicks for an second order undercut\n\n :param argument: Argument\n :param undercuted_arg_step_1: Argument\n :param user: User\n :return: None\n '
undercuted_arg_step_2: Argument = undercuted_arg_step_1.attacks
__vote_premisesgroup(argument.premisegroup_uid, user, True)
__click_argument(argument, user, True)
__click_argument(undercuted_arg_step_1, user, False)
__vote_premisesgroup(undercuted_arg_step_1.premisegroup_uid, user, False)
__argument_seen_by_user(user, argument)
__argument_seen_by_user(user, undercuted_arg_step_1)
__argument_seen_by_user(user, undercuted_arg_step_2)<|docstring|>Add clicks for an second order undercut
:param argument: Argument
:param undercuted_arg_step_1: Argument
:param user: User
:return: None<|endoftext|> |
559cffbfd724eb5310101d0e6592732ce39a8139365d1bd66af4ebad4684bc29 | def add_click_for_statement(stmt_or_arg: Statement, db_user: User, supportive: bool):
'\n Adds a clicks for the given statement.\n\n :param db_user: User\n :param stmt_or_arg: Statement\n :param supportive: boolean\n :return: Boolean\n '
LOG.debug('Increasing %s vote for statement %s', ('up' if supportive else 'down'), stmt_or_arg.uid)
if (db_user.nickname == nick_of_anonymous_user):
return False
__click_statement(stmt_or_arg, db_user, supportive)
__statement_seen_by_user(db_user, stmt_or_arg)
return True | Adds a clicks for the given statement.
:param db_user: User
:param stmt_or_arg: Statement
:param supportive: boolean
:return: Boolean | dbas/handler/voting.py | add_click_for_statement | hhucn/dbas | 23 | python | def add_click_for_statement(stmt_or_arg: Statement, db_user: User, supportive: bool):
'\n Adds a clicks for the given statement.\n\n :param db_user: User\n :param stmt_or_arg: Statement\n :param supportive: boolean\n :return: Boolean\n '
LOG.debug('Increasing %s vote for statement %s', ('up' if supportive else 'down'), stmt_or_arg.uid)
if (db_user.nickname == nick_of_anonymous_user):
return False
__click_statement(stmt_or_arg, db_user, supportive)
__statement_seen_by_user(db_user, stmt_or_arg)
return True | def add_click_for_statement(stmt_or_arg: Statement, db_user: User, supportive: bool):
'\n Adds a clicks for the given statement.\n\n :param db_user: User\n :param stmt_or_arg: Statement\n :param supportive: boolean\n :return: Boolean\n '
LOG.debug('Increasing %s vote for statement %s', ('up' if supportive else 'down'), stmt_or_arg.uid)
if (db_user.nickname == nick_of_anonymous_user):
return False
__click_statement(stmt_or_arg, db_user, supportive)
__statement_seen_by_user(db_user, stmt_or_arg)
return True<|docstring|>Adds a clicks for the given statement.
:param db_user: User
:param stmt_or_arg: Statement
:param supportive: boolean
:return: Boolean<|endoftext|> |
66714d91d4d22b5b6e334b5baa8e26f94373e3097ebb2bcc9cfef8664f79effa | def add_seen_statement(statement: Statement, user: User):
'\n Adds the uid of the statement into the seen_by list, mapped with the given user uid\n\n :param user:current user\n :param statement: Statement which was seen by the user.\n :return: undefined\n '
if ((not isinstance(statement, Statement)) or (not isinstance(user, User)) or user.is_anonymous()):
return False
LOG.debug('Statement %s, for user %s', statement, user.uid)
val = __statement_seen_by_user(user, statement)
return val | Adds the uid of the statement into the seen_by list, mapped with the given user uid
:param user:current user
:param statement: Statement which was seen by the user.
:return: undefined | dbas/handler/voting.py | add_seen_statement | hhucn/dbas | 23 | python | def add_seen_statement(statement: Statement, user: User):
'\n Adds the uid of the statement into the seen_by list, mapped with the given user uid\n\n :param user:current user\n :param statement: Statement which was seen by the user.\n :return: undefined\n '
if ((not isinstance(statement, Statement)) or (not isinstance(user, User)) or user.is_anonymous()):
return False
LOG.debug('Statement %s, for user %s', statement, user.uid)
val = __statement_seen_by_user(user, statement)
return val | def add_seen_statement(statement: Statement, user: User):
'\n Adds the uid of the statement into the seen_by list, mapped with the given user uid\n\n :param user:current user\n :param statement: Statement which was seen by the user.\n :return: undefined\n '
if ((not isinstance(statement, Statement)) or (not isinstance(user, User)) or user.is_anonymous()):
return False
LOG.debug('Statement %s, for user %s', statement, user.uid)
val = __statement_seen_by_user(user, statement)
return val<|docstring|>Adds the uid of the statement into the seen_by list, mapped with the given user uid
:param user:current user
:param statement: Statement which was seen by the user.
:return: undefined<|endoftext|> |
9fb00536eb91fad4d9da86aa9794f7f0670cf9c856b2ec549617048f1b14f4e4 | def add_seen_argument(argument_uid: int, user: User):
'\n Adds the uid of the argument into the seen_by list as well as all included statements, mapped with the given user\n uid\n\n :param user: current user\n :param argument_uid: uid of the argument\n :return: undefined\n '
if ((not is_integer(argument_uid)) or (not isinstance(user, User)) or user.is_anonymous()):
return False
LOG.debug('Argument %s, for user %s', argument_uid, user.uid)
argument: Argument = DBDiscussionSession.query(Argument).get(argument_uid)
__argument_seen_by_user(user, argument)
for premise in argument.premisegroup.premises:
__statement_seen_by_user(user, premise.statement)
if argument.conclusion:
__statement_seen_by_user(user, argument.conclusion)
else:
while (not argument.conclusion):
argument = argument.attacks
__argument_seen_by_user(user, argument)
return True | Adds the uid of the argument into the seen_by list as well as all included statements, mapped with the given user
uid
:param user: current user
:param argument_uid: uid of the argument
:return: undefined | dbas/handler/voting.py | add_seen_argument | hhucn/dbas | 23 | python | def add_seen_argument(argument_uid: int, user: User):
'\n Adds the uid of the argument into the seen_by list as well as all included statements, mapped with the given user\n uid\n\n :param user: current user\n :param argument_uid: uid of the argument\n :return: undefined\n '
if ((not is_integer(argument_uid)) or (not isinstance(user, User)) or user.is_anonymous()):
return False
LOG.debug('Argument %s, for user %s', argument_uid, user.uid)
argument: Argument = DBDiscussionSession.query(Argument).get(argument_uid)
__argument_seen_by_user(user, argument)
for premise in argument.premisegroup.premises:
__statement_seen_by_user(user, premise.statement)
if argument.conclusion:
__statement_seen_by_user(user, argument.conclusion)
else:
while (not argument.conclusion):
argument = argument.attacks
__argument_seen_by_user(user, argument)
return True | def add_seen_argument(argument_uid: int, user: User):
'\n Adds the uid of the argument into the seen_by list as well as all included statements, mapped with the given user\n uid\n\n :param user: current user\n :param argument_uid: uid of the argument\n :return: undefined\n '
if ((not is_integer(argument_uid)) or (not isinstance(user, User)) or user.is_anonymous()):
return False
LOG.debug('Argument %s, for user %s', argument_uid, user.uid)
argument: Argument = DBDiscussionSession.query(Argument).get(argument_uid)
__argument_seen_by_user(user, argument)
for premise in argument.premisegroup.premises:
__statement_seen_by_user(user, premise.statement)
if argument.conclusion:
__statement_seen_by_user(user, argument.conclusion)
else:
while (not argument.conclusion):
argument = argument.attacks
__argument_seen_by_user(user, argument)
return True<|docstring|>Adds the uid of the argument into the seen_by list as well as all included statements, mapped with the given user
uid
:param user: current user
:param argument_uid: uid of the argument
:return: undefined<|endoftext|> |
42048fa45cf21b5fd4c93e3545938f22386855909abacf808cd165187d9843e0 | def clear_vote_and_seen_values_of_user(user: User):
'\n Delete all votes/clicks/mards\n\n :param user: User\n :return: Boolean\n '
DBDiscussionSession.query(SeenStatement).filter_by(user=user).delete()
DBDiscussionSession.query(SeenArgument).filter_by(user_uid=user.uid).delete()
DBDiscussionSession.query(MarkedArgument).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(MarkedStatement).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(ClickedArgument).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(ClickedStatement).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.flush()
return True | Delete all votes/clicks/mards
:param user: User
:return: Boolean | dbas/handler/voting.py | clear_vote_and_seen_values_of_user | hhucn/dbas | 23 | python | def clear_vote_and_seen_values_of_user(user: User):
'\n Delete all votes/clicks/mards\n\n :param user: User\n :return: Boolean\n '
DBDiscussionSession.query(SeenStatement).filter_by(user=user).delete()
DBDiscussionSession.query(SeenArgument).filter_by(user_uid=user.uid).delete()
DBDiscussionSession.query(MarkedArgument).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(MarkedStatement).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(ClickedArgument).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(ClickedStatement).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.flush()
return True | def clear_vote_and_seen_values_of_user(user: User):
'\n Delete all votes/clicks/mards\n\n :param user: User\n :return: Boolean\n '
DBDiscussionSession.query(SeenStatement).filter_by(user=user).delete()
DBDiscussionSession.query(SeenArgument).filter_by(user_uid=user.uid).delete()
DBDiscussionSession.query(MarkedArgument).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(MarkedStatement).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(ClickedArgument).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.query(ClickedStatement).filter_by(author_uid=user.uid).delete()
DBDiscussionSession.flush()
return True<|docstring|>Delete all votes/clicks/mards
:param user: User
:return: Boolean<|endoftext|> |
9e46e105db95367e75d6178b9e687b63d1cc1654ee716748b4e81ddb3790f876 | def __click_argument(argument, user, is_up_vote):
'\n Check if there is a vote for the argument. If not, we will create a new one, otherwise the current one will be\n invalid and we will create a new entry.\n\n :param argument: Argument\n :param user: User\n :param is_up_vote: Boolean\n :return: None\n '
if (argument is None):
LOG.debug('Argument is None')
return
LOG.debug('Argument %s, user %s', argument.uid, user.nickname)
db_all_valid_votes = DBDiscussionSession.query(ClickedArgument).filter((ClickedArgument.argument_uid == argument.uid), (ClickedArgument.author_uid == user.uid), (ClickedArgument.is_valid == True))
db_current_vote = db_all_valid_votes.filter_by(is_up_vote=is_up_vote).first()
db_old_votes = db_all_valid_votes.all()
if (db_current_vote in db_old_votes):
db_old_votes.remove(db_current_vote)
for old_vote in db_old_votes:
LOG.debug('Setting old vote %s as invalid', old_vote.uid)
old_vote.set_valid(False)
DBDiscussionSession.flush()
db_new_vote = None
if (not db_current_vote):
LOG.debug('Add vote for argument %s', argument.uid)
db_new_vote = ClickedArgument(argument=argument, user=user, is_up_vote=is_up_vote, is_valid=True)
DBDiscussionSession.add(db_new_vote)
DBDiscussionSession.flush()
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=argument.conclusion_uid).all()
for arg in db_arguments:
db_votes_for_arg = DBDiscussionSession.query(ClickedArgument).filter((ClickedArgument.argument_uid == arg.uid), (ClickedArgument.is_valid == True), (ClickedArgument.author_uid == user.uid), (ClickedArgument.is_up_vote == argument.is_supportive)).all()
if (db_new_vote and (db_new_vote in db_votes_for_arg)):
db_votes_for_arg.remove(db_new_vote)
for vote in db_votes_for_arg:
vote.set_valid(False)
DBDiscussionSession.flush() | Check if there is a vote for the argument. If not, we will create a new one, otherwise the current one will be
invalid and we will create a new entry.
:param argument: Argument
:param user: User
:param is_up_vote: Boolean
:return: None | dbas/handler/voting.py | __click_argument | hhucn/dbas | 23 | python | def __click_argument(argument, user, is_up_vote):
'\n Check if there is a vote for the argument. If not, we will create a new one, otherwise the current one will be\n invalid and we will create a new entry.\n\n :param argument: Argument\n :param user: User\n :param is_up_vote: Boolean\n :return: None\n '
if (argument is None):
LOG.debug('Argument is None')
return
LOG.debug('Argument %s, user %s', argument.uid, user.nickname)
db_all_valid_votes = DBDiscussionSession.query(ClickedArgument).filter((ClickedArgument.argument_uid == argument.uid), (ClickedArgument.author_uid == user.uid), (ClickedArgument.is_valid == True))
db_current_vote = db_all_valid_votes.filter_by(is_up_vote=is_up_vote).first()
db_old_votes = db_all_valid_votes.all()
if (db_current_vote in db_old_votes):
db_old_votes.remove(db_current_vote)
for old_vote in db_old_votes:
LOG.debug('Setting old vote %s as invalid', old_vote.uid)
old_vote.set_valid(False)
DBDiscussionSession.flush()
db_new_vote = None
if (not db_current_vote):
LOG.debug('Add vote for argument %s', argument.uid)
db_new_vote = ClickedArgument(argument=argument, user=user, is_up_vote=is_up_vote, is_valid=True)
DBDiscussionSession.add(db_new_vote)
DBDiscussionSession.flush()
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=argument.conclusion_uid).all()
for arg in db_arguments:
db_votes_for_arg = DBDiscussionSession.query(ClickedArgument).filter((ClickedArgument.argument_uid == arg.uid), (ClickedArgument.is_valid == True), (ClickedArgument.author_uid == user.uid), (ClickedArgument.is_up_vote == argument.is_supportive)).all()
if (db_new_vote and (db_new_vote in db_votes_for_arg)):
db_votes_for_arg.remove(db_new_vote)
for vote in db_votes_for_arg:
vote.set_valid(False)
DBDiscussionSession.flush() | def __click_argument(argument, user, is_up_vote):
'\n Check if there is a vote for the argument. If not, we will create a new one, otherwise the current one will be\n invalid and we will create a new entry.\n\n :param argument: Argument\n :param user: User\n :param is_up_vote: Boolean\n :return: None\n '
if (argument is None):
LOG.debug('Argument is None')
return
LOG.debug('Argument %s, user %s', argument.uid, user.nickname)
db_all_valid_votes = DBDiscussionSession.query(ClickedArgument).filter((ClickedArgument.argument_uid == argument.uid), (ClickedArgument.author_uid == user.uid), (ClickedArgument.is_valid == True))
db_current_vote = db_all_valid_votes.filter_by(is_up_vote=is_up_vote).first()
db_old_votes = db_all_valid_votes.all()
if (db_current_vote in db_old_votes):
db_old_votes.remove(db_current_vote)
for old_vote in db_old_votes:
LOG.debug('Setting old vote %s as invalid', old_vote.uid)
old_vote.set_valid(False)
DBDiscussionSession.flush()
db_new_vote = None
if (not db_current_vote):
LOG.debug('Add vote for argument %s', argument.uid)
db_new_vote = ClickedArgument(argument=argument, user=user, is_up_vote=is_up_vote, is_valid=True)
DBDiscussionSession.add(db_new_vote)
DBDiscussionSession.flush()
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=argument.conclusion_uid).all()
for arg in db_arguments:
db_votes_for_arg = DBDiscussionSession.query(ClickedArgument).filter((ClickedArgument.argument_uid == arg.uid), (ClickedArgument.is_valid == True), (ClickedArgument.author_uid == user.uid), (ClickedArgument.is_up_vote == argument.is_supportive)).all()
if (db_new_vote and (db_new_vote in db_votes_for_arg)):
db_votes_for_arg.remove(db_new_vote)
for vote in db_votes_for_arg:
vote.set_valid(False)
DBDiscussionSession.flush()<|docstring|>Check if there is a vote for the argument. If not, we will create a new one, otherwise the current one will be
invalid and we will create a new entry.
:param argument: Argument
:param user: User
:param is_up_vote: Boolean
:return: None<|endoftext|> |
deed80c24b61f94e53306a2d26124e8f539f9aee5b41cc63b775eeb5dd41db36 | def __click_statement(statement: Statement, user: User, is_up_vote: bool):
'\n Check if there is a vote for the statement. If not, we will create a new one, otherwise the current one will be\n invalid and we will create a new entry.\n\n :param statement: Statement\n :param user: User\n :param is_up_vote: Boolean\n :return: None\n '
if (statement is None):
LOG.debug('Statement is None')
return
LOG.debug('Statement %s, db_user %s', statement.uid, user.nickname)
db_all_valid_votes = DBDiscussionSession.query(ClickedStatement).filter((ClickedStatement.statement_uid == statement.uid), (ClickedStatement.author_uid == user.uid), (ClickedStatement.is_valid == True))
db_current_vote = db_all_valid_votes.filter_by(is_up_vote=is_up_vote).first()
db_old_votes = db_all_valid_votes.all()
if (db_current_vote in db_old_votes):
db_old_votes.remove(db_current_vote)
for old_vote in db_old_votes:
LOG.debug('Setting old vote %s as invalid', old_vote.uid)
old_vote.set_valid(False)
DBDiscussionSession.flush()
if (not db_current_vote):
LOG.debug('Add vote for statement %s', statement.uid)
db_new_vote = ClickedStatement(statement=statement, user=user, is_up_vote=is_up_vote, is_valid=True)
DBDiscussionSession.add(db_new_vote)
DBDiscussionSession.flush() | Check if there is a vote for the statement. If not, we will create a new one, otherwise the current one will be
invalid and we will create a new entry.
:param statement: Statement
:param user: User
:param is_up_vote: Boolean
:return: None | dbas/handler/voting.py | __click_statement | hhucn/dbas | 23 | python | def __click_statement(statement: Statement, user: User, is_up_vote: bool):
'\n Check if there is a vote for the statement. If not, we will create a new one, otherwise the current one will be\n invalid and we will create a new entry.\n\n :param statement: Statement\n :param user: User\n :param is_up_vote: Boolean\n :return: None\n '
if (statement is None):
LOG.debug('Statement is None')
return
LOG.debug('Statement %s, db_user %s', statement.uid, user.nickname)
db_all_valid_votes = DBDiscussionSession.query(ClickedStatement).filter((ClickedStatement.statement_uid == statement.uid), (ClickedStatement.author_uid == user.uid), (ClickedStatement.is_valid == True))
db_current_vote = db_all_valid_votes.filter_by(is_up_vote=is_up_vote).first()
db_old_votes = db_all_valid_votes.all()
if (db_current_vote in db_old_votes):
db_old_votes.remove(db_current_vote)
for old_vote in db_old_votes:
LOG.debug('Setting old vote %s as invalid', old_vote.uid)
old_vote.set_valid(False)
DBDiscussionSession.flush()
if (not db_current_vote):
LOG.debug('Add vote for statement %s', statement.uid)
db_new_vote = ClickedStatement(statement=statement, user=user, is_up_vote=is_up_vote, is_valid=True)
DBDiscussionSession.add(db_new_vote)
DBDiscussionSession.flush() | def __click_statement(statement: Statement, user: User, is_up_vote: bool):
'\n Check if there is a vote for the statement. If not, we will create a new one, otherwise the current one will be\n invalid and we will create a new entry.\n\n :param statement: Statement\n :param user: User\n :param is_up_vote: Boolean\n :return: None\n '
if (statement is None):
LOG.debug('Statement is None')
return
LOG.debug('Statement %s, db_user %s', statement.uid, user.nickname)
db_all_valid_votes = DBDiscussionSession.query(ClickedStatement).filter((ClickedStatement.statement_uid == statement.uid), (ClickedStatement.author_uid == user.uid), (ClickedStatement.is_valid == True))
db_current_vote = db_all_valid_votes.filter_by(is_up_vote=is_up_vote).first()
db_old_votes = db_all_valid_votes.all()
if (db_current_vote in db_old_votes):
db_old_votes.remove(db_current_vote)
for old_vote in db_old_votes:
LOG.debug('Setting old vote %s as invalid', old_vote.uid)
old_vote.set_valid(False)
DBDiscussionSession.flush()
if (not db_current_vote):
LOG.debug('Add vote for statement %s', statement.uid)
db_new_vote = ClickedStatement(statement=statement, user=user, is_up_vote=is_up_vote, is_valid=True)
DBDiscussionSession.add(db_new_vote)
DBDiscussionSession.flush()<|docstring|>Check if there is a vote for the statement. If not, we will create a new one, otherwise the current one will be
invalid and we will create a new entry.
:param statement: Statement
:param user: User
:param is_up_vote: Boolean
:return: None<|endoftext|> |
8e9a0a3b6e79bf833d4fbd4b49275e6e1ce5e4e5453e7761530f64a43b7a2e78 | def __vote_premisesgroup(premisegroup_uid, user, is_up_vote):
'\n Calls statements-methods for every premise.\n\n :param premisegroup_uid: PremiseGroup.uid\n :param user: User\n :param is_up_vote: Boolean\n :return:\n '
if ((premisegroup_uid is None) or (premisegroup_uid == 0)):
LOG.debug('Premisegroup_uid is None')
return
LOG.debug('Premisegroup_uid %s, user %s', premisegroup_uid, user.nickname)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=premisegroup_uid).all()
for premise in db_premises:
db_statement = DBDiscussionSession.query(Statement).get(premise.statement_uid)
__click_statement(db_statement, user, is_up_vote) | Calls statements-methods for every premise.
:param premisegroup_uid: PremiseGroup.uid
:param user: User
:param is_up_vote: Boolean
:return: | dbas/handler/voting.py | __vote_premisesgroup | hhucn/dbas | 23 | python | def __vote_premisesgroup(premisegroup_uid, user, is_up_vote):
'\n Calls statements-methods for every premise.\n\n :param premisegroup_uid: PremiseGroup.uid\n :param user: User\n :param is_up_vote: Boolean\n :return:\n '
if ((premisegroup_uid is None) or (premisegroup_uid == 0)):
LOG.debug('Premisegroup_uid is None')
return
LOG.debug('Premisegroup_uid %s, user %s', premisegroup_uid, user.nickname)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=premisegroup_uid).all()
for premise in db_premises:
db_statement = DBDiscussionSession.query(Statement).get(premise.statement_uid)
__click_statement(db_statement, user, is_up_vote) | def __vote_premisesgroup(premisegroup_uid, user, is_up_vote):
'\n Calls statements-methods for every premise.\n\n :param premisegroup_uid: PremiseGroup.uid\n :param user: User\n :param is_up_vote: Boolean\n :return:\n '
if ((premisegroup_uid is None) or (premisegroup_uid == 0)):
LOG.debug('Premisegroup_uid is None')
return
LOG.debug('Premisegroup_uid %s, user %s', premisegroup_uid, user.nickname)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=premisegroup_uid).all()
for premise in db_premises:
db_statement = DBDiscussionSession.query(Statement).get(premise.statement_uid)
__click_statement(db_statement, user, is_up_vote)<|docstring|>Calls statements-methods for every premise.
:param premisegroup_uid: PremiseGroup.uid
:param user: User
:param is_up_vote: Boolean
:return:<|endoftext|> |
981250102bad093f6a17d3ccf8f641b45789bddec959f9cbd4b1c7081f4f5bbd | def __argument_seen_by_user(user: User, argument: Argument):
'\n Adds a reference for a seen argument\n\n :param user: current user\n :param argument: uid of the argument\n :return: True if the argument was not seen by the user (until now), false otherwise\n '
db_seen_by = DBDiscussionSession.query(SeenArgument).filter((SeenArgument.argument_uid == argument.uid), (SeenArgument.user_uid == user.uid)).first()
if (not db_seen_by):
DBDiscussionSession.add(SeenArgument(argument=argument, user=user))
DBDiscussionSession.flush()
return True
return False | Adds a reference for a seen argument
:param user: current user
:param argument: uid of the argument
:return: True if the argument was not seen by the user (until now), false otherwise | dbas/handler/voting.py | __argument_seen_by_user | hhucn/dbas | 23 | python | def __argument_seen_by_user(user: User, argument: Argument):
'\n Adds a reference for a seen argument\n\n :param user: current user\n :param argument: uid of the argument\n :return: True if the argument was not seen by the user (until now), false otherwise\n '
db_seen_by = DBDiscussionSession.query(SeenArgument).filter((SeenArgument.argument_uid == argument.uid), (SeenArgument.user_uid == user.uid)).first()
if (not db_seen_by):
DBDiscussionSession.add(SeenArgument(argument=argument, user=user))
DBDiscussionSession.flush()
return True
return False | def __argument_seen_by_user(user: User, argument: Argument):
'\n Adds a reference for a seen argument\n\n :param user: current user\n :param argument: uid of the argument\n :return: True if the argument was not seen by the user (until now), false otherwise\n '
db_seen_by = DBDiscussionSession.query(SeenArgument).filter((SeenArgument.argument_uid == argument.uid), (SeenArgument.user_uid == user.uid)).first()
if (not db_seen_by):
DBDiscussionSession.add(SeenArgument(argument=argument, user=user))
DBDiscussionSession.flush()
return True
return False<|docstring|>Adds a reference for a seen argument
:param user: current user
:param argument: uid of the argument
:return: True if the argument was not seen by the user (until now), false otherwise<|endoftext|> |
ea299f740343647a65f1c74293e858ef8146d0682ddb26d864451d63a9a76f76 | def __statement_seen_by_user(user: User, statement: Statement):
'\n Adds a reference for a seen statement\n\n :param db_user: current user\n :param statement: uid of the statement\n :return: True if the statement was not seen by the user (until now), false otherwise\n '
db_seen_by = DBDiscussionSession.query(SeenStatement).filter((SeenStatement.statement_uid == statement.uid), (SeenStatement.user_uid == user.uid)).first()
if (not db_seen_by):
DBDiscussionSession.add(SeenStatement(statement=statement, user=user))
DBDiscussionSession.flush()
return True
return False | Adds a reference for a seen statement
:param db_user: current user
:param statement: uid of the statement
:return: True if the statement was not seen by the user (until now), false otherwise | dbas/handler/voting.py | __statement_seen_by_user | hhucn/dbas | 23 | python | def __statement_seen_by_user(user: User, statement: Statement):
'\n Adds a reference for a seen statement\n\n :param db_user: current user\n :param statement: uid of the statement\n :return: True if the statement was not seen by the user (until now), false otherwise\n '
db_seen_by = DBDiscussionSession.query(SeenStatement).filter((SeenStatement.statement_uid == statement.uid), (SeenStatement.user_uid == user.uid)).first()
if (not db_seen_by):
DBDiscussionSession.add(SeenStatement(statement=statement, user=user))
DBDiscussionSession.flush()
return True
return False | def __statement_seen_by_user(user: User, statement: Statement):
'\n Adds a reference for a seen statement\n\n :param db_user: current user\n :param statement: uid of the statement\n :return: True if the statement was not seen by the user (until now), false otherwise\n '
db_seen_by = DBDiscussionSession.query(SeenStatement).filter((SeenStatement.statement_uid == statement.uid), (SeenStatement.user_uid == user.uid)).first()
if (not db_seen_by):
DBDiscussionSession.add(SeenStatement(statement=statement, user=user))
DBDiscussionSession.flush()
return True
return False<|docstring|>Adds a reference for a seen statement
:param db_user: current user
:param statement: uid of the statement
:return: True if the statement was not seen by the user (until now), false otherwise<|endoftext|> |
a57684569d23cf11c07be1863dce69599685a181919a34a7ec6671972d385e64 | def __init__(self, **kwds):
'Initialize the model.\n\n The model is specified for three different distance metrics. However,\n the implementation uses only one distance metric. They are used in\n the following order:\n\n 1. `dist_jb`\n\n 2. `dist_hyp`\n\n 3. `dist_epi`\n\n This order was selected based on evaluation of the total standard\n deviation. To compute the response for differing metrics, call the\n model multiple times with different keywords.\n\n Keyword Args:\n dist_jb (float): Joyner-Boore distance to the rupture plane\n (:math:`R_\\text{JB}`, km)\n\n dist_epi (float): Epicentral distance to the rupture plane\n (:math:`R_\\text{epi}`, km)\n\n dist_hyp (float): Hypocentral distance to the rupture plane\n (:math:`R_\\text{hyp}`, km).\n\n mag (float): moment magnitude of the event (:math:`M_w`)\n\n mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".\n\n v_s30 (float): time-averaged shear-wave velocity over the top 30 m\n of the site (:math:`V_{s30}`, m/s).\n '
super(AkkarSandikkayaBommer2014, self).__init__(**kwds)
p = self.params
for k in self.COEFF:
if (p[k] is not None):
dist = p[k]
c = self.COEFF[k]
break
else:
raise NotImplementedError('Must provide at least one distance metric.')
ln_resp_ref = ((c.a_1 + (c.a_3 * ((8.5 - p['mag']) ** 2))) + ((c.a_4 + (c.a_5 * (p['mag'] - c.c_1))) * np.log(np.sqrt(((dist ** 2) + (c.a_6 ** 2))))))
mask = (p['mag'] <= c.c_1)
ln_resp_ref[mask] += (c.a_2 * (p['mag'] - c.c_1))[mask]
ln_resp_ref[(~ mask)] += (c.a_7 * (p['mag'] - c.c_1))[(~ mask)]
if (p['mechanism'] == 'NS'):
ln_resp_ref += c.a_8
elif (p['mechanism'] == 'RS'):
ln_resp_ref += c.a_9
pga_ref = np.exp(ln_resp_ref[self.INDEX_PGA])
if (p['v_s30'] <= self.V_REF):
vs_ratio = (p['v_s30'] / self.V_REF)
site = ((c.b_1 * np.log(vs_ratio)) + (c.b_2 * np.log(((pga_ref + (c.c * (vs_ratio ** c.n))) / ((pga_ref + c.c) * (vs_ratio ** c.n))))))
else:
site = (c.b_1 * np.log((np.minimum(p['v_s30'], c.v_con) / self.V_REF)))
self._ln_resp = (ln_resp_ref + site)
self._ln_std = np.array(c.sd_total) | Initialize the model.
The model is specified for three different distance metrics. However,
the implementation uses only one distance metric. They are used in
the following order:
1. `dist_jb`
2. `dist_hyp`
3. `dist_epi`
This order was selected based on evaluation of the total standard
deviation. To compute the response for differing metrics, call the
model multiple times with different keywords.
Keyword Args:
dist_jb (float): Joyner-Boore distance to the rupture plane
(:math:`R_\text{JB}`, km)
dist_epi (float): Epicentral distance to the rupture plane
(:math:`R_\text{epi}`, km)
dist_hyp (float): Hypocentral distance to the rupture plane
(:math:`R_\text{hyp}`, km).
mag (float): moment magnitude of the event (:math:`M_w`)
mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".
v_s30 (float): time-averaged shear-wave velocity over the top 30 m
of the site (:math:`V_{s30}`, m/s). | pygmm/akkar_sandikkaya_bommer_2014.py | __init__ | nassermarafi/pygmm | 0 | python | def __init__(self, **kwds):
'Initialize the model.\n\n The model is specified for three different distance metrics. However,\n the implementation uses only one distance metric. They are used in\n the following order:\n\n 1. `dist_jb`\n\n 2. `dist_hyp`\n\n 3. `dist_epi`\n\n This order was selected based on evaluation of the total standard\n deviation. To compute the response for differing metrics, call the\n model multiple times with different keywords.\n\n Keyword Args:\n dist_jb (float): Joyner-Boore distance to the rupture plane\n (:math:`R_\\text{JB}`, km)\n\n dist_epi (float): Epicentral distance to the rupture plane\n (:math:`R_\\text{epi}`, km)\n\n dist_hyp (float): Hypocentral distance to the rupture plane\n (:math:`R_\\text{hyp}`, km).\n\n mag (float): moment magnitude of the event (:math:`M_w`)\n\n mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".\n\n v_s30 (float): time-averaged shear-wave velocity over the top 30 m\n of the site (:math:`V_{s30}`, m/s).\n '
super(AkkarSandikkayaBommer2014, self).__init__(**kwds)
p = self.params
for k in self.COEFF:
if (p[k] is not None):
dist = p[k]
c = self.COEFF[k]
break
else:
raise NotImplementedError('Must provide at least one distance metric.')
ln_resp_ref = ((c.a_1 + (c.a_3 * ((8.5 - p['mag']) ** 2))) + ((c.a_4 + (c.a_5 * (p['mag'] - c.c_1))) * np.log(np.sqrt(((dist ** 2) + (c.a_6 ** 2))))))
mask = (p['mag'] <= c.c_1)
ln_resp_ref[mask] += (c.a_2 * (p['mag'] - c.c_1))[mask]
ln_resp_ref[(~ mask)] += (c.a_7 * (p['mag'] - c.c_1))[(~ mask)]
if (p['mechanism'] == 'NS'):
ln_resp_ref += c.a_8
elif (p['mechanism'] == 'RS'):
ln_resp_ref += c.a_9
pga_ref = np.exp(ln_resp_ref[self.INDEX_PGA])
if (p['v_s30'] <= self.V_REF):
vs_ratio = (p['v_s30'] / self.V_REF)
site = ((c.b_1 * np.log(vs_ratio)) + (c.b_2 * np.log(((pga_ref + (c.c * (vs_ratio ** c.n))) / ((pga_ref + c.c) * (vs_ratio ** c.n))))))
else:
site = (c.b_1 * np.log((np.minimum(p['v_s30'], c.v_con) / self.V_REF)))
self._ln_resp = (ln_resp_ref + site)
self._ln_std = np.array(c.sd_total) | def __init__(self, **kwds):
'Initialize the model.\n\n The model is specified for three different distance metrics. However,\n the implementation uses only one distance metric. They are used in\n the following order:\n\n 1. `dist_jb`\n\n 2. `dist_hyp`\n\n 3. `dist_epi`\n\n This order was selected based on evaluation of the total standard\n deviation. To compute the response for differing metrics, call the\n model multiple times with different keywords.\n\n Keyword Args:\n dist_jb (float): Joyner-Boore distance to the rupture plane\n (:math:`R_\\text{JB}`, km)\n\n dist_epi (float): Epicentral distance to the rupture plane\n (:math:`R_\\text{epi}`, km)\n\n dist_hyp (float): Hypocentral distance to the rupture plane\n (:math:`R_\\text{hyp}`, km).\n\n mag (float): moment magnitude of the event (:math:`M_w`)\n\n mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".\n\n v_s30 (float): time-averaged shear-wave velocity over the top 30 m\n of the site (:math:`V_{s30}`, m/s).\n '
super(AkkarSandikkayaBommer2014, self).__init__(**kwds)
p = self.params
for k in self.COEFF:
if (p[k] is not None):
dist = p[k]
c = self.COEFF[k]
break
else:
raise NotImplementedError('Must provide at least one distance metric.')
ln_resp_ref = ((c.a_1 + (c.a_3 * ((8.5 - p['mag']) ** 2))) + ((c.a_4 + (c.a_5 * (p['mag'] - c.c_1))) * np.log(np.sqrt(((dist ** 2) + (c.a_6 ** 2))))))
mask = (p['mag'] <= c.c_1)
ln_resp_ref[mask] += (c.a_2 * (p['mag'] - c.c_1))[mask]
ln_resp_ref[(~ mask)] += (c.a_7 * (p['mag'] - c.c_1))[(~ mask)]
if (p['mechanism'] == 'NS'):
ln_resp_ref += c.a_8
elif (p['mechanism'] == 'RS'):
ln_resp_ref += c.a_9
pga_ref = np.exp(ln_resp_ref[self.INDEX_PGA])
if (p['v_s30'] <= self.V_REF):
vs_ratio = (p['v_s30'] / self.V_REF)
site = ((c.b_1 * np.log(vs_ratio)) + (c.b_2 * np.log(((pga_ref + (c.c * (vs_ratio ** c.n))) / ((pga_ref + c.c) * (vs_ratio ** c.n))))))
else:
site = (c.b_1 * np.log((np.minimum(p['v_s30'], c.v_con) / self.V_REF)))
self._ln_resp = (ln_resp_ref + site)
self._ln_std = np.array(c.sd_total)<|docstring|>Initialize the model.
The model is specified for three different distance metrics. However,
the implementation uses only one distance metric. They are used in
the following order:
1. `dist_jb`
2. `dist_hyp`
3. `dist_epi`
This order was selected based on evaluation of the total standard
deviation. To compute the response for differing metrics, call the
model multiple times with different keywords.
Keyword Args:
dist_jb (float): Joyner-Boore distance to the rupture plane
(:math:`R_\text{JB}`, km)
dist_epi (float): Epicentral distance to the rupture plane
(:math:`R_\text{epi}`, km)
dist_hyp (float): Hypocentral distance to the rupture plane
(:math:`R_\text{hyp}`, km).
mag (float): moment magnitude of the event (:math:`M_w`)
mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".
v_s30 (float): time-averaged shear-wave velocity over the top 30 m
of the site (:math:`V_{s30}`, m/s).<|endoftext|> |
f57f6b0ad337f2f3d3d56abb63ec29bbcdf04b9854d15389e1230d6ad0ea186d | @classmethod
def from_json(cls, json_filename: str):
'Construct the configuration from a json file.'
with open(json_filename, 'r') as f:
config_map = json.load(f)
config = CONFIG_CLASS[config_map['config_class']].from_dict(config_map)
return config | Construct the configuration from a json file. | src/textpruner/configurations.py | from_json | techthiyanes/TextPruner | 133 | python | @classmethod
def from_json(cls, json_filename: str):
with open(json_filename, 'r') as f:
config_map = json.load(f)
config = CONFIG_CLASS[config_map['config_class']].from_dict(config_map)
return config | @classmethod
def from_json(cls, json_filename: str):
with open(json_filename, 'r') as f:
config_map = json.load(f)
config = CONFIG_CLASS[config_map['config_class']].from_dict(config_map)
return config<|docstring|>Construct the configuration from a json file.<|endoftext|> |
3752672f4f3d93185564e5823328fd397e2809e66ed9eb277930988f070633f3 | @classmethod
def from_dict(cls, config_map: dict):
'Construct the configuration from a dict.'
config = CONFIG_CLASS[config_map['config_class']](**config_map)
return config | Construct the configuration from a dict. | src/textpruner/configurations.py | from_dict | techthiyanes/TextPruner | 133 | python | @classmethod
def from_dict(cls, config_map: dict):
config = CONFIG_CLASS[config_map['config_class']](**config_map)
return config | @classmethod
def from_dict(cls, config_map: dict):
config = CONFIG_CLASS[config_map['config_class']](**config_map)
return config<|docstring|>Construct the configuration from a dict.<|endoftext|> |
493804877287f85c4e6455ddaf62b35e636b5f592538b8cf1d0b61269709c054 | def save_to_json(self, json_filename: str):
'Save the configuration the a json file.'
config_map = asdict(self)
with open(json_filename, 'w') as f:
json.dump(config_map, f, indent=2) | Save the configuration the a json file. | src/textpruner/configurations.py | save_to_json | techthiyanes/TextPruner | 133 | python | def save_to_json(self, json_filename: str):
config_map = asdict(self)
with open(json_filename, 'w') as f:
json.dump(config_map, f, indent=2) | def save_to_json(self, json_filename: str):
config_map = asdict(self)
with open(json_filename, 'w') as f:
json.dump(config_map, f, indent=2)<|docstring|>Save the configuration the a json file.<|endoftext|> |
32a83f5a7bec74f857bb90697d733b64ab5415e7b22f153f96653f72ad9b58e0 | def make_chem_pot_diag_from_mp(target: Union[(Composition, str)], additional_elements: List[str]=None, vertex_elements: List[str]=None, atom_energy_yaml: Optional[str]=None):
'Obtain the energies from Materials Project.'
properties = ['task_id', 'full_formula', 'final_energy']
target = (target if isinstance(target, Composition) else Composition(target))
elements = target.chemical_system.split('-')
vertex_elements = (vertex_elements or elements)
vertex_elements = [Element(e) for e in vertex_elements]
if additional_elements:
elements.extend(additional_elements)
query = MpQuery(elements, properties=properties)
comp_es = []
if atom_energy_yaml:
if ('.yaml' in atom_energy_yaml):
energies = loadfn(atom_energy_yaml)
else:
logger.info(f'Atom energy set for {atom_energy_yaml} is used.')
energies = AtomEnergyType.from_string(atom_energy_yaml).energies
diff = {e: (energies[e] - mp_energies[e]) for e in elements}
else:
diff = None
for m in query.materials:
energy = m['final_energy']
if diff:
for (k, v) in Composition(m['full_formula']).as_dict().items():
energy += (diff[k] * v)
comp_es.append(CompositionEnergy(Composition(m['full_formula']), energy, m['task_id']))
comp_es = remove_higher_energy_comp(comp_es)
return ChemPotDiag(comp_es, target, vertex_elements) | Obtain the energies from Materials Project. | pydefect/chem_pot_diag/make_chem_pot_diag.py | make_chem_pot_diag_from_mp | KazMorita/pydefect | 1 | python | def make_chem_pot_diag_from_mp(target: Union[(Composition, str)], additional_elements: List[str]=None, vertex_elements: List[str]=None, atom_energy_yaml: Optional[str]=None):
properties = ['task_id', 'full_formula', 'final_energy']
target = (target if isinstance(target, Composition) else Composition(target))
elements = target.chemical_system.split('-')
vertex_elements = (vertex_elements or elements)
vertex_elements = [Element(e) for e in vertex_elements]
if additional_elements:
elements.extend(additional_elements)
query = MpQuery(elements, properties=properties)
comp_es = []
if atom_energy_yaml:
if ('.yaml' in atom_energy_yaml):
energies = loadfn(atom_energy_yaml)
else:
logger.info(f'Atom energy set for {atom_energy_yaml} is used.')
energies = AtomEnergyType.from_string(atom_energy_yaml).energies
diff = {e: (energies[e] - mp_energies[e]) for e in elements}
else:
diff = None
for m in query.materials:
energy = m['final_energy']
if diff:
for (k, v) in Composition(m['full_formula']).as_dict().items():
energy += (diff[k] * v)
comp_es.append(CompositionEnergy(Composition(m['full_formula']), energy, m['task_id']))
comp_es = remove_higher_energy_comp(comp_es)
return ChemPotDiag(comp_es, target, vertex_elements) | def make_chem_pot_diag_from_mp(target: Union[(Composition, str)], additional_elements: List[str]=None, vertex_elements: List[str]=None, atom_energy_yaml: Optional[str]=None):
properties = ['task_id', 'full_formula', 'final_energy']
target = (target if isinstance(target, Composition) else Composition(target))
elements = target.chemical_system.split('-')
vertex_elements = (vertex_elements or elements)
vertex_elements = [Element(e) for e in vertex_elements]
if additional_elements:
elements.extend(additional_elements)
query = MpQuery(elements, properties=properties)
comp_es = []
if atom_energy_yaml:
if ('.yaml' in atom_energy_yaml):
energies = loadfn(atom_energy_yaml)
else:
logger.info(f'Atom energy set for {atom_energy_yaml} is used.')
energies = AtomEnergyType.from_string(atom_energy_yaml).energies
diff = {e: (energies[e] - mp_energies[e]) for e in elements}
else:
diff = None
for m in query.materials:
energy = m['final_energy']
if diff:
for (k, v) in Composition(m['full_formula']).as_dict().items():
energy += (diff[k] * v)
comp_es.append(CompositionEnergy(Composition(m['full_formula']), energy, m['task_id']))
comp_es = remove_higher_energy_comp(comp_es)
return ChemPotDiag(comp_es, target, vertex_elements)<|docstring|>Obtain the energies from Materials Project.<|endoftext|> |
4c2fad9325269ef2407718c1e628d88152be54761139ce388f698d35ea0f64f2 | @staticmethod
def print_progress(iteration, total, prefix='Progress:', suffix='Complete', decimals=1, barLength=70):
'\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n '
format_str = (('{0:.' + str(decimals)) + 'f}')
percent = format_str.format((100 * (iteration / float(total))))
filled_length = int(round(((barLength * iteration) / float(total))))
bar = (('█' * filled_length) + ('-' * (barLength - filled_length)))
(sys.stdout.write(('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))),)
if (iteration == total):
sys.stdout.write('\n')
sys.stdout.flush() | Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int) | SE4TeC_demo/utils/use_utils.py | print_progress | JingweiZuo/SE2TeC | 1 | python | @staticmethod
def print_progress(iteration, total, prefix='Progress:', suffix='Complete', decimals=1, barLength=70):
'\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n '
format_str = (('{0:.' + str(decimals)) + 'f}')
percent = format_str.format((100 * (iteration / float(total))))
filled_length = int(round(((barLength * iteration) / float(total))))
bar = (('█' * filled_length) + ('-' * (barLength - filled_length)))
(sys.stdout.write(('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))),)
if (iteration == total):
sys.stdout.write('\n')
sys.stdout.flush() | @staticmethod
def print_progress(iteration, total, prefix='Progress:', suffix='Complete', decimals=1, barLength=70):
'\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n '
format_str = (('{0:.' + str(decimals)) + 'f}')
percent = format_str.format((100 * (iteration / float(total))))
filled_length = int(round(((barLength * iteration) / float(total))))
bar = (('█' * filled_length) + ('-' * (barLength - filled_length)))
(sys.stdout.write(('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))),)
if (iteration == total):
sys.stdout.write('\n')
sys.stdout.flush()<|docstring|>Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)<|endoftext|> |
d0b7808b0dbbf3b81cc6813e3916dd474d3f1f9fdad33eb95096de17b97df73a | def __init__(self, rephrased_option_index=None, rephrased_sentence_text=None):
'RephrasedSentenceOption - a model defined in Swagger'
self._rephrased_option_index = None
self._rephrased_sentence_text = None
self.discriminator = None
if (rephrased_option_index is not None):
self.rephrased_option_index = rephrased_option_index
if (rephrased_sentence_text is not None):
self.rephrased_sentence_text = rephrased_sentence_text | RephrasedSentenceOption - a model defined in Swagger | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | __init__ | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | def __init__(self, rephrased_option_index=None, rephrased_sentence_text=None):
self._rephrased_option_index = None
self._rephrased_sentence_text = None
self.discriminator = None
if (rephrased_option_index is not None):
self.rephrased_option_index = rephrased_option_index
if (rephrased_sentence_text is not None):
self.rephrased_sentence_text = rephrased_sentence_text | def __init__(self, rephrased_option_index=None, rephrased_sentence_text=None):
self._rephrased_option_index = None
self._rephrased_sentence_text = None
self.discriminator = None
if (rephrased_option_index is not None):
self.rephrased_option_index = rephrased_option_index
if (rephrased_sentence_text is not None):
self.rephrased_sentence_text = rephrased_sentence_text<|docstring|>RephrasedSentenceOption - a model defined in Swagger<|endoftext|> |
962a4182c704f490442cf9c6e61463b6db2a4520d6819dc2a6671b7126dceaf0 | @property
def rephrased_option_index(self):
'Gets the rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n\n Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501\n\n :return: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n :rtype: int\n '
return self._rephrased_option_index | Gets the rephrased_option_index of this RephrasedSentenceOption. # noqa: E501
Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501
:return: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501
:rtype: int | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | rephrased_option_index | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | @property
def rephrased_option_index(self):
'Gets the rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n\n Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501\n\n :return: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n :rtype: int\n '
return self._rephrased_option_index | @property
def rephrased_option_index(self):
'Gets the rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n\n Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501\n\n :return: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n :rtype: int\n '
return self._rephrased_option_index<|docstring|>Gets the rephrased_option_index of this RephrasedSentenceOption. # noqa: E501
Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501
:return: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501
:rtype: int<|endoftext|> |
e6c1bf16c7bd2591ac242597d53cf609c14fc821cc568baefa6fdb9747e860f2 | @rephrased_option_index.setter
def rephrased_option_index(self, rephrased_option_index):
'Sets the rephrased_option_index of this RephrasedSentenceOption.\n\n Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501\n\n :param rephrased_option_index: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n :type: int\n '
self._rephrased_option_index = rephrased_option_index | Sets the rephrased_option_index of this RephrasedSentenceOption.
Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501
:param rephrased_option_index: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501
:type: int | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | rephrased_option_index | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | @rephrased_option_index.setter
def rephrased_option_index(self, rephrased_option_index):
'Sets the rephrased_option_index of this RephrasedSentenceOption.\n\n Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501\n\n :param rephrased_option_index: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n :type: int\n '
self._rephrased_option_index = rephrased_option_index | @rephrased_option_index.setter
def rephrased_option_index(self, rephrased_option_index):
'Sets the rephrased_option_index of this RephrasedSentenceOption.\n\n Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501\n\n :param rephrased_option_index: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501\n :type: int\n '
self._rephrased_option_index = rephrased_option_index<|docstring|>Sets the rephrased_option_index of this RephrasedSentenceOption.
Ordered index of the rephrasing option, 1-based, with 1 being the best option # noqa: E501
:param rephrased_option_index: The rephrased_option_index of this RephrasedSentenceOption. # noqa: E501
:type: int<|endoftext|> |
aaca88cfae24cabcae5fc6de87d1abc9dc593d839143f09aa85213760226b8fe | @property
def rephrased_sentence_text(self):
'Gets the rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n\n One sentence of output rephrased text of original input sentence # noqa: E501\n\n :return: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n :rtype: str\n '
return self._rephrased_sentence_text | Gets the rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501
One sentence of output rephrased text of original input sentence # noqa: E501
:return: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501
:rtype: str | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | rephrased_sentence_text | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | @property
def rephrased_sentence_text(self):
'Gets the rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n\n One sentence of output rephrased text of original input sentence # noqa: E501\n\n :return: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n :rtype: str\n '
return self._rephrased_sentence_text | @property
def rephrased_sentence_text(self):
'Gets the rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n\n One sentence of output rephrased text of original input sentence # noqa: E501\n\n :return: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n :rtype: str\n '
return self._rephrased_sentence_text<|docstring|>Gets the rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501
One sentence of output rephrased text of original input sentence # noqa: E501
:return: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501
:rtype: str<|endoftext|> |
ca652bc1d43a3f8d74d5bcb52ead9539fef0db1c583c180084b936b052513a12 | @rephrased_sentence_text.setter
def rephrased_sentence_text(self, rephrased_sentence_text):
'Sets the rephrased_sentence_text of this RephrasedSentenceOption.\n\n One sentence of output rephrased text of original input sentence # noqa: E501\n\n :param rephrased_sentence_text: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n :type: str\n '
self._rephrased_sentence_text = rephrased_sentence_text | Sets the rephrased_sentence_text of this RephrasedSentenceOption.
One sentence of output rephrased text of original input sentence # noqa: E501
:param rephrased_sentence_text: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501
:type: str | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | rephrased_sentence_text | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | @rephrased_sentence_text.setter
def rephrased_sentence_text(self, rephrased_sentence_text):
'Sets the rephrased_sentence_text of this RephrasedSentenceOption.\n\n One sentence of output rephrased text of original input sentence # noqa: E501\n\n :param rephrased_sentence_text: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n :type: str\n '
self._rephrased_sentence_text = rephrased_sentence_text | @rephrased_sentence_text.setter
def rephrased_sentence_text(self, rephrased_sentence_text):
'Sets the rephrased_sentence_text of this RephrasedSentenceOption.\n\n One sentence of output rephrased text of original input sentence # noqa: E501\n\n :param rephrased_sentence_text: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501\n :type: str\n '
self._rephrased_sentence_text = rephrased_sentence_text<|docstring|>Sets the rephrased_sentence_text of this RephrasedSentenceOption.
One sentence of output rephrased text of original input sentence # noqa: E501
:param rephrased_sentence_text: The rephrased_sentence_text of this RephrasedSentenceOption. # noqa: E501
:type: str<|endoftext|> |
a51ca593d3ec1c8fed31a7b68b9ad2869152e5d41c98e64c197d46cac4967b57 | def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(RephrasedSentenceOption, dict):
for (key, value) in self.items():
result[key] = value
return result | Returns the model properties as a dict | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | to_dict | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(RephrasedSentenceOption, dict):
for (key, value) in self.items():
result[key] = value
return result | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(RephrasedSentenceOption, dict):
for (key, value) in self.items():
result[key] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99 | def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | Returns the string representation of the model | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | to_str | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | def to_str(self):
return pprint.pformat(self.to_dict()) | def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703 | def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | For `print` and `pprint` | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | __repr__ | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | def __repr__(self):
return self.to_str() | def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
80a4350fbae9df5057d525f810e7200103593f8b606b44ef61bfd9bf0f9df326 | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, RephrasedSentenceOption)):
return False
return (self.__dict__ == other.__dict__) | Returns true if both objects are equal | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | __eq__ | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | def __eq__(self, other):
if (not isinstance(other, RephrasedSentenceOption)):
return False
return (self.__dict__ == other.__dict__) | def __eq__(self, other):
if (not isinstance(other, RephrasedSentenceOption)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|> |
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42 | def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | Returns true if both objects are not equal | cloudmersive_nlp_api_client/models/rephrased_sentence_option.py | __ne__ | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | python | def __ne__(self, other):
return (not (self == other)) | def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|> |
cd01c238595bde18cc6e3a4ae4d56a392e683e61a58c7c7a65a49dd8f548d00c | def join_df_list(df_list):
'\n Performs the merge_asof in a list of dfs\n\n :param df_list: list of data frames\n :type df_list: [pd.DataFrame]\n :return: merged dataframe\n :rtype: pd.DataFrame\n '
size = len(df_list)
df = pd.merge(df_list[0], df_list[1], left_index=True, right_index=True)
for i in range(2, size):
df = pd.merge_asof(df, df_list[i], left_index=True, right_index=True)
return df | Performs the merge_asof in a list of dfs
:param df_list: list of data frames
:type df_list: [pd.DataFrame]
:return: merged dataframe
:rtype: pd.DataFrame | src/lr/analysis/util.py | join_df_list | lukewanless/looking-for-equivalences | 0 | python | def join_df_list(df_list):
'\n Performs the merge_asof in a list of dfs\n\n :param df_list: list of data frames\n :type df_list: [pd.DataFrame]\n :return: merged dataframe\n :rtype: pd.DataFrame\n '
size = len(df_list)
df = pd.merge(df_list[0], df_list[1], left_index=True, right_index=True)
for i in range(2, size):
df = pd.merge_asof(df, df_list[i], left_index=True, right_index=True)
return df | def join_df_list(df_list):
'\n Performs the merge_asof in a list of dfs\n\n :param df_list: list of data frames\n :type df_list: [pd.DataFrame]\n :return: merged dataframe\n :rtype: pd.DataFrame\n '
size = len(df_list)
df = pd.merge(df_list[0], df_list[1], left_index=True, right_index=True)
for i in range(2, size):
df = pd.merge_asof(df, df_list[i], left_index=True, right_index=True)
return df<|docstring|>Performs the merge_asof in a list of dfs
:param df_list: list of data frames
:type df_list: [pd.DataFrame]
:return: merged dataframe
:rtype: pd.DataFrame<|endoftext|> |
c3d19fed5b2f48f6c39f72d5dba7b317222833ac156786232a5f35829cff4a33 | def get(isamAppliance, check_mode=False, force=False):
'\n Retrieve runtime component status\n '
requires_model = None
return isamAppliance.invoke_get('Retrieving web runtime component status', '/isam/runtime_components/', requires_model=requires_model) | Retrieve runtime component status | ibmsecurity/isam/web/runtime/process.py | get | iyartsev/ibmsecurity | 46 | python | def get(isamAppliance, check_mode=False, force=False):
'\n \n '
requires_model = None
return isamAppliance.invoke_get('Retrieving web runtime component status', '/isam/runtime_components/', requires_model=requires_model) | def get(isamAppliance, check_mode=False, force=False):
'\n \n '
requires_model = None
return isamAppliance.invoke_get('Retrieving web runtime component status', '/isam/runtime_components/', requires_model=requires_model)<|docstring|>Retrieve runtime component status<|endoftext|> |
d7fa567af635eeaac1aa6c5edbf53c9aec4d0861aaa9168eb86c85de1f896d4b | def _check(isamAppliance):
'\n Check if the runtime process is configured or not\n :param isamAppliance:\n :return:\n '
ret_obj = get(isamAppliance)
(check_value, warnings) = (False, ret_obj['warnings'])
if (warnings == []):
if (ret_obj['data']['modecode'] == '-1'):
check_value = False
return (check_value, warnings)
else:
check_value = True
return (check_value, warnings)
else:
return (check_value, warnings) | Check if the runtime process is configured or not
:param isamAppliance:
:return: | ibmsecurity/isam/web/runtime/process.py | _check | iyartsev/ibmsecurity | 46 | python | def _check(isamAppliance):
'\n Check if the runtime process is configured or not\n :param isamAppliance:\n :return:\n '
ret_obj = get(isamAppliance)
(check_value, warnings) = (False, ret_obj['warnings'])
if (warnings == []):
if (ret_obj['data']['modecode'] == '-1'):
check_value = False
return (check_value, warnings)
else:
check_value = True
return (check_value, warnings)
else:
return (check_value, warnings) | def _check(isamAppliance):
'\n Check if the runtime process is configured or not\n :param isamAppliance:\n :return:\n '
ret_obj = get(isamAppliance)
(check_value, warnings) = (False, ret_obj['warnings'])
if (warnings == []):
if (ret_obj['data']['modecode'] == '-1'):
check_value = False
return (check_value, warnings)
else:
check_value = True
return (check_value, warnings)
else:
return (check_value, warnings)<|docstring|>Check if the runtime process is configured or not
:param isamAppliance:
:return:<|endoftext|> |
f084fdf5b3b2449d020b360fc7381b9da090738da2f84b2679b08edf5afe60b6 | def config(isamAppliance, admin_pwd, ps_mode='local', user_registry='local', ldap_host=None, ldap_port=None, ldap_dn=None, ldap_pwd=None, ldap_ssl_db=None, ldap_ssl_label=None, ldap_suffix=None, clean_ldap=False, domain='Default', admin_cert_lifetime='1460', ssl_compliance='none', isam_host=None, isam_port='7135', check_mode=False, force=False):
'\n Configure Runtime Component\n :param isamAppliance:\n :return:\n '
requires_model = None
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is False)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post('Configure web runtime Component', '/isam/runtime_components/', {'ps_mode': ps_mode, 'user_registry': user_registry, 'ldap_host': ldap_host, 'ldap_port': ldap_port, 'ldap_dn': ldap_dn, 'ldap_pwd': ldap_pwd, 'ldap_ssl_db': ldap_ssl_db, 'ldap_ssl_label': ldap_ssl_label, 'ldap_suffix': ldap_suffix, 'clean_ldap': clean_ldap, 'domain': domain, 'admin_pwd': admin_pwd, 'admin_cert_lifetime': admin_cert_lifetime, 'ssl_compliance': ssl_compliance, 'isam_host': isam_host, 'isam_port': isam_port}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | Configure Runtime Component
:param isamAppliance:
:return: | ibmsecurity/isam/web/runtime/process.py | config | iyartsev/ibmsecurity | 46 | python | def config(isamAppliance, admin_pwd, ps_mode='local', user_registry='local', ldap_host=None, ldap_port=None, ldap_dn=None, ldap_pwd=None, ldap_ssl_db=None, ldap_ssl_label=None, ldap_suffix=None, clean_ldap=False, domain='Default', admin_cert_lifetime='1460', ssl_compliance='none', isam_host=None, isam_port='7135', check_mode=False, force=False):
'\n Configure Runtime Component\n :param isamAppliance:\n :return:\n '
requires_model = None
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is False)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post('Configure web runtime Component', '/isam/runtime_components/', {'ps_mode': ps_mode, 'user_registry': user_registry, 'ldap_host': ldap_host, 'ldap_port': ldap_port, 'ldap_dn': ldap_dn, 'ldap_pwd': ldap_pwd, 'ldap_ssl_db': ldap_ssl_db, 'ldap_ssl_label': ldap_ssl_label, 'ldap_suffix': ldap_suffix, 'clean_ldap': clean_ldap, 'domain': domain, 'admin_pwd': admin_pwd, 'admin_cert_lifetime': admin_cert_lifetime, 'ssl_compliance': ssl_compliance, 'isam_host': isam_host, 'isam_port': isam_port}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | def config(isamAppliance, admin_pwd, ps_mode='local', user_registry='local', ldap_host=None, ldap_port=None, ldap_dn=None, ldap_pwd=None, ldap_ssl_db=None, ldap_ssl_label=None, ldap_suffix=None, clean_ldap=False, domain='Default', admin_cert_lifetime='1460', ssl_compliance='none', isam_host=None, isam_port='7135', check_mode=False, force=False):
'\n Configure Runtime Component\n :param isamAppliance:\n :return:\n '
requires_model = None
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is False)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post('Configure web runtime Component', '/isam/runtime_components/', {'ps_mode': ps_mode, 'user_registry': user_registry, 'ldap_host': ldap_host, 'ldap_port': ldap_port, 'ldap_dn': ldap_dn, 'ldap_pwd': ldap_pwd, 'ldap_ssl_db': ldap_ssl_db, 'ldap_ssl_label': ldap_ssl_label, 'ldap_suffix': ldap_suffix, 'clean_ldap': clean_ldap, 'domain': domain, 'admin_pwd': admin_pwd, 'admin_cert_lifetime': admin_cert_lifetime, 'ssl_compliance': ssl_compliance, 'isam_host': isam_host, 'isam_port': isam_port}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)<|docstring|>Configure Runtime Component
:param isamAppliance:
:return:<|endoftext|> |
dfb25ff0f98f117872fc1157e5c5b4759d14d1b825fb5cbcbd141e4f56de5cf8 | def unconfig(isamAppliance, clean=False, ldap_dn=None, ldap_pwd=None, check_mode=False, force=False):
'\n Unconfigure existing runtime component\n '
requires_model = None
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put('Unconfigure web runtime component', '/isam/runtime_components/RTE', {'operation': 'unconfigure', 'force': force, 'clean': clean, 'ldap_dn': ldap_dn, 'ldap_pwd': ldap_pwd}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | Unconfigure existing runtime component | ibmsecurity/isam/web/runtime/process.py | unconfig | iyartsev/ibmsecurity | 46 | python | def unconfig(isamAppliance, clean=False, ldap_dn=None, ldap_pwd=None, check_mode=False, force=False):
'\n \n '
requires_model = None
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put('Unconfigure web runtime component', '/isam/runtime_components/RTE', {'operation': 'unconfigure', 'force': force, 'clean': clean, 'ldap_dn': ldap_dn, 'ldap_pwd': ldap_pwd}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | def unconfig(isamAppliance, clean=False, ldap_dn=None, ldap_pwd=None, check_mode=False, force=False):
'\n \n '
requires_model = None
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put('Unconfigure web runtime component', '/isam/runtime_components/RTE', {'operation': 'unconfigure', 'force': force, 'clean': clean, 'ldap_dn': ldap_dn, 'ldap_pwd': ldap_pwd}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)<|docstring|>Unconfigure existing runtime component<|endoftext|> |
36756775e2b1eff2ea4d77aed149fd2822b91ca3febc3184d65a25b2713b42f8 | def import_config(isamAppliance, migrate_file, check_mode=False, force=False):
'\n Import or migrate runtime component\n '
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post_files('Import or Migrate web runtime component', '/isam/runtime_components/', [{'file_formfield': 'migrate_file', 'filename': migrate_file, 'mimetype': 'application/octet-stream'}], {}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | Import or migrate runtime component | ibmsecurity/isam/web/runtime/process.py | import_config | iyartsev/ibmsecurity | 46 | python | def import_config(isamAppliance, migrate_file, check_mode=False, force=False):
'\n \n '
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post_files('Import or Migrate web runtime component', '/isam/runtime_components/', [{'file_formfield': 'migrate_file', 'filename': migrate_file, 'mimetype': 'application/octet-stream'}], {}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | def import_config(isamAppliance, migrate_file, check_mode=False, force=False):
'\n \n '
(check_value, warnings) = _check(isamAppliance)
if ((force is True) or (check_value is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post_files('Import or Migrate web runtime component', '/isam/runtime_components/', [{'file_formfield': 'migrate_file', 'filename': migrate_file, 'mimetype': 'application/octet-stream'}], {}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)<|docstring|>Import or migrate runtime component<|endoftext|> |
2e837231293dbc396c4916ffc0fde58524682590391344d1f9a02145b054f4f3 | def execute(isamAppliance, operation='restart', check_mode=False, force=False):
'\n Execute an operation on runtime component\n\n :param isamAppliance:\n :param operation:\n :return:\n '
(check_value, warnings) = _check(isamAppliance)
if (force is False):
if (warnings == []):
ret_obj = get(isamAppliance)
if (ret_obj['data']['statuscode'] == '1'):
logger.info('ISAM web runtime is unconfigured.')
return isamAppliance.create_return_object()
if ((ret_obj['data']['statuscode'] == '0') and (operation == 'start')):
logger.info('ISAM web runtime is already started.')
return isamAppliance.create_return_object()
if ((ret_obj['data']['statuscode'] == '2') and (operation == 'stop')):
logger.info('ISAM web runtime is already stopped.')
return isamAppliance.create_return_object()
else:
return isamAppliance.create_return_object(warnings=warnings)
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put('Execute an operation on web runtime component', '/isam/runtime_components/', {'operation': operation}, requires_model=requires_model) | Execute an operation on runtime component
:param isamAppliance:
:param operation:
:return: | ibmsecurity/isam/web/runtime/process.py | execute | iyartsev/ibmsecurity | 46 | python | def execute(isamAppliance, operation='restart', check_mode=False, force=False):
'\n Execute an operation on runtime component\n\n :param isamAppliance:\n :param operation:\n :return:\n '
(check_value, warnings) = _check(isamAppliance)
if (force is False):
if (warnings == []):
ret_obj = get(isamAppliance)
if (ret_obj['data']['statuscode'] == '1'):
logger.info('ISAM web runtime is unconfigured.')
return isamAppliance.create_return_object()
if ((ret_obj['data']['statuscode'] == '0') and (operation == 'start')):
logger.info('ISAM web runtime is already started.')
return isamAppliance.create_return_object()
if ((ret_obj['data']['statuscode'] == '2') and (operation == 'stop')):
logger.info('ISAM web runtime is already stopped.')
return isamAppliance.create_return_object()
else:
return isamAppliance.create_return_object(warnings=warnings)
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put('Execute an operation on web runtime component', '/isam/runtime_components/', {'operation': operation}, requires_model=requires_model) | def execute(isamAppliance, operation='restart', check_mode=False, force=False):
'\n Execute an operation on runtime component\n\n :param isamAppliance:\n :param operation:\n :return:\n '
(check_value, warnings) = _check(isamAppliance)
if (force is False):
if (warnings == []):
ret_obj = get(isamAppliance)
if (ret_obj['data']['statuscode'] == '1'):
logger.info('ISAM web runtime is unconfigured.')
return isamAppliance.create_return_object()
if ((ret_obj['data']['statuscode'] == '0') and (operation == 'start')):
logger.info('ISAM web runtime is already started.')
return isamAppliance.create_return_object()
if ((ret_obj['data']['statuscode'] == '2') and (operation == 'stop')):
logger.info('ISAM web runtime is already stopped.')
return isamAppliance.create_return_object()
else:
return isamAppliance.create_return_object(warnings=warnings)
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put('Execute an operation on web runtime component', '/isam/runtime_components/', {'operation': operation}, requires_model=requires_model)<|docstring|>Execute an operation on runtime component
:param isamAppliance:
:param operation:
:return:<|endoftext|> |
64aa84b5f694e5473c03d4db87c854f8c10e32faa10efecec11f2160b8cc27a1 | def compare(isamAppliance1, isamAppliance2):
'\n Compare web runtime between 2 appliances\n '
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
del ret_obj1['data']['status']
del ret_obj1['data']['statuscode']
del ret_obj2['data']['status']
del ret_obj2['data']['statuscode']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['status', 'statuscode']) | Compare web runtime between 2 appliances | ibmsecurity/isam/web/runtime/process.py | compare | iyartsev/ibmsecurity | 46 | python | def compare(isamAppliance1, isamAppliance2):
'\n \n '
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
del ret_obj1['data']['status']
del ret_obj1['data']['statuscode']
del ret_obj2['data']['status']
del ret_obj2['data']['statuscode']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['status', 'statuscode']) | def compare(isamAppliance1, isamAppliance2):
'\n \n '
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
del ret_obj1['data']['status']
del ret_obj1['data']['statuscode']
del ret_obj2['data']['status']
del ret_obj2['data']['statuscode']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['status', 'statuscode'])<|docstring|>Compare web runtime between 2 appliances<|endoftext|> |
b4df89754132ddd91f7087c6a3a57c692e1de4b405eb1d3407373828ce6edf0d | def __init__(self, num_params, npop=50, sigma=0.1, alpha=0.001):
'\n Args:\n num_params: number of model parameters\n npop: population size\n sigma: noise standard deviation\n alpha: learning rate\n '
self.num_params = num_params
self.npop = npop
self.sigma = sigma
self.alpha = alpha
self.solution = np.random.rand(num_params) | Args:
num_params: number of model parameters
npop: population size
sigma: noise standard deviation
alpha: learning rate | es/simple_es.py | __init__ | exe1023/GA-final | 0 | python | def __init__(self, num_params, npop=50, sigma=0.1, alpha=0.001):
'\n Args:\n num_params: number of model parameters\n npop: population size\n sigma: noise standard deviation\n alpha: learning rate\n '
self.num_params = num_params
self.npop = npop
self.sigma = sigma
self.alpha = alpha
self.solution = np.random.rand(num_params) | def __init__(self, num_params, npop=50, sigma=0.1, alpha=0.001):
'\n Args:\n num_params: number of model parameters\n npop: population size\n sigma: noise standard deviation\n alpha: learning rate\n '
self.num_params = num_params
self.npop = npop
self.sigma = sigma
self.alpha = alpha
self.solution = np.random.rand(num_params)<|docstring|>Args:
num_params: number of model parameters
npop: population size
sigma: noise standard deviation
alpha: learning rate<|endoftext|> |
4275dab5ef8262c03b7878ac51c294399cff143fdb3511207d0394acd4b948d0 | def ask(self):
'\n Returns a list of parameters with guassian noise\n '
self.N = np.random.randn(self.npop, self.num_params)
solutions = []
for i in range(self.npop):
solutions.append((self.solution + (self.sigma * self.N[i])))
return solutions | Returns a list of parameters with guassian noise | es/simple_es.py | ask | exe1023/GA-final | 0 | python | def ask(self):
'\n \n '
self.N = np.random.randn(self.npop, self.num_params)
solutions = []
for i in range(self.npop):
solutions.append((self.solution + (self.sigma * self.N[i])))
return solutions | def ask(self):
'\n \n '
self.N = np.random.randn(self.npop, self.num_params)
solutions = []
for i in range(self.npop):
solutions.append((self.solution + (self.sigma * self.N[i])))
return solutions<|docstring|>Returns a list of parameters with guassian noise<|endoftext|> |
f3fae8807109f827d92a046a0f32c7f6c5cfa57202241309083d7310a9c6e943 | def tell(self, rewards):
'\n Args:\n rewards: np.array, shape = (npop)\n '
A = ((rewards - np.mean(rewards)) / (np.std(rewards) + 1e-06))
self.solution = (self.solution + ((self.alpha / (self.npop * self.sigma)) * np.dot(self.N.T, A))) | Args:
rewards: np.array, shape = (npop) | es/simple_es.py | tell | exe1023/GA-final | 0 | python | def tell(self, rewards):
'\n Args:\n rewards: np.array, shape = (npop)\n '
A = ((rewards - np.mean(rewards)) / (np.std(rewards) + 1e-06))
self.solution = (self.solution + ((self.alpha / (self.npop * self.sigma)) * np.dot(self.N.T, A))) | def tell(self, rewards):
'\n Args:\n rewards: np.array, shape = (npop)\n '
A = ((rewards - np.mean(rewards)) / (np.std(rewards) + 1e-06))
self.solution = (self.solution + ((self.alpha / (self.npop * self.sigma)) * np.dot(self.N.T, A)))<|docstring|>Args:
rewards: np.array, shape = (npop)<|endoftext|> |
c1c0616140acaf38ff7c05d22bab89879c03410e3c79d0fd4135cde211859a08 | def parse_xml(xml_file):
"\n XML TEI poem parser for 'disco 3' corpus.\n We read the data and find elements like title, author, etc with XPath\n expressions.\n Then, we iterate over the poem text and we look for each stanza and line\n data.\n :param xml_file: Path for the xml file\n :return: Poem python dict with the data obtained\n "
tree = ETree.parse(xml_file)
root = tree.getroot()
corpus_name = xml_file.parts[(- 6)]
poem = {}
stanza_list = []
analysis_description = root.find(f'.//{NS}metDecl/{NS}p').text
title = root.find(f'.//{NS}head').text
author = root.find(f'.//{NS}author').text
line_group_list = root.findall(f'.//*{NS}lg')
manually_checked = ('manual' in analysis_description)
alt_title = root.find(f".//*{NS}bibl/{NS}title[@property='dc:alternative']")
poem.update({'manually_checked': manually_checked, 'poem_title': title, 'author': author, 'corpus': corpus_name})
if (alt_title is not None):
alt_title = re.sub('[\\n ]+', ' ', ''.join(alt_title.itertext()))
poem.update({'poem_alt_title': alt_title})
line_number = 1
for (stanza_number, line_group) in enumerate(line_group_list[1:], start=1):
line_list = []
stanza_text = []
for line in line_group:
line_text = re.sub('[\\n ]+', ' ', ''.join(line.itertext()))
line_list.append({'line_number': line_number, 'line_text': line_text, 'metrical_pattern': line.get('met')})
stanza_text.append(line_text)
line_number += 1
stanza_list.append({'stanza_number': str(stanza_number), 'stanza_type': line_group.attrib['type'], 'lines': line_list, 'stanza_text': '\n'.join(stanza_text)})
poem.update({'stanzas': stanza_list})
return poem | XML TEI poem parser for 'disco 3' corpus.
We read the data and find elements like title, author, etc with XPath
expressions.
Then, we iterate over the poem text and we look for each stanza and line
data.
:param xml_file: Path for the xml file
:return: Poem python dict with the data obtained | src/averell/readers/disco3.py | parse_xml | linhd-postdata/dalton | 2 | python | def parse_xml(xml_file):
"\n XML TEI poem parser for 'disco 3' corpus.\n We read the data and find elements like title, author, etc with XPath\n expressions.\n Then, we iterate over the poem text and we look for each stanza and line\n data.\n :param xml_file: Path for the xml file\n :return: Poem python dict with the data obtained\n "
tree = ETree.parse(xml_file)
root = tree.getroot()
corpus_name = xml_file.parts[(- 6)]
poem = {}
stanza_list = []
analysis_description = root.find(f'.//{NS}metDecl/{NS}p').text
title = root.find(f'.//{NS}head').text
author = root.find(f'.//{NS}author').text
line_group_list = root.findall(f'.//*{NS}lg')
manually_checked = ('manual' in analysis_description)
alt_title = root.find(f".//*{NS}bibl/{NS}title[@property='dc:alternative']")
poem.update({'manually_checked': manually_checked, 'poem_title': title, 'author': author, 'corpus': corpus_name})
if (alt_title is not None):
alt_title = re.sub('[\\n ]+', ' ', .join(alt_title.itertext()))
poem.update({'poem_alt_title': alt_title})
line_number = 1
for (stanza_number, line_group) in enumerate(line_group_list[1:], start=1):
line_list = []
stanza_text = []
for line in line_group:
line_text = re.sub('[\\n ]+', ' ', .join(line.itertext()))
line_list.append({'line_number': line_number, 'line_text': line_text, 'metrical_pattern': line.get('met')})
stanza_text.append(line_text)
line_number += 1
stanza_list.append({'stanza_number': str(stanza_number), 'stanza_type': line_group.attrib['type'], 'lines': line_list, 'stanza_text': '\n'.join(stanza_text)})
poem.update({'stanzas': stanza_list})
return poem | def parse_xml(xml_file):
"\n XML TEI poem parser for 'disco 3' corpus.\n We read the data and find elements like title, author, etc with XPath\n expressions.\n Then, we iterate over the poem text and we look for each stanza and line\n data.\n :param xml_file: Path for the xml file\n :return: Poem python dict with the data obtained\n "
tree = ETree.parse(xml_file)
root = tree.getroot()
corpus_name = xml_file.parts[(- 6)]
poem = {}
stanza_list = []
analysis_description = root.find(f'.//{NS}metDecl/{NS}p').text
title = root.find(f'.//{NS}head').text
author = root.find(f'.//{NS}author').text
line_group_list = root.findall(f'.//*{NS}lg')
manually_checked = ('manual' in analysis_description)
alt_title = root.find(f".//*{NS}bibl/{NS}title[@property='dc:alternative']")
poem.update({'manually_checked': manually_checked, 'poem_title': title, 'author': author, 'corpus': corpus_name})
if (alt_title is not None):
alt_title = re.sub('[\\n ]+', ' ', .join(alt_title.itertext()))
poem.update({'poem_alt_title': alt_title})
line_number = 1
for (stanza_number, line_group) in enumerate(line_group_list[1:], start=1):
line_list = []
stanza_text = []
for line in line_group:
line_text = re.sub('[\\n ]+', ' ', .join(line.itertext()))
line_list.append({'line_number': line_number, 'line_text': line_text, 'metrical_pattern': line.get('met')})
stanza_text.append(line_text)
line_number += 1
stanza_list.append({'stanza_number': str(stanza_number), 'stanza_type': line_group.attrib['type'], 'lines': line_list, 'stanza_text': '\n'.join(stanza_text)})
poem.update({'stanzas': stanza_list})
return poem<|docstring|>XML TEI poem parser for 'disco 3' corpus.
We read the data and find elements like title, author, etc with XPath
expressions.
Then, we iterate over the poem text and we look for each stanza and line
data.
:param xml_file: Path for the xml file
:return: Poem python dict with the data obtained<|endoftext|> |
f87675fd212e019537cb53590bd82edda37fbab5a6ad1bacdf95d4e9bc79cada | def get_features(path):
'\n Function to find each poem file and parse it\n :param path: Corpus Path\n :return: List of poem dicts\n '
xml_files = ((Path('*') / 'per-sonnet') / '*.xml')
feature_list = []
for filename in Path(path).rglob(str(xml_files)):
result = parse_xml(filename)
feature_list.append(result)
return feature_list | Function to find each poem file and parse it
:param path: Corpus Path
:return: List of poem dicts | src/averell/readers/disco3.py | get_features | linhd-postdata/dalton | 2 | python | def get_features(path):
'\n Function to find each poem file and parse it\n :param path: Corpus Path\n :return: List of poem dicts\n '
xml_files = ((Path('*') / 'per-sonnet') / '*.xml')
feature_list = []
for filename in Path(path).rglob(str(xml_files)):
result = parse_xml(filename)
feature_list.append(result)
return feature_list | def get_features(path):
'\n Function to find each poem file and parse it\n :param path: Corpus Path\n :return: List of poem dicts\n '
xml_files = ((Path('*') / 'per-sonnet') / '*.xml')
feature_list = []
for filename in Path(path).rglob(str(xml_files)):
result = parse_xml(filename)
feature_list.append(result)
return feature_list<|docstring|>Function to find each poem file and parse it
:param path: Corpus Path
:return: List of poem dicts<|endoftext|> |
5fbb120b2de08a373a0b43ecbf4cd11fdf17a16e348c08e32e3c57d08e17cb9c | def run(input_dir=None, output_dir=None, *, replace_str=None, oggm_working_dir='', set_oggm_params=None, n_processes=None):
"Computes the hypsometries for an entire RGI directory.\n\n Parameters\n ----------\n input_dir : str\n path to the RGI directory\n output_dir : str\n path to the output directory\n replace_str : callable\n a function to call on the file's basename. A good example is:\n ``replace_str=lambda x : x.replace('rgi60', 'rgi61')``\n oggm_working_dir : str\n str, optional\n path to the folder where oggm will write its GlacierDirectories.\n Default is to use a temporary folder (not recommended)\n set_oggm_params : callable\n a function which sets the OGGM params on cfg. The default is to\n turn multiprocessing off.\n n_processes : int, optional\n the number of processors to use\n "
if (set_oggm_params is None):
set_oggm_params = _set_oggm_params
fp = '*_rgi*_*.shp'
rgi_shps = list(glob(os.path.join(input_dir, '*', fp)))
rgi_shps = sorted([r for r in rgi_shps if ('Regions' not in r)])
funcs.mkdir(output_dir)
out_paths = []
log_names = []
for rgi_shp in rgi_shps:
odir = os.path.basename(os.path.dirname(rgi_shp))
if replace_str:
odir = replace_str(odir)
odir = os.path.join(output_dir, odir)
funcs.mkdir(odir)
bn = os.path.basename(rgi_shp)
if replace_str:
bn = replace_str(bn)
bn = bn.replace('.shp', '')
of = os.path.join(odir, bn)
out_paths.append(of)
log_names.append(bn)
with mp.Pool(n_processes) as p:
p.starmap(funcs.mappable_func, zip(([funcs.hypsometries] * len(rgi_shps)), rgi_shps, out_paths, log_names, ([set_oggm_params] * len(rgi_shps)), ([oggm_working_dir] * len(rgi_shps))), chunksize=1) | Computes the hypsometries for an entire RGI directory.
Parameters
----------
input_dir : str
path to the RGI directory
output_dir : str
path to the output directory
replace_str : callable
a function to call on the file's basename. A good example is:
``replace_str=lambda x : x.replace('rgi60', 'rgi61')``
oggm_working_dir : str
str, optional
path to the folder where oggm will write its GlacierDirectories.
Default is to use a temporary folder (not recommended)
set_oggm_params : callable
a function which sets the OGGM params on cfg. The default is to
turn multiprocessing off.
n_processes : int, optional
the number of processors to use | rgitools/cli/compute_hypsometries.py | run | OGGM/rgi-toolkit | 6 | python | def run(input_dir=None, output_dir=None, *, replace_str=None, oggm_working_dir=, set_oggm_params=None, n_processes=None):
"Computes the hypsometries for an entire RGI directory.\n\n Parameters\n ----------\n input_dir : str\n path to the RGI directory\n output_dir : str\n path to the output directory\n replace_str : callable\n a function to call on the file's basename. A good example is:\n ``replace_str=lambda x : x.replace('rgi60', 'rgi61')``\n oggm_working_dir : str\n str, optional\n path to the folder where oggm will write its GlacierDirectories.\n Default is to use a temporary folder (not recommended)\n set_oggm_params : callable\n a function which sets the OGGM params on cfg. The default is to\n turn multiprocessing off.\n n_processes : int, optional\n the number of processors to use\n "
if (set_oggm_params is None):
set_oggm_params = _set_oggm_params
fp = '*_rgi*_*.shp'
rgi_shps = list(glob(os.path.join(input_dir, '*', fp)))
rgi_shps = sorted([r for r in rgi_shps if ('Regions' not in r)])
funcs.mkdir(output_dir)
out_paths = []
log_names = []
for rgi_shp in rgi_shps:
odir = os.path.basename(os.path.dirname(rgi_shp))
if replace_str:
odir = replace_str(odir)
odir = os.path.join(output_dir, odir)
funcs.mkdir(odir)
bn = os.path.basename(rgi_shp)
if replace_str:
bn = replace_str(bn)
bn = bn.replace('.shp', )
of = os.path.join(odir, bn)
out_paths.append(of)
log_names.append(bn)
with mp.Pool(n_processes) as p:
p.starmap(funcs.mappable_func, zip(([funcs.hypsometries] * len(rgi_shps)), rgi_shps, out_paths, log_names, ([set_oggm_params] * len(rgi_shps)), ([oggm_working_dir] * len(rgi_shps))), chunksize=1) | def run(input_dir=None, output_dir=None, *, replace_str=None, oggm_working_dir=, set_oggm_params=None, n_processes=None):
"Computes the hypsometries for an entire RGI directory.\n\n Parameters\n ----------\n input_dir : str\n path to the RGI directory\n output_dir : str\n path to the output directory\n replace_str : callable\n a function to call on the file's basename. A good example is:\n ``replace_str=lambda x : x.replace('rgi60', 'rgi61')``\n oggm_working_dir : str\n str, optional\n path to the folder where oggm will write its GlacierDirectories.\n Default is to use a temporary folder (not recommended)\n set_oggm_params : callable\n a function which sets the OGGM params on cfg. The default is to\n turn multiprocessing off.\n n_processes : int, optional\n the number of processors to use\n "
if (set_oggm_params is None):
set_oggm_params = _set_oggm_params
fp = '*_rgi*_*.shp'
rgi_shps = list(glob(os.path.join(input_dir, '*', fp)))
rgi_shps = sorted([r for r in rgi_shps if ('Regions' not in r)])
funcs.mkdir(output_dir)
out_paths = []
log_names = []
for rgi_shp in rgi_shps:
odir = os.path.basename(os.path.dirname(rgi_shp))
if replace_str:
odir = replace_str(odir)
odir = os.path.join(output_dir, odir)
funcs.mkdir(odir)
bn = os.path.basename(rgi_shp)
if replace_str:
bn = replace_str(bn)
bn = bn.replace('.shp', )
of = os.path.join(odir, bn)
out_paths.append(of)
log_names.append(bn)
with mp.Pool(n_processes) as p:
p.starmap(funcs.mappable_func, zip(([funcs.hypsometries] * len(rgi_shps)), rgi_shps, out_paths, log_names, ([set_oggm_params] * len(rgi_shps)), ([oggm_working_dir] * len(rgi_shps))), chunksize=1)<|docstring|>Computes the hypsometries for an entire RGI directory.
Parameters
----------
input_dir : str
path to the RGI directory
output_dir : str
path to the output directory
replace_str : callable
a function to call on the file's basename. A good example is:
``replace_str=lambda x : x.replace('rgi60', 'rgi61')``
oggm_working_dir : str
str, optional
path to the folder where oggm will write its GlacierDirectories.
Default is to use a temporary folder (not recommended)
set_oggm_params : callable
a function which sets the OGGM params on cfg. The default is to
turn multiprocessing off.
n_processes : int, optional
the number of processors to use<|endoftext|> |
3222becb9f01250678c96195fc228b2573ee21d7fc4e75e76a182b85261dabcf | def parse_args(args):
'Check input arguments'
description = 'Computes the hypsometries for an entire RGI directory.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--input-dir', type=str, help='the rgi directory to process.')
parser.add_argument('--output-dir', type=str, help='the directory where to write the processed files.')
parser.add_argument('--oggm-working-dir', type=str, help='the directory where to write the processed files.')
parser.add_argument('--replace-str', nargs='*', type=str, help='a string to change on the file basename. A good example is: --replace-str rgi60 rgi61')
parser.add_argument('--n-processes', type=int, help='Number of processors to use.')
args = parser.parse_args(args)
if (not args.input_dir):
raise ValueError('--input-dir is required!')
if (not args.output_dir):
raise ValueError('--output-dir is required!')
if args.replace_str:
if (len(args.replace_str) != 2):
raise ValueError('--replace-str needs two values!')
(s1, s2) = args.replace_str
def replace_str(x):
return x.replace(s1, s2)
else:
replace_str = None
return dict(input_dir=args.input_dir, output_dir=args.output_dir, replace_str=replace_str, n_processes=args.n_processes, oggm_working_dir=args.oggm_working_dir) | Check input arguments | rgitools/cli/compute_hypsometries.py | parse_args | OGGM/rgi-toolkit | 6 | python | def parse_args(args):
description = 'Computes the hypsometries for an entire RGI directory.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--input-dir', type=str, help='the rgi directory to process.')
parser.add_argument('--output-dir', type=str, help='the directory where to write the processed files.')
parser.add_argument('--oggm-working-dir', type=str, help='the directory where to write the processed files.')
parser.add_argument('--replace-str', nargs='*', type=str, help='a string to change on the file basename. A good example is: --replace-str rgi60 rgi61')
parser.add_argument('--n-processes', type=int, help='Number of processors to use.')
args = parser.parse_args(args)
if (not args.input_dir):
raise ValueError('--input-dir is required!')
if (not args.output_dir):
raise ValueError('--output-dir is required!')
if args.replace_str:
if (len(args.replace_str) != 2):
raise ValueError('--replace-str needs two values!')
(s1, s2) = args.replace_str
def replace_str(x):
return x.replace(s1, s2)
else:
replace_str = None
return dict(input_dir=args.input_dir, output_dir=args.output_dir, replace_str=replace_str, n_processes=args.n_processes, oggm_working_dir=args.oggm_working_dir) | def parse_args(args):
description = 'Computes the hypsometries for an entire RGI directory.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--input-dir', type=str, help='the rgi directory to process.')
parser.add_argument('--output-dir', type=str, help='the directory where to write the processed files.')
parser.add_argument('--oggm-working-dir', type=str, help='the directory where to write the processed files.')
parser.add_argument('--replace-str', nargs='*', type=str, help='a string to change on the file basename. A good example is: --replace-str rgi60 rgi61')
parser.add_argument('--n-processes', type=int, help='Number of processors to use.')
args = parser.parse_args(args)
if (not args.input_dir):
raise ValueError('--input-dir is required!')
if (not args.output_dir):
raise ValueError('--output-dir is required!')
if args.replace_str:
if (len(args.replace_str) != 2):
raise ValueError('--replace-str needs two values!')
(s1, s2) = args.replace_str
def replace_str(x):
return x.replace(s1, s2)
else:
replace_str = None
return dict(input_dir=args.input_dir, output_dir=args.output_dir, replace_str=replace_str, n_processes=args.n_processes, oggm_working_dir=args.oggm_working_dir)<|docstring|>Check input arguments<|endoftext|> |
a27fa8936c6401703d07fe37d7a0597b421ef679c462248b00db040054a19ff1 | def main():
'Script entry point'
run(**parse_args(sys.argv[1:])) | Script entry point | rgitools/cli/compute_hypsometries.py | main | OGGM/rgi-toolkit | 6 | python | def main():
run(**parse_args(sys.argv[1:])) | def main():
run(**parse_args(sys.argv[1:]))<|docstring|>Script entry point<|endoftext|> |
0738c9b1b1e5992cac9c43e7c1b1314b85d864d652aa8ba3734709304900c9bd | def asset(config, value):
'Get asset file url'
asset_path = config.path['asset']
rel = urljoin(asset_path, value)
return real_url(config.site.url, rel) | Get asset file url | peanut/context.py | asset | linmuhe/peanut | 0 | python | def asset(config, value):
asset_path = config.path['asset']
rel = urljoin(asset_path, value)
return real_url(config.site.url, rel) | def asset(config, value):
asset_path = config.path['asset']
rel = urljoin(asset_path, value)
return real_url(config.site.url, rel)<|docstring|>Get asset file url<|endoftext|> |
057619bba1ce368918746833f60ed822843de5f9fa5ddc8120175467fb3fe442 | def strftime(value, date_format):
'Date formatter'
return datetime.strftime(value, date_format) | Date formatter | peanut/context.py | strftime | linmuhe/peanut | 0 | python | def strftime(value, date_format):
return datetime.strftime(value, date_format) | def strftime(value, date_format):
return datetime.strftime(value, date_format)<|docstring|>Date formatter<|endoftext|> |
1d8575d9575d483f7306f364c92ad006efffb38b1d092bedf73b6f63a9942645 | def abs_url(config, value):
'Absolute url'
return urljoin(config.site['url'], value) | Absolute url | peanut/context.py | abs_url | linmuhe/peanut | 0 | python | def abs_url(config, value):
return urljoin(config.site['url'], value) | def abs_url(config, value):
return urljoin(config.site['url'], value)<|docstring|>Absolute url<|endoftext|> |
014f17a828821deab1c64c7bd94e04fbbaacdefe241b85a0e77089f95da1c5e0 | def get_filters(config):
"Get all filters 在模版里使用 'css/main.css'|asset\n "
return {'asset': partial(asset, config), 'strftime': strftime, 'abs_url': partial(abs_url, config)} | Get all filters 在模版里使用 'css/main.css'|asset | peanut/context.py | get_filters | linmuhe/peanut | 0 | python | def get_filters(config):
"\n "
return {'asset': partial(asset, config), 'strftime': strftime, 'abs_url': partial(abs_url, config)} | def get_filters(config):
"\n "
return {'asset': partial(asset, config), 'strftime': strftime, 'abs_url': partial(abs_url, config)}<|docstring|>Get all filters 在模版里使用 'css/main.css'|asset<|endoftext|> |
ceedccdfc340bbad5b4e323c79d98e047798dedffd373e4d864775a755eef2c5 | def test_client_flow_generation_two_tools_two_inst(logged_in):
'Previously, this caused a KeyError due to overwriting a flow on a tool.'
@generate_flow_definition
class MyClient(GladierBaseClient):
'Example Docs'
gladier_tools = ['gladier.tests.test_data.gladier_mocks.MockTool', GeneratedTool]
MyClient()
mc = MyClient()
flow_def = mc.flow_definition
assert isinstance(flow_def, dict)
assert (len(flow_def['States']) == 2)
assert (flow_def['Comment'] == 'Example Docs') | Previously, this caused a KeyError due to overwriting a flow on a tool. | gladier/tests/test_client_flow_generation.py | test_client_flow_generation_two_tools_two_inst | rohithj494/gladier | 2 | python | def test_client_flow_generation_two_tools_two_inst(logged_in):
@generate_flow_definition
class MyClient(GladierBaseClient):
'Example Docs'
gladier_tools = ['gladier.tests.test_data.gladier_mocks.MockTool', GeneratedTool]
MyClient()
mc = MyClient()
flow_def = mc.flow_definition
assert isinstance(flow_def, dict)
assert (len(flow_def['States']) == 2)
assert (flow_def['Comment'] == 'Example Docs') | def test_client_flow_generation_two_tools_two_inst(logged_in):
@generate_flow_definition
class MyClient(GladierBaseClient):
'Example Docs'
gladier_tools = ['gladier.tests.test_data.gladier_mocks.MockTool', GeneratedTool]
MyClient()
mc = MyClient()
flow_def = mc.flow_definition
assert isinstance(flow_def, dict)
assert (len(flow_def['States']) == 2)
assert (flow_def['Comment'] == 'Example Docs')<|docstring|>Previously, this caused a KeyError due to overwriting a flow on a tool.<|endoftext|> |
f513f64c66d00a61641da58af670a741955ae353c55ff721277b12ce5dbf8643 | def __init__(self, folder, createCallback):
"\n The DataManager is the main handler of LaptopLogger. It is the console\n version of LaptopLogger. Note that this file can be run independently,\n without the QT dependence. \n\n The DataManager takes the folder in which to look for existing data,\n and a callback that is used for creation. The DataManager alls this when\n the loggers are not set up. The create callbacks are to use DataManager's \n functions:\n - create \n - login\n - createAndImport\n The functions will perform the necessary preparation and importing of data. \n After giving DataManager this information, it will handle everything else itself. \n "
self.versionfile = os.path.join(folder, 'laptoplogger.json')
self.cachefile = os.path.join(folder, 'cache.db')
self.dbdir = os.path.join(folder, 'cdb')
try:
self.manager = cdbmanager.Manager(self.dbdir)
self.cdbversion = self.manager.version()
logging.info(('Local ConnectorDB found: ' + self.cdbversion))
except:
logging.warning("Could not find ConnectorDB executable. Can't manage local database.")
self.manager = None
self.cdbversion = ''
self.ismanaging = False
self.isgathering = False
self.issyncing = False
self.syncer = None
if (not os.path.isfile(self.versionfile)):
logging.debug('DataManager: Could not find existing instance. Running create callbacks')
self.remove()
if (not os.path.isdir(folder)):
os.makedirs(folder)
self.logger = Logger(self.cachefile, on_create=self.onCacheCreate)
createCallback(self)
else:
self.logger = Logger(self.cachefile, on_create=self.onCacheCreate)
if (not os.path.isfile(self.versionfile)):
raise Exception((self.versionfile + ' was not found, and is required. Starting LaptopLogger must have somehow failed.'))
self.info = files.readJSON(self.versionfile)
if (self.info['version'] != 1):
raise Exception((('An incompatible version already exists at ' + folder) + ". Delete the folder if you don't need the existing data."))
if (self.info['managed'] and (not self.ismanaging)):
if (self.cdbversion != self.info['connectordb']):
logging.warning(('ConnectorDB version used to make managed logger (%s) does not match current version (%s)' % (self.info['connectordb'], self.cdbversion)))
logging.info('Starting ConnectorDB server')
self.manager.start()
self.ismanaging = True
self.currentplugins = {}
self.plugins = {}
logging.info('Setting up data gathering plugins')
for p in getplugins():
g = p()
logging.info(('Initialized plugin ' + g.streamname))
self.currentplugins[g.streamname] = g
self.plugins[g.streamname] = g
logging.debug(str(self.logger.data))
for g in self.logger.data['disabled_plugins']:
logging.info(('Disabling plugin ' + g))
if (g in self.currentplugins):
del self.currentplugins[g]
if self.logger.data['isgathering']:
self.startgathering()
if self.logger.data['issyncing']:
self.startsync() | The DataManager is the main handler of LaptopLogger. It is the console
version of LaptopLogger. Note that this file can be run independently,
without the QT dependence.
The DataManager takes the folder in which to look for existing data,
and a callback that is used for creation. The DataManager alls this when
the loggers are not set up. The create callbacks are to use DataManager's
functions:
- create
- login
- createAndImport
The functions will perform the necessary preparation and importing of data.
After giving DataManager this information, it will handle everything else itself. | src/datamanager.py | __init__ | connectordb/connectordb-laptoplogger | 4 | python | def __init__(self, folder, createCallback):
"\n The DataManager is the main handler of LaptopLogger. It is the console\n version of LaptopLogger. Note that this file can be run independently,\n without the QT dependence. \n\n The DataManager takes the folder in which to look for existing data,\n and a callback that is used for creation. The DataManager alls this when\n the loggers are not set up. The create callbacks are to use DataManager's \n functions:\n - create \n - login\n - createAndImport\n The functions will perform the necessary preparation and importing of data. \n After giving DataManager this information, it will handle everything else itself. \n "
self.versionfile = os.path.join(folder, 'laptoplogger.json')
self.cachefile = os.path.join(folder, 'cache.db')
self.dbdir = os.path.join(folder, 'cdb')
try:
self.manager = cdbmanager.Manager(self.dbdir)
self.cdbversion = self.manager.version()
logging.info(('Local ConnectorDB found: ' + self.cdbversion))
except:
logging.warning("Could not find ConnectorDB executable. Can't manage local database.")
self.manager = None
self.cdbversion =
self.ismanaging = False
self.isgathering = False
self.issyncing = False
self.syncer = None
if (not os.path.isfile(self.versionfile)):
logging.debug('DataManager: Could not find existing instance. Running create callbacks')
self.remove()
if (not os.path.isdir(folder)):
os.makedirs(folder)
self.logger = Logger(self.cachefile, on_create=self.onCacheCreate)
createCallback(self)
else:
self.logger = Logger(self.cachefile, on_create=self.onCacheCreate)
if (not os.path.isfile(self.versionfile)):
raise Exception((self.versionfile + ' was not found, and is required. Starting LaptopLogger must have somehow failed.'))
self.info = files.readJSON(self.versionfile)
if (self.info['version'] != 1):
raise Exception((('An incompatible version already exists at ' + folder) + ". Delete the folder if you don't need the existing data."))
if (self.info['managed'] and (not self.ismanaging)):
if (self.cdbversion != self.info['connectordb']):
logging.warning(('ConnectorDB version used to make managed logger (%s) does not match current version (%s)' % (self.info['connectordb'], self.cdbversion)))
logging.info('Starting ConnectorDB server')
self.manager.start()
self.ismanaging = True
self.currentplugins = {}
self.plugins = {}
logging.info('Setting up data gathering plugins')
for p in getplugins():
g = p()
logging.info(('Initialized plugin ' + g.streamname))
self.currentplugins[g.streamname] = g
self.plugins[g.streamname] = g
logging.debug(str(self.logger.data))
for g in self.logger.data['disabled_plugins']:
logging.info(('Disabling plugin ' + g))
if (g in self.currentplugins):
del self.currentplugins[g]
if self.logger.data['isgathering']:
self.startgathering()
if self.logger.data['issyncing']:
self.startsync() | def __init__(self, folder, createCallback):
"\n The DataManager is the main handler of LaptopLogger. It is the console\n version of LaptopLogger. Note that this file can be run independently,\n without the QT dependence. \n\n The DataManager takes the folder in which to look for existing data,\n and a callback that is used for creation. The DataManager alls this when\n the loggers are not set up. The create callbacks are to use DataManager's \n functions:\n - create \n - login\n - createAndImport\n The functions will perform the necessary preparation and importing of data. \n After giving DataManager this information, it will handle everything else itself. \n "
self.versionfile = os.path.join(folder, 'laptoplogger.json')
self.cachefile = os.path.join(folder, 'cache.db')
self.dbdir = os.path.join(folder, 'cdb')
try:
self.manager = cdbmanager.Manager(self.dbdir)
self.cdbversion = self.manager.version()
logging.info(('Local ConnectorDB found: ' + self.cdbversion))
except:
logging.warning("Could not find ConnectorDB executable. Can't manage local database.")
self.manager = None
self.cdbversion =
self.ismanaging = False
self.isgathering = False
self.issyncing = False
self.syncer = None
if (not os.path.isfile(self.versionfile)):
logging.debug('DataManager: Could not find existing instance. Running create callbacks')
self.remove()
if (not os.path.isdir(folder)):
os.makedirs(folder)
self.logger = Logger(self.cachefile, on_create=self.onCacheCreate)
createCallback(self)
else:
self.logger = Logger(self.cachefile, on_create=self.onCacheCreate)
if (not os.path.isfile(self.versionfile)):
raise Exception((self.versionfile + ' was not found, and is required. Starting LaptopLogger must have somehow failed.'))
self.info = files.readJSON(self.versionfile)
if (self.info['version'] != 1):
raise Exception((('An incompatible version already exists at ' + folder) + ". Delete the folder if you don't need the existing data."))
if (self.info['managed'] and (not self.ismanaging)):
if (self.cdbversion != self.info['connectordb']):
logging.warning(('ConnectorDB version used to make managed logger (%s) does not match current version (%s)' % (self.info['connectordb'], self.cdbversion)))
logging.info('Starting ConnectorDB server')
self.manager.start()
self.ismanaging = True
self.currentplugins = {}
self.plugins = {}
logging.info('Setting up data gathering plugins')
for p in getplugins():
g = p()
logging.info(('Initialized plugin ' + g.streamname))
self.currentplugins[g.streamname] = g
self.plugins[g.streamname] = g
logging.debug(str(self.logger.data))
for g in self.logger.data['disabled_plugins']:
logging.info(('Disabling plugin ' + g))
if (g in self.currentplugins):
del self.currentplugins[g]
if self.logger.data['isgathering']:
self.startgathering()
if self.logger.data['issyncing']:
self.startsync()<|docstring|>The DataManager is the main handler of LaptopLogger. It is the console
version of LaptopLogger. Note that this file can be run independently,
without the QT dependence.
The DataManager takes the folder in which to look for existing data,
and a callback that is used for creation. The DataManager alls this when
the loggers are not set up. The create callbacks are to use DataManager's
functions:
- create
- login
- createAndImport
The functions will perform the necessary preparation and importing of data.
After giving DataManager this information, it will handle everything else itself.<|endoftext|> |
b7bc21ad39dee17e9fd814f5088a2e2de066f4af975e6c9ff7da402ac36837b1 | def remove(self):
'\n Given a folder with possibly an existing LaptopLogger installation,\n removes all relevant files.\n '
if self.ismanaging:
self.manager.stop()
self.ismanaging = False
if os.path.exists(self.dbdir):
shutil.rmtree(self.dbdir)
if os.path.exists(self.versionfile):
os.remove(self.versionfile)
if os.path.exists(self.cachefile):
os.remove(self.cachefile) | Given a folder with possibly an existing LaptopLogger installation,
removes all relevant files. | src/datamanager.py | remove | connectordb/connectordb-laptoplogger | 4 | python | def remove(self):
'\n Given a folder with possibly an existing LaptopLogger installation,\n removes all relevant files.\n '
if self.ismanaging:
self.manager.stop()
self.ismanaging = False
if os.path.exists(self.dbdir):
shutil.rmtree(self.dbdir)
if os.path.exists(self.versionfile):
os.remove(self.versionfile)
if os.path.exists(self.cachefile):
os.remove(self.cachefile) | def remove(self):
'\n Given a folder with possibly an existing LaptopLogger installation,\n removes all relevant files.\n '
if self.ismanaging:
self.manager.stop()
self.ismanaging = False
if os.path.exists(self.dbdir):
shutil.rmtree(self.dbdir)
if os.path.exists(self.versionfile):
os.remove(self.versionfile)
if os.path.exists(self.cachefile):
os.remove(self.cachefile)<|docstring|>Given a folder with possibly an existing LaptopLogger installation,
removes all relevant files.<|endoftext|> |
bd77e0668576e635d5b1d7ca2ea0195202308d9984040a5848bae5c9a0ff947a | def test_nft(self):
'Test NFT optimizer by using it'
vqe = VQE(ansatz=RealAmplitudes(), optimizer=NFT(), quantum_instance=QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=algorithm_globals.random_seed, seed_transpiler=algorithm_globals.random_seed))
result = vqe.compute_minimum_eigenvalue(operator=self.qubit_op)
self.assertAlmostEqual(result.eigenvalue.real, (- 1.857275), places=6) | Test NFT optimizer by using it | test/python/algorithms/optimizers/test_optimizer_nft.py | test_nft | biblio-techers/Qiskit-Fall-Fest-2021 | 1,599 | python | def test_nft(self):
vqe = VQE(ansatz=RealAmplitudes(), optimizer=NFT(), quantum_instance=QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=algorithm_globals.random_seed, seed_transpiler=algorithm_globals.random_seed))
result = vqe.compute_minimum_eigenvalue(operator=self.qubit_op)
self.assertAlmostEqual(result.eigenvalue.real, (- 1.857275), places=6) | def test_nft(self):
vqe = VQE(ansatz=RealAmplitudes(), optimizer=NFT(), quantum_instance=QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=algorithm_globals.random_seed, seed_transpiler=algorithm_globals.random_seed))
result = vqe.compute_minimum_eigenvalue(operator=self.qubit_op)
self.assertAlmostEqual(result.eigenvalue.real, (- 1.857275), places=6)<|docstring|>Test NFT optimizer by using it<|endoftext|> |
08c6b50aa47382dbfd60690ce81e2575ccb78e3173b95a4f0bdf005f976a0288 | def send_alert_photo(self, *args, **kwargs):
'\n Send photo to receivers\n '
for (name, value) in self.groups.items():
self.command__photo({'sender': {'username': name}, 'receiver': {'name': name}}) | Send photo to receivers | commands.py | send_alert_photo | rafen/raspTelegram | 0 | python | def send_alert_photo(self, *args, **kwargs):
'\n \n '
for (name, value) in self.groups.items():
self.command__photo({'sender': {'username': name}, 'receiver': {'name': name}}) | def send_alert_photo(self, *args, **kwargs):
'\n \n '
for (name, value) in self.groups.items():
self.command__photo({'sender': {'username': name}, 'receiver': {'name': name}})<|docstring|>Send photo to receivers<|endoftext|> |
f0f27e1c611d427c340efe0ffceabcc3e68b24c8b2580da489abcf3e1206b036 | def has_object_permission(self, request, view, obj):
'Check if the reviewer is the creator of the review.'
try:
CameraReview.objects.filter(user=request.user)
except CameraReview.DoesNotExist:
return False
return True | Check if the reviewer is the creator of the review. | apps/reviews/permissions/reviews.py | has_object_permission | Haizza1/RandomCameras-Backend | 1 | python | def has_object_permission(self, request, view, obj):
try:
CameraReview.objects.filter(user=request.user)
except CameraReview.DoesNotExist:
return False
return True | def has_object_permission(self, request, view, obj):
try:
CameraReview.objects.filter(user=request.user)
except CameraReview.DoesNotExist:
return False
return True<|docstring|>Check if the reviewer is the creator of the review.<|endoftext|> |
145ec61535c7222c928aa8f5b113b18a82d5860a7aadcadc3d6c536634620f94 | def has_permission(self, request, view):
'Check if the requesting user is reviewer.'
if (request.user.is_reviewer == True):
return True
else:
return False | Check if the requesting user is reviewer. | apps/reviews/permissions/reviews.py | has_permission | Haizza1/RandomCameras-Backend | 1 | python | def has_permission(self, request, view):
if (request.user.is_reviewer == True):
return True
else:
return False | def has_permission(self, request, view):
if (request.user.is_reviewer == True):
return True
else:
return False<|docstring|>Check if the requesting user is reviewer.<|endoftext|> |
2e338fb556f1d8277ee84dda7ff58ee2363e5c2509691a0d5ed9e7d4bf92b9c0 | def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[(Union[(None, ModelsAddPlayerResponse)], Union[(None, HttpResponse, RestapiErrorResponseV2)])]:
'Parse the given response.\n\n 200: OK - ModelsAddPlayerResponse (player removed)\n\n 400: Bad Request - RestapiErrorResponseV2 (malformed request)\n\n 404: Not Found - RestapiErrorResponseV2 (session not found)\n\n 500: Internal Server Error - RestapiErrorResponseV2 (Internal Server Error)\n\n ---: HttpResponse (Undocumented Response)\n\n ---: HttpResponse (Unexpected Content-Type Error)\n\n ---: HttpResponse (Unhandled Error)\n '
(pre_processed_response, error) = self.pre_process_response(code=code, content_type=content_type, content=content)
if (error is not None):
return (None, (None if error.is_no_content() else error))
(code, content_type, content) = pre_processed_response
if (code == 200):
return (ModelsAddPlayerResponse.create_from_dict(content), None)
if (code == 400):
return (None, RestapiErrorResponseV2.create_from_dict(content))
if (code == 404):
return (None, RestapiErrorResponseV2.create_from_dict(content))
if (code == 500):
return (None, RestapiErrorResponseV2.create_from_dict(content))
return (None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)) | Parse the given response.
200: OK - ModelsAddPlayerResponse (player removed)
400: Bad Request - RestapiErrorResponseV2 (malformed request)
404: Not Found - RestapiErrorResponseV2 (session not found)
500: Internal Server Error - RestapiErrorResponseV2 (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error) | accelbyte_py_sdk/api/sessionbrowser/operations/session/remove_player_from_session.py | parse_response | AccelByte/accelbyte-python-sdk | 0 | python | def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[(Union[(None, ModelsAddPlayerResponse)], Union[(None, HttpResponse, RestapiErrorResponseV2)])]:
'Parse the given response.\n\n 200: OK - ModelsAddPlayerResponse (player removed)\n\n 400: Bad Request - RestapiErrorResponseV2 (malformed request)\n\n 404: Not Found - RestapiErrorResponseV2 (session not found)\n\n 500: Internal Server Error - RestapiErrorResponseV2 (Internal Server Error)\n\n ---: HttpResponse (Undocumented Response)\n\n ---: HttpResponse (Unexpected Content-Type Error)\n\n ---: HttpResponse (Unhandled Error)\n '
(pre_processed_response, error) = self.pre_process_response(code=code, content_type=content_type, content=content)
if (error is not None):
return (None, (None if error.is_no_content() else error))
(code, content_type, content) = pre_processed_response
if (code == 200):
return (ModelsAddPlayerResponse.create_from_dict(content), None)
if (code == 400):
return (None, RestapiErrorResponseV2.create_from_dict(content))
if (code == 404):
return (None, RestapiErrorResponseV2.create_from_dict(content))
if (code == 500):
return (None, RestapiErrorResponseV2.create_from_dict(content))
return (None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)) | def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[(Union[(None, ModelsAddPlayerResponse)], Union[(None, HttpResponse, RestapiErrorResponseV2)])]:
'Parse the given response.\n\n 200: OK - ModelsAddPlayerResponse (player removed)\n\n 400: Bad Request - RestapiErrorResponseV2 (malformed request)\n\n 404: Not Found - RestapiErrorResponseV2 (session not found)\n\n 500: Internal Server Error - RestapiErrorResponseV2 (Internal Server Error)\n\n ---: HttpResponse (Undocumented Response)\n\n ---: HttpResponse (Unexpected Content-Type Error)\n\n ---: HttpResponse (Unhandled Error)\n '
(pre_processed_response, error) = self.pre_process_response(code=code, content_type=content_type, content=content)
if (error is not None):
return (None, (None if error.is_no_content() else error))
(code, content_type, content) = pre_processed_response
if (code == 200):
return (ModelsAddPlayerResponse.create_from_dict(content), None)
if (code == 400):
return (None, RestapiErrorResponseV2.create_from_dict(content))
if (code == 404):
return (None, RestapiErrorResponseV2.create_from_dict(content))
if (code == 500):
return (None, RestapiErrorResponseV2.create_from_dict(content))
return (None, self.handle_undocumented_response(code=code, content_type=content_type, content=content))<|docstring|>Parse the given response.
200: OK - ModelsAddPlayerResponse (player removed)
400: Bad Request - RestapiErrorResponseV2 (malformed request)
404: Not Found - RestapiErrorResponseV2 (session not found)
500: Internal Server Error - RestapiErrorResponseV2 (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)<|endoftext|> |
b0eb2b62c801e17c9d97e0a29f650559469cecfeb3d683e3f200580e1eebcd46 | def open_file(filename, encoding='utf-8', mode='r'):
' Opens file using codecs module.\n\n Args:\n filename: A string.\n encoding: A string specifies the encoding which is to be used for the\n file.\n mode: A string epecifies the opening mode.\n\n Returns: A file descriptor.\n '
if ((mode == 'r') and (not gfile.Exists(filename))):
raise OSError('File: "{}" not exists.'.format(filename))
return codecs.open(filename, mode=mode, encoding=encoding) | Opens file using codecs module.
Args:
filename: A string.
encoding: A string specifies the encoding which is to be used for the
file.
mode: A string epecifies the opening mode.
Returns: A file descriptor. | njunmt/utils/misc.py | open_file | zhaocq-nlp/NJUNMT-tf | 111 | python | def open_file(filename, encoding='utf-8', mode='r'):
' Opens file using codecs module.\n\n Args:\n filename: A string.\n encoding: A string specifies the encoding which is to be used for the\n file.\n mode: A string epecifies the opening mode.\n\n Returns: A file descriptor.\n '
if ((mode == 'r') and (not gfile.Exists(filename))):
raise OSError('File: "{}" not exists.'.format(filename))
return codecs.open(filename, mode=mode, encoding=encoding) | def open_file(filename, encoding='utf-8', mode='r'):
' Opens file using codecs module.\n\n Args:\n filename: A string.\n encoding: A string specifies the encoding which is to be used for the\n file.\n mode: A string epecifies the opening mode.\n\n Returns: A file descriptor.\n '
if ((mode == 'r') and (not gfile.Exists(filename))):
raise OSError('File: "{}" not exists.'.format(filename))
return codecs.open(filename, mode=mode, encoding=encoding)<|docstring|>Opens file using codecs module.
Args:
filename: A string.
encoding: A string specifies the encoding which is to be used for the
file.
mode: A string epecifies the opening mode.
Returns: A file descriptor.<|endoftext|> |
5fad3a633ca15c7bac35f3d9e2127227d45ee45b624ea1485d4a01a4ca1342e6 | def close_file(fp):
' Closes a file descriptor.\n\n Args:\n fp: A file descriptor.\n '
if (not fp.closed):
fp.close() | Closes a file descriptor.
Args:
fp: A file descriptor. | njunmt/utils/misc.py | close_file | zhaocq-nlp/NJUNMT-tf | 111 | python | def close_file(fp):
' Closes a file descriptor.\n\n Args:\n fp: A file descriptor.\n '
if (not fp.closed):
fp.close() | def close_file(fp):
' Closes a file descriptor.\n\n Args:\n fp: A file descriptor.\n '
if (not fp.closed):
fp.close()<|docstring|>Closes a file descriptor.
Args:
fp: A file descriptor.<|endoftext|> |
3b0001b666b826bfeaa28c691cd27ad505458bcdfd89053e10ba3495b5e49ea2 | def compute_non_padding_num(input_fields, name_prefix):
' Computes non-padding num and total tokens num.\n\n Args:\n input_fields: A dict of placeholders.\n name_prefix: The key prefix name, Constants.FEATURE_NAME_PREFIX\n or Constants.LABEL_NAME_PREFIX\n\n Returns: A tuple (non-padding tokens num, total tokens num)\n '
length = input_fields[concat_name(name_prefix, Constants.LENGTH_NAME)]
ids = input_fields[concat_name(name_prefix, Constants.IDS_NAME)]
nonpadding_tokens_num = tf.reduce_sum(length)
shape = tf.shape(ids)
total_tokens_num = (shape[0] * shape[1])
return (nonpadding_tokens_num, total_tokens_num) | Computes non-padding num and total tokens num.
Args:
input_fields: A dict of placeholders.
name_prefix: The key prefix name, Constants.FEATURE_NAME_PREFIX
or Constants.LABEL_NAME_PREFIX
Returns: A tuple (non-padding tokens num, total tokens num) | njunmt/utils/misc.py | compute_non_padding_num | zhaocq-nlp/NJUNMT-tf | 111 | python | def compute_non_padding_num(input_fields, name_prefix):
' Computes non-padding num and total tokens num.\n\n Args:\n input_fields: A dict of placeholders.\n name_prefix: The key prefix name, Constants.FEATURE_NAME_PREFIX\n or Constants.LABEL_NAME_PREFIX\n\n Returns: A tuple (non-padding tokens num, total tokens num)\n '
length = input_fields[concat_name(name_prefix, Constants.LENGTH_NAME)]
ids = input_fields[concat_name(name_prefix, Constants.IDS_NAME)]
nonpadding_tokens_num = tf.reduce_sum(length)
shape = tf.shape(ids)
total_tokens_num = (shape[0] * shape[1])
return (nonpadding_tokens_num, total_tokens_num) | def compute_non_padding_num(input_fields, name_prefix):
' Computes non-padding num and total tokens num.\n\n Args:\n input_fields: A dict of placeholders.\n name_prefix: The key prefix name, Constants.FEATURE_NAME_PREFIX\n or Constants.LABEL_NAME_PREFIX\n\n Returns: A tuple (non-padding tokens num, total tokens num)\n '
length = input_fields[concat_name(name_prefix, Constants.LENGTH_NAME)]
ids = input_fields[concat_name(name_prefix, Constants.IDS_NAME)]
nonpadding_tokens_num = tf.reduce_sum(length)
shape = tf.shape(ids)
total_tokens_num = (shape[0] * shape[1])
return (nonpadding_tokens_num, total_tokens_num)<|docstring|>Computes non-padding num and total tokens num.
Args:
input_fields: A dict of placeholders.
name_prefix: The key prefix name, Constants.FEATURE_NAME_PREFIX
or Constants.LABEL_NAME_PREFIX
Returns: A tuple (non-padding tokens num, total tokens num)<|endoftext|> |
14da1f74a8e8031e19b393dfcd4547fc321bd19d6458af3358674ff054dba8ea | def port_is_open(host):
' Checks whether the port is open.\n\n Args:\n host: A string has format "ip:port".\n\n Returns: True if the port is open, False otherwise.\n '
(ip, port) = host.strip().split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False | Checks whether the port is open.
Args:
host: A string has format "ip:port".
Returns: True if the port is open, False otherwise. | njunmt/utils/misc.py | port_is_open | zhaocq-nlp/NJUNMT-tf | 111 | python | def port_is_open(host):
' Checks whether the port is open.\n\n Args:\n host: A string has format "ip:port".\n\n Returns: True if the port is open, False otherwise.\n '
(ip, port) = host.strip().split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False | def port_is_open(host):
' Checks whether the port is open.\n\n Args:\n host: A string has format "ip:port".\n\n Returns: True if the port is open, False otherwise.\n '
(ip, port) = host.strip().split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False<|docstring|>Checks whether the port is open.
Args:
host: A string has format "ip:port".
Returns: True if the port is open, False otherwise.<|endoftext|> |
257360e3ea838cdfaabed8b38849abdb53065c82f26814d9d11a4d480fb28837 | def create_ps_worker(ps_hosts, worker_hosts, task_index, ps):
' Creates tf ps and workers.\n\n Args:\n ps_hosts: A list of host strings with format "ip:port".\n worker_hosts: A list of worker strings with format "ip:port".\n task_index: The task index.\n ps: Whether it is a parameter server.\n\n Returns: A tuple `(server, clusters, num_workers, gpu_options)`.\n '
ps_hosts = ps_hosts
worker_hosts = worker_hosts
num_workers = len(worker_hosts)
cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
gpu_options = tf.GPUOptions(allocator_type='BFC', allow_growth=True)
if ps:
for host in ps_hosts:
if port_is_open(host):
raise ValueError(('Error with ps_hosts: %s, the port %s is already occupied.' % (host, host.split(':')[1])))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(), job_name='ps', task_index=task_index, default_session_config=tf.ConfigProto(gpu_options=gpu_options, device_count={'GPU': 0}), protocol='grpc')
else:
host = worker_hosts[task_index]
if port_is_open(host):
raise ValueError(('Error with worker_hosts: %s, the port %s is already occupied.' % (host, host.split(':')[1])))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(), job_name='worker', task_index=task_index, default_session_config=tf.ConfigProto(gpu_options=gpu_options), protocol='grpc')
server = tf.train.Server(server_def)
return (server, cluster, num_workers, gpu_options) | Creates tf ps and workers.
Args:
ps_hosts: A list of host strings with format "ip:port".
worker_hosts: A list of worker strings with format "ip:port".
task_index: The task index.
ps: Whether it is a parameter server.
Returns: A tuple `(server, clusters, num_workers, gpu_options)`. | njunmt/utils/misc.py | create_ps_worker | zhaocq-nlp/NJUNMT-tf | 111 | python | def create_ps_worker(ps_hosts, worker_hosts, task_index, ps):
' Creates tf ps and workers.\n\n Args:\n ps_hosts: A list of host strings with format "ip:port".\n worker_hosts: A list of worker strings with format "ip:port".\n task_index: The task index.\n ps: Whether it is a parameter server.\n\n Returns: A tuple `(server, clusters, num_workers, gpu_options)`.\n '
ps_hosts = ps_hosts
worker_hosts = worker_hosts
num_workers = len(worker_hosts)
cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
gpu_options = tf.GPUOptions(allocator_type='BFC', allow_growth=True)
if ps:
for host in ps_hosts:
if port_is_open(host):
raise ValueError(('Error with ps_hosts: %s, the port %s is already occupied.' % (host, host.split(':')[1])))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(), job_name='ps', task_index=task_index, default_session_config=tf.ConfigProto(gpu_options=gpu_options, device_count={'GPU': 0}), protocol='grpc')
else:
host = worker_hosts[task_index]
if port_is_open(host):
raise ValueError(('Error with worker_hosts: %s, the port %s is already occupied.' % (host, host.split(':')[1])))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(), job_name='worker', task_index=task_index, default_session_config=tf.ConfigProto(gpu_options=gpu_options), protocol='grpc')
server = tf.train.Server(server_def)
return (server, cluster, num_workers, gpu_options) | def create_ps_worker(ps_hosts, worker_hosts, task_index, ps):
' Creates tf ps and workers.\n\n Args:\n ps_hosts: A list of host strings with format "ip:port".\n worker_hosts: A list of worker strings with format "ip:port".\n task_index: The task index.\n ps: Whether it is a parameter server.\n\n Returns: A tuple `(server, clusters, num_workers, gpu_options)`.\n '
ps_hosts = ps_hosts
worker_hosts = worker_hosts
num_workers = len(worker_hosts)
cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
gpu_options = tf.GPUOptions(allocator_type='BFC', allow_growth=True)
if ps:
for host in ps_hosts:
if port_is_open(host):
raise ValueError(('Error with ps_hosts: %s, the port %s is already occupied.' % (host, host.split(':')[1])))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(), job_name='ps', task_index=task_index, default_session_config=tf.ConfigProto(gpu_options=gpu_options, device_count={'GPU': 0}), protocol='grpc')
else:
host = worker_hosts[task_index]
if port_is_open(host):
raise ValueError(('Error with worker_hosts: %s, the port %s is already occupied.' % (host, host.split(':')[1])))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(), job_name='worker', task_index=task_index, default_session_config=tf.ConfigProto(gpu_options=gpu_options), protocol='grpc')
server = tf.train.Server(server_def)
return (server, cluster, num_workers, gpu_options)<|docstring|>Creates tf ps and workers.
Args:
ps_hosts: A list of host strings with format "ip:port".
worker_hosts: A list of worker strings with format "ip:port".
task_index: The task index.
ps: Whether it is a parameter server.
Returns: A tuple `(server, clusters, num_workers, gpu_options)`.<|endoftext|> |
16539e651dc213753d0d733808724f7206814c40d04ac99db672b105cd772cc5 | def dump_model_analysis(model_dir):
' Dumps detailed model size.\n\n Args:\n model_dir: The directory name to save to.\n '
filename = os.path.join(model_dir, Constants.MODEL_ANALYSIS_FILENAME)
profile_opt_builder = tf.profiler.ProfileOptionBuilder
opts = profile_opt_builder.trainable_variables_parameter()
opts['output'] = 'file:outfile={}'.format(filename)
param_stats = tf.profiler.profile(tf.get_default_graph(), options=opts)
with open_file(filename) as file:
tf.logging.info(file.read()) | Dumps detailed model size.
Args:
model_dir: The directory name to save to. | njunmt/utils/misc.py | dump_model_analysis | zhaocq-nlp/NJUNMT-tf | 111 | python | def dump_model_analysis(model_dir):
' Dumps detailed model size.\n\n Args:\n model_dir: The directory name to save to.\n '
filename = os.path.join(model_dir, Constants.MODEL_ANALYSIS_FILENAME)
profile_opt_builder = tf.profiler.ProfileOptionBuilder
opts = profile_opt_builder.trainable_variables_parameter()
opts['output'] = 'file:outfile={}'.format(filename)
param_stats = tf.profiler.profile(tf.get_default_graph(), options=opts)
with open_file(filename) as file:
tf.logging.info(file.read()) | def dump_model_analysis(model_dir):
' Dumps detailed model size.\n\n Args:\n model_dir: The directory name to save to.\n '
filename = os.path.join(model_dir, Constants.MODEL_ANALYSIS_FILENAME)
profile_opt_builder = tf.profiler.ProfileOptionBuilder
opts = profile_opt_builder.trainable_variables_parameter()
opts['output'] = 'file:outfile={}'.format(filename)
param_stats = tf.profiler.profile(tf.get_default_graph(), options=opts)
with open_file(filename) as file:
tf.logging.info(file.read())<|docstring|>Dumps detailed model size.
Args:
model_dir: The directory name to save to.<|endoftext|> |
2a6b62318811d5643ec0285b76dd17c6e60c7ea07ad9ed35660bc096012450ce | def get_available_gpus():
'Returns a list of available GPU devices names. '
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if (x.device_type == 'GPU')] | Returns a list of available GPU devices names. | njunmt/utils/misc.py | get_available_gpus | zhaocq-nlp/NJUNMT-tf | 111 | python | def get_available_gpus():
' '
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if (x.device_type == 'GPU')] | def get_available_gpus():
' '
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if (x.device_type == 'GPU')]<|docstring|>Returns a list of available GPU devices names.<|endoftext|> |
dd83c3e4dd94a008afad19dc3b52a4feef79393f0a94e3454ee8fbbc8db12c57 | def get_available_devices():
' Returns aa list of '
gpus = get_available_gpus()
if (len(gpus) == 0):
return ['/cpu:0']
return ['/gpu:{}'.format(i) for (i, _) in enumerate(gpus)] | Returns aa list of | njunmt/utils/misc.py | get_available_devices | zhaocq-nlp/NJUNMT-tf | 111 | python | def get_available_devices():
' '
gpus = get_available_gpus()
if (len(gpus) == 0):
return ['/cpu:0']
return ['/gpu:{}'.format(i) for (i, _) in enumerate(gpus)] | def get_available_devices():
' '
gpus = get_available_gpus()
if (len(gpus) == 0):
return ['/cpu:0']
return ['/gpu:{}'.format(i) for (i, _) in enumerate(gpus)]<|docstring|>Returns aa list of<|endoftext|> |
465d9df374dd8feae04012e53d8baea6fff3db23d7726f9a6500c032fcc5c757 | def label_smoothing(labels, vocab_size, epsilon=0.1):
'Applies label smoothing. See https://arxiv.org/abs/1512.00567.\n\n Args:\n labels: A 2d tensor with shape of [N, T].\n vocab_size: The size of vocabulary.\n epsilon: Smoothing rate.\n\n Returns: The smoothed labels.\n\n For example,\n ```\n import tensorflow as tf\n inputs = tf.convert_to_tensor([[[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0]],\n [[1, 0, 0],\n [1, 0, 0],\n [0, 1, 0]]], tf.float32)\n outputs = label_smoothing(inputs)\n with tf.Session() as sess:\n print(sess.run([outputs]))\n >>\n [array([[[ 0.03333334, 0.03333334, 0.93333334],\n [ 0.03333334, 0.93333334, 0.03333334],\n [ 0.93333334, 0.03333334, 0.03333334]],\n [[ 0.93333334, 0.03333334, 0.03333334],\n [ 0.93333334, 0.03333334, 0.03333334],\n [ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]\n ```\n '
confidence = (1.0 - epsilon)
low_confidence = (epsilon / tf.to_float((vocab_size - 1)))
normalizing = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20)))))
soft_targets = tf.one_hot(indices=labels, depth=vocab_size, on_value=confidence, off_value=low_confidence)
return (soft_targets, normalizing) | Applies label smoothing. See https://arxiv.org/abs/1512.00567.
Args:
labels: A 2d tensor with shape of [N, T].
vocab_size: The size of vocabulary.
epsilon: Smoothing rate.
Returns: The smoothed labels.
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
``` | njunmt/utils/misc.py | label_smoothing | zhaocq-nlp/NJUNMT-tf | 111 | python | def label_smoothing(labels, vocab_size, epsilon=0.1):
'Applies label smoothing. See https://arxiv.org/abs/1512.00567.\n\n Args:\n labels: A 2d tensor with shape of [N, T].\n vocab_size: The size of vocabulary.\n epsilon: Smoothing rate.\n\n Returns: The smoothed labels.\n\n For example,\n ```\n import tensorflow as tf\n inputs = tf.convert_to_tensor([[[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0]],\n [[1, 0, 0],\n [1, 0, 0],\n [0, 1, 0]]], tf.float32)\n outputs = label_smoothing(inputs)\n with tf.Session() as sess:\n print(sess.run([outputs]))\n >>\n [array([[[ 0.03333334, 0.03333334, 0.93333334],\n [ 0.03333334, 0.93333334, 0.03333334],\n [ 0.93333334, 0.03333334, 0.03333334]],\n [[ 0.93333334, 0.03333334, 0.03333334],\n [ 0.93333334, 0.03333334, 0.03333334],\n [ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]\n ```\n '
confidence = (1.0 - epsilon)
low_confidence = (epsilon / tf.to_float((vocab_size - 1)))
normalizing = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20)))))
soft_targets = tf.one_hot(indices=labels, depth=vocab_size, on_value=confidence, off_value=low_confidence)
return (soft_targets, normalizing) | def label_smoothing(labels, vocab_size, epsilon=0.1):
'Applies label smoothing. See https://arxiv.org/abs/1512.00567.\n\n Args:\n labels: A 2d tensor with shape of [N, T].\n vocab_size: The size of vocabulary.\n epsilon: Smoothing rate.\n\n Returns: The smoothed labels.\n\n For example,\n ```\n import tensorflow as tf\n inputs = tf.convert_to_tensor([[[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0]],\n [[1, 0, 0],\n [1, 0, 0],\n [0, 1, 0]]], tf.float32)\n outputs = label_smoothing(inputs)\n with tf.Session() as sess:\n print(sess.run([outputs]))\n >>\n [array([[[ 0.03333334, 0.03333334, 0.93333334],\n [ 0.03333334, 0.93333334, 0.03333334],\n [ 0.93333334, 0.03333334, 0.03333334]],\n [[ 0.93333334, 0.03333334, 0.03333334],\n [ 0.93333334, 0.03333334, 0.03333334],\n [ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]\n ```\n '
confidence = (1.0 - epsilon)
low_confidence = (epsilon / tf.to_float((vocab_size - 1)))
normalizing = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20)))))
soft_targets = tf.one_hot(indices=labels, depth=vocab_size, on_value=confidence, off_value=low_confidence)
return (soft_targets, normalizing)<|docstring|>Applies label smoothing. See https://arxiv.org/abs/1512.00567.
Args:
labels: A 2d tensor with shape of [N, T].
vocab_size: The size of vocabulary.
epsilon: Smoothing rate.
Returns: The smoothed labels.
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
```<|endoftext|> |
ab06d858575464b09c114ff4b2a50a0eaa8d85eb483f9db92c83824f2336deec | def get_model_top_scope_name(model_name, problem_name):
' Returns the top scope name of all models.\n\n Args:\n model_name: The model string.\n problem_name: The problem name.\n\n Returns: A str.\n '
if (model_name is None):
model_name = 'SequenceToSequence'
return (problem_name or model_name.split('.')[(- 1)]) | Returns the top scope name of all models.
Args:
model_name: The model string.
problem_name: The problem name.
Returns: A str. | njunmt/utils/misc.py | get_model_top_scope_name | zhaocq-nlp/NJUNMT-tf | 111 | python | def get_model_top_scope_name(model_name, problem_name):
' Returns the top scope name of all models.\n\n Args:\n model_name: The model string.\n problem_name: The problem name.\n\n Returns: A str.\n '
if (model_name is None):
model_name = 'SequenceToSequence'
return (problem_name or model_name.split('.')[(- 1)]) | def get_model_top_scope_name(model_name, problem_name):
' Returns the top scope name of all models.\n\n Args:\n model_name: The model string.\n problem_name: The problem name.\n\n Returns: A str.\n '
if (model_name is None):
model_name = 'SequenceToSequence'
return (problem_name or model_name.split('.')[(- 1)])<|docstring|>Returns the top scope name of all models.
Args:
model_name: The model string.
problem_name: The problem name.
Returns: A str.<|endoftext|> |
9c26acbb360b72b4568aa0a19ffe63e7ea9ba8f31811afcade9615b452c18d69 | def load_pretrain_model(model_name, pretrain_model_dir, problem_name):
' Loads pretrained model.\n\n Args:\n model_name: The name of the model.\n pretrain_model_dir: The pretrained model dir.\n problem_name: The problem name.\n\n Returns:\n A list of assign ops.\n '
from njunmt.utils.configurable import ModelConfigs
top_scope_name = get_model_top_scope_name(model_name, problem_name)
pt_model_configs = ModelConfigs.load(pretrain_model_dir)
pt_model_top_scope_name = get_model_top_scope_name(pt_model_configs['model'], pt_model_configs['problem_name'])
tf.logging.info('loading variables from {}'.format(pretrain_model_dir))
assign_op = []
for (var_name, _) in tf.contrib.framework.list_variables(pretrain_model_dir):
if var_name.startswith('OptimizeLoss'):
continue
if ((tf.GraphKeys.GLOBAL_STEP in var_name) or ('learning_rate' in var_name) or ('lr' in var_name)):
tf.logging.info('Pretrain: ignore {}'.format(var_name))
continue
tf.logging.info('Pretrain: reload {}'.format(var_name))
var = tf.contrib.framework.load_variable(pretrain_model_dir, var_name)
with tf.variable_scope(top_scope_name, reuse=True):
v = tf.get_variable(name=var_name[(len(pt_model_top_scope_name) + 1):], shape=var.shape, dtype=var.dtype)
assign_op.append(v.assign(var))
return assign_op | Loads pretrained model.
Args:
model_name: The name of the model.
pretrain_model_dir: The pretrained model dir.
problem_name: The problem name.
Returns:
A list of assign ops. | njunmt/utils/misc.py | load_pretrain_model | zhaocq-nlp/NJUNMT-tf | 111 | python | def load_pretrain_model(model_name, pretrain_model_dir, problem_name):
' Loads pretrained model.\n\n Args:\n model_name: The name of the model.\n pretrain_model_dir: The pretrained model dir.\n problem_name: The problem name.\n\n Returns:\n A list of assign ops.\n '
from njunmt.utils.configurable import ModelConfigs
top_scope_name = get_model_top_scope_name(model_name, problem_name)
pt_model_configs = ModelConfigs.load(pretrain_model_dir)
pt_model_top_scope_name = get_model_top_scope_name(pt_model_configs['model'], pt_model_configs['problem_name'])
tf.logging.info('loading variables from {}'.format(pretrain_model_dir))
assign_op = []
for (var_name, _) in tf.contrib.framework.list_variables(pretrain_model_dir):
if var_name.startswith('OptimizeLoss'):
continue
if ((tf.GraphKeys.GLOBAL_STEP in var_name) or ('learning_rate' in var_name) or ('lr' in var_name)):
tf.logging.info('Pretrain: ignore {}'.format(var_name))
continue
tf.logging.info('Pretrain: reload {}'.format(var_name))
var = tf.contrib.framework.load_variable(pretrain_model_dir, var_name)
with tf.variable_scope(top_scope_name, reuse=True):
v = tf.get_variable(name=var_name[(len(pt_model_top_scope_name) + 1):], shape=var.shape, dtype=var.dtype)
assign_op.append(v.assign(var))
return assign_op | def load_pretrain_model(model_name, pretrain_model_dir, problem_name):
' Loads pretrained model.\n\n Args:\n model_name: The name of the model.\n pretrain_model_dir: The pretrained model dir.\n problem_name: The problem name.\n\n Returns:\n A list of assign ops.\n '
from njunmt.utils.configurable import ModelConfigs
top_scope_name = get_model_top_scope_name(model_name, problem_name)
pt_model_configs = ModelConfigs.load(pretrain_model_dir)
pt_model_top_scope_name = get_model_top_scope_name(pt_model_configs['model'], pt_model_configs['problem_name'])
tf.logging.info('loading variables from {}'.format(pretrain_model_dir))
assign_op = []
for (var_name, _) in tf.contrib.framework.list_variables(pretrain_model_dir):
if var_name.startswith('OptimizeLoss'):
continue
if ((tf.GraphKeys.GLOBAL_STEP in var_name) or ('learning_rate' in var_name) or ('lr' in var_name)):
tf.logging.info('Pretrain: ignore {}'.format(var_name))
continue
tf.logging.info('Pretrain: reload {}'.format(var_name))
var = tf.contrib.framework.load_variable(pretrain_model_dir, var_name)
with tf.variable_scope(top_scope_name, reuse=True):
v = tf.get_variable(name=var_name[(len(pt_model_top_scope_name) + 1):], shape=var.shape, dtype=var.dtype)
assign_op.append(v.assign(var))
return assign_op<|docstring|>Loads pretrained model.
Args:
model_name: The name of the model.
pretrain_model_dir: The pretrained model dir.
problem_name: The problem name.
Returns:
A list of assign ops.<|endoftext|> |
9d9eb3e368ece137b6b619c02ffad5e4aeecc992221c9663103020727596c219 | def padding_batch_data(seqs_x, padding_x):
' Creates batch data tensor.\n\n Args:\n seqs_x: A list of word sequence ids. Each word sequence is also\n a list.\n padding_x: The symbol id to be added to empty position.\n\n Returns: A tuple `(seqs, seq_lengths)`, where `seqs` is a 2-d\n numpy.ndarray with shape [len(seqs_x), max_seq_len] and\n `seq_lengths` is a 1-d numpy.ndarray with shape [len(seqs_x), ].\n\n '
lengths_x = [len(s) for s in seqs_x]
max_len_x = numpy.max(lengths_x)
n_samples = len(seqs_x)
x = numpy.full([n_samples, max_len_x], padding_x, numpy.int32)
for (idx, s_x) in enumerate(seqs_x):
x[(idx, :lengths_x[idx])] = s_x
return (x, numpy.array(lengths_x, dtype=numpy.int32)) | Creates batch data tensor.
Args:
seqs_x: A list of word sequence ids. Each word sequence is also
a list.
padding_x: The symbol id to be added to empty position.
Returns: A tuple `(seqs, seq_lengths)`, where `seqs` is a 2-d
numpy.ndarray with shape [len(seqs_x), max_seq_len] and
`seq_lengths` is a 1-d numpy.ndarray with shape [len(seqs_x), ]. | njunmt/utils/misc.py | padding_batch_data | zhaocq-nlp/NJUNMT-tf | 111 | python | def padding_batch_data(seqs_x, padding_x):
' Creates batch data tensor.\n\n Args:\n seqs_x: A list of word sequence ids. Each word sequence is also\n a list.\n padding_x: The symbol id to be added to empty position.\n\n Returns: A tuple `(seqs, seq_lengths)`, where `seqs` is a 2-d\n numpy.ndarray with shape [len(seqs_x), max_seq_len] and\n `seq_lengths` is a 1-d numpy.ndarray with shape [len(seqs_x), ].\n\n '
lengths_x = [len(s) for s in seqs_x]
max_len_x = numpy.max(lengths_x)
n_samples = len(seqs_x)
x = numpy.full([n_samples, max_len_x], padding_x, numpy.int32)
for (idx, s_x) in enumerate(seqs_x):
x[(idx, :lengths_x[idx])] = s_x
return (x, numpy.array(lengths_x, dtype=numpy.int32)) | def padding_batch_data(seqs_x, padding_x):
' Creates batch data tensor.\n\n Args:\n seqs_x: A list of word sequence ids. Each word sequence is also\n a list.\n padding_x: The symbol id to be added to empty position.\n\n Returns: A tuple `(seqs, seq_lengths)`, where `seqs` is a 2-d\n numpy.ndarray with shape [len(seqs_x), max_seq_len] and\n `seq_lengths` is a 1-d numpy.ndarray with shape [len(seqs_x), ].\n\n '
lengths_x = [len(s) for s in seqs_x]
max_len_x = numpy.max(lengths_x)
n_samples = len(seqs_x)
x = numpy.full([n_samples, max_len_x], padding_x, numpy.int32)
for (idx, s_x) in enumerate(seqs_x):
x[(idx, :lengths_x[idx])] = s_x
return (x, numpy.array(lengths_x, dtype=numpy.int32))<|docstring|>Creates batch data tensor.
Args:
seqs_x: A list of word sequence ids. Each word sequence is also
a list.
padding_x: The symbol id to be added to empty position.
Returns: A tuple `(seqs, seq_lengths)`, where `seqs` is a 2-d
numpy.ndarray with shape [len(seqs_x), max_seq_len] and
`seq_lengths` is a 1-d numpy.ndarray with shape [len(seqs_x), ].<|endoftext|> |
3caa03f221b0a4959998b4614f3133adbfa4416786c1eb02c10f954a12581d1a | def add_dict_to_collection(collection_name, dict_):
' Adds a dictionary to a graph collection.\n\n Args:\n collection_name: The name of the collection to add the dictionary to.\n dict_: A dictionary of string keys to tensor values.\n '
key_collection = (collection_name + '_keys')
value_collection = (collection_name + '_values')
for (key, value) in dict_.items():
tf.add_to_collection(key_collection, key)
tf.add_to_collection(value_collection, value) | Adds a dictionary to a graph collection.
Args:
collection_name: The name of the collection to add the dictionary to.
dict_: A dictionary of string keys to tensor values. | njunmt/utils/misc.py | add_dict_to_collection | zhaocq-nlp/NJUNMT-tf | 111 | python | def add_dict_to_collection(collection_name, dict_):
' Adds a dictionary to a graph collection.\n\n Args:\n collection_name: The name of the collection to add the dictionary to.\n dict_: A dictionary of string keys to tensor values.\n '
key_collection = (collection_name + '_keys')
value_collection = (collection_name + '_values')
for (key, value) in dict_.items():
tf.add_to_collection(key_collection, key)
tf.add_to_collection(value_collection, value) | def add_dict_to_collection(collection_name, dict_):
' Adds a dictionary to a graph collection.\n\n Args:\n collection_name: The name of the collection to add the dictionary to.\n dict_: A dictionary of string keys to tensor values.\n '
key_collection = (collection_name + '_keys')
value_collection = (collection_name + '_values')
for (key, value) in dict_.items():
tf.add_to_collection(key_collection, key)
tf.add_to_collection(value_collection, value)<|docstring|>Adds a dictionary to a graph collection.
Args:
collection_name: The name of the collection to add the dictionary to.
dict_: A dictionary of string keys to tensor values.<|endoftext|> |
9c3667ba869aefba428303f5f7ae29fd7497a9385db1e6e1a1daa1b0d67c60fd | def get_dict_from_collection(collection_name):
' Gets a dictionary from a graph collection.\n\n Args:\n collection_name: A collection name to read a dictionary from.\n\n Returns: A dictionary with string keys and tensor values\n '
key_collection = (collection_name + '_keys')
value_collection = (collection_name + '_values')
keys = tf.get_collection(key_collection)
values = tf.get_collection(value_collection)
return dict(zip(keys, values)) | Gets a dictionary from a graph collection.
Args:
collection_name: A collection name to read a dictionary from.
Returns: A dictionary with string keys and tensor values | njunmt/utils/misc.py | get_dict_from_collection | zhaocq-nlp/NJUNMT-tf | 111 | python | def get_dict_from_collection(collection_name):
' Gets a dictionary from a graph collection.\n\n Args:\n collection_name: A collection name to read a dictionary from.\n\n Returns: A dictionary with string keys and tensor values\n '
key_collection = (collection_name + '_keys')
value_collection = (collection_name + '_values')
keys = tf.get_collection(key_collection)
values = tf.get_collection(value_collection)
return dict(zip(keys, values)) | def get_dict_from_collection(collection_name):
' Gets a dictionary from a graph collection.\n\n Args:\n collection_name: A collection name to read a dictionary from.\n\n Returns: A dictionary with string keys and tensor values\n '
key_collection = (collection_name + '_keys')
value_collection = (collection_name + '_values')
keys = tf.get_collection(key_collection)
values = tf.get_collection(value_collection)
return dict(zip(keys, values))<|docstring|>Gets a dictionary from a graph collection.
Args:
collection_name: A collection name to read a dictionary from.
Returns: A dictionary with string keys and tensor values<|endoftext|> |
fa9e91b718d68b53473f822922946ac36b1761a64599ed52987d8b4a60db0a7d | def deprecated(obj):
'This is a decorator which can be used to mark functions or classes\n as deprecated. It will result in a warning being emmitted\n when the function/class is used.'
def new_obj(*args, **kwargs):
tf.logging.info(('Call to deprecated function/class %s.' % obj.__name__))
tf.logging.warn(('Call to deprecated function/class %s.' % obj.__name__))
return obj(*args, **kwargs)
return new_obj | This is a decorator which can be used to mark functions or classes
as deprecated. It will result in a warning being emmitted
when the function/class is used. | njunmt/utils/misc.py | deprecated | zhaocq-nlp/NJUNMT-tf | 111 | python | def deprecated(obj):
'This is a decorator which can be used to mark functions or classes\n as deprecated. It will result in a warning being emmitted\n when the function/class is used.'
def new_obj(*args, **kwargs):
tf.logging.info(('Call to deprecated function/class %s.' % obj.__name__))
tf.logging.warn(('Call to deprecated function/class %s.' % obj.__name__))
return obj(*args, **kwargs)
return new_obj | def deprecated(obj):
'This is a decorator which can be used to mark functions or classes\n as deprecated. It will result in a warning being emmitted\n when the function/class is used.'
def new_obj(*args, **kwargs):
tf.logging.info(('Call to deprecated function/class %s.' % obj.__name__))
tf.logging.warn(('Call to deprecated function/class %s.' % obj.__name__))
return obj(*args, **kwargs)
return new_obj<|docstring|>This is a decorator which can be used to mark functions or classes
as deprecated. It will result in a warning being emmitted
when the function/class is used.<|endoftext|> |
d5f4edb4d2d77f0e35702571ac633bedb4d7bbf317b378fc1e6772ebafdceae9 | def shuffle_data(from_binding, to_binding):
' Calls njunmt/tools/shuffle.py to shuffle data.\n\n Args:\n from_binding: The original data files with same number of lines.\n to_binding: The files to save to.\n '
cmd = 'python {script} {from_} {to_}'.format(script='njunmt/tools/shuffle.py', from_=','.join(from_binding), to_=','.join(to_binding))
os.system(cmd) | Calls njunmt/tools/shuffle.py to shuffle data.
Args:
from_binding: The original data files with same number of lines.
to_binding: The files to save to. | njunmt/utils/misc.py | shuffle_data | zhaocq-nlp/NJUNMT-tf | 111 | python | def shuffle_data(from_binding, to_binding):
' Calls njunmt/tools/shuffle.py to shuffle data.\n\n Args:\n from_binding: The original data files with same number of lines.\n to_binding: The files to save to.\n '
cmd = 'python {script} {from_} {to_}'.format(script='njunmt/tools/shuffle.py', from_=','.join(from_binding), to_=','.join(to_binding))
os.system(cmd) | def shuffle_data(from_binding, to_binding):
' Calls njunmt/tools/shuffle.py to shuffle data.\n\n Args:\n from_binding: The original data files with same number of lines.\n to_binding: The files to save to.\n '
cmd = 'python {script} {from_} {to_}'.format(script='njunmt/tools/shuffle.py', from_=','.join(from_binding), to_=','.join(to_binding))
os.system(cmd)<|docstring|>Calls njunmt/tools/shuffle.py to shuffle data.
Args:
from_binding: The original data files with same number of lines.
to_binding: The files to save to.<|endoftext|> |
090f687ed68ca9792f218b2635943fd91ff995894f96351f9291b5f39e164004 | def access_multiple_files(name):
' Gets the list of files.\n\n Args:\n name: A string, the prefix of the files.\n\n Returns: A list or None.\n '
assert name
ret = []
if gfile.Exists(name):
ret.append(name)
else:
idx = 0
while gfile.Exists((name + str(idx))):
ret.append((name + str(idx)))
idx += 1
assert (len(ret) > 0), 'Fail to access file {} or {}0...'.format(name, name)
return ret | Gets the list of files.
Args:
name: A string, the prefix of the files.
Returns: A list or None. | njunmt/utils/misc.py | access_multiple_files | zhaocq-nlp/NJUNMT-tf | 111 | python | def access_multiple_files(name):
' Gets the list of files.\n\n Args:\n name: A string, the prefix of the files.\n\n Returns: A list or None.\n '
assert name
ret = []
if gfile.Exists(name):
ret.append(name)
else:
idx = 0
while gfile.Exists((name + str(idx))):
ret.append((name + str(idx)))
idx += 1
assert (len(ret) > 0), 'Fail to access file {} or {}0...'.format(name, name)
return ret | def access_multiple_files(name):
' Gets the list of files.\n\n Args:\n name: A string, the prefix of the files.\n\n Returns: A list or None.\n '
assert name
ret = []
if gfile.Exists(name):
ret.append(name)
else:
idx = 0
while gfile.Exists((name + str(idx))):
ret.append((name + str(idx)))
idx += 1
assert (len(ret) > 0), 'Fail to access file {} or {}0...'.format(name, name)
return ret<|docstring|>Gets the list of files.
Args:
name: A string, the prefix of the files.
Returns: A list or None.<|endoftext|> |
ecdb58d506b9d8944a37a77945b173c7753e5540f9057bd32570337f0f341618 | def inspect_varname_prefix(var_name):
' Returns the top variable scope name. '
keywords = '/input_symbol_modality'
if (keywords in var_name):
return var_name[:var_name.index(keywords)]
keywords = '/symbol_modality_'
if (keywords in var_name):
return var_name[:var_name.index(keywords)]
return None | Returns the top variable scope name. | njunmt/utils/misc.py | inspect_varname_prefix | zhaocq-nlp/NJUNMT-tf | 111 | python | def inspect_varname_prefix(var_name):
' '
keywords = '/input_symbol_modality'
if (keywords in var_name):
return var_name[:var_name.index(keywords)]
keywords = '/symbol_modality_'
if (keywords in var_name):
return var_name[:var_name.index(keywords)]
return None | def inspect_varname_prefix(var_name):
' '
keywords = '/input_symbol_modality'
if (keywords in var_name):
return var_name[:var_name.index(keywords)]
keywords = '/symbol_modality_'
if (keywords in var_name):
return var_name[:var_name.index(keywords)]
return None<|docstring|>Returns the top variable scope name.<|endoftext|> |
da47b7a35805b224acd53e1f1d4eb1bf8ff4f46e816e14f17bf6f4fcbdf8f70b | def set_fflayers_layer_norm(layer_norm=False):
' Set laye norm flag. '
from njunmt.layers import common_layers
common_layers.FFLAYERS_LAYER_NORM = layer_norm | Set laye norm flag. | njunmt/utils/misc.py | set_fflayers_layer_norm | zhaocq-nlp/NJUNMT-tf | 111 | python | def set_fflayers_layer_norm(layer_norm=False):
' '
from njunmt.layers import common_layers
common_layers.FFLAYERS_LAYER_NORM = layer_norm | def set_fflayers_layer_norm(layer_norm=False):
' '
from njunmt.layers import common_layers
common_layers.FFLAYERS_LAYER_NORM = layer_norm<|docstring|>Set laye norm flag.<|endoftext|> |
881174104245b56513822a24bc16bb7cce70f9129528f8baf3b8caaa06b93d1e | def get_saver_or_default(**kwargs):
' Returns the saver from SAVERS collection, or creates a default one.\n\n This method is used by other members of the training module, such as\n `CheckpointSaverHook`.\n\n This method is modified from tensorflow.python.training.saver._get_saver_or_default.\n\n Args:\n kwargs: Parameters passed to tf.train.Saver.\n\n Returns: `Saver`.\n\n Raises:\n RuntimeError: If the SAVERS collection already has more than one items.\n '
collection_key = tf.GraphKeys.SAVERS
savers = tf.get_collection(collection_key)
if savers:
if (len(savers) > 1):
raise RuntimeError('More than one item in collection {}. Please indicate which one to use by passing it to the constructor.'.format(collection_key))
return savers[0]
saver = tf.train.Saver(sharded=True, allow_empty=True, **kwargs)
if (saver is not None):
tf.add_to_collection(collection_key, saver)
return saver | Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`CheckpointSaverHook`.
This method is modified from tensorflow.python.training.saver._get_saver_or_default.
Args:
kwargs: Parameters passed to tf.train.Saver.
Returns: `Saver`.
Raises:
RuntimeError: If the SAVERS collection already has more than one items. | njunmt/utils/misc.py | get_saver_or_default | zhaocq-nlp/NJUNMT-tf | 111 | python | def get_saver_or_default(**kwargs):
' Returns the saver from SAVERS collection, or creates a default one.\n\n This method is used by other members of the training module, such as\n `CheckpointSaverHook`.\n\n This method is modified from tensorflow.python.training.saver._get_saver_or_default.\n\n Args:\n kwargs: Parameters passed to tf.train.Saver.\n\n Returns: `Saver`.\n\n Raises:\n RuntimeError: If the SAVERS collection already has more than one items.\n '
collection_key = tf.GraphKeys.SAVERS
savers = tf.get_collection(collection_key)
if savers:
if (len(savers) > 1):
raise RuntimeError('More than one item in collection {}. Please indicate which one to use by passing it to the constructor.'.format(collection_key))
return savers[0]
saver = tf.train.Saver(sharded=True, allow_empty=True, **kwargs)
if (saver is not None):
tf.add_to_collection(collection_key, saver)
return saver | def get_saver_or_default(**kwargs):
' Returns the saver from SAVERS collection, or creates a default one.\n\n This method is used by other members of the training module, such as\n `CheckpointSaverHook`.\n\n This method is modified from tensorflow.python.training.saver._get_saver_or_default.\n\n Args:\n kwargs: Parameters passed to tf.train.Saver.\n\n Returns: `Saver`.\n\n Raises:\n RuntimeError: If the SAVERS collection already has more than one items.\n '
collection_key = tf.GraphKeys.SAVERS
savers = tf.get_collection(collection_key)
if savers:
if (len(savers) > 1):
raise RuntimeError('More than one item in collection {}. Please indicate which one to use by passing it to the constructor.'.format(collection_key))
return savers[0]
saver = tf.train.Saver(sharded=True, allow_empty=True, **kwargs)
if (saver is not None):
tf.add_to_collection(collection_key, saver)
return saver<|docstring|>Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`CheckpointSaverHook`.
This method is modified from tensorflow.python.training.saver._get_saver_or_default.
Args:
kwargs: Parameters passed to tf.train.Saver.
Returns: `Saver`.
Raises:
RuntimeError: If the SAVERS collection already has more than one items.<|endoftext|> |
38b3aa433efae83d82ab5e98946698650b3bd9a09efdbdb567298e05d5dbf2d6 | def escape(s, level):
'Bash-escape the string `s`, `level` times.'
if (not level):
return s
out = ''
for c in s:
if (c in "\\$'<[]"):
out += f'\{c}'
else:
out += c
return escape(out, (level - 1)) | Bash-escape the string `s`, `level` times. | bashell.py | escape | benjaminjkraft/junk | 4 | python | def escape(s, level):
if (not level):
return s
out =
for c in s:
if (c in "\\$'<[]"):
out += f'\{c}'
else:
out += c
return escape(out, (level - 1)) | def escape(s, level):
if (not level):
return s
out =
for c in s:
if (c in "\\$'<[]"):
out += f'\{c}'
else:
out += c
return escape(out, (level - 1))<|docstring|>Bash-escape the string `s`, `level` times.<|endoftext|> |
229153559ae856ef56cdaebd216563271dabd051bdb713abd6df9b0004ff42b4 | def comp(s, level=0):
'Compiles a string to use the given characters.\n \n The return value will take `level+1` bash-expansions to collapse to the\n given string.\n\n The cases in this function are clearest if read in the following order:\n 0\n 1, 2, 4\n 53\n else\n 3, 6, 7\n\n Note that each case is a mix of recursive calls to comp(..., level-1), and\n calls to escape(..., level). In particular, everything that needs to be\n ignored by all previous levels we escape; while things that the previous\n level needs to expand for us go through the recursive call to comp. We\n also sometimes call comp(0), for things we know can be expanded without\n going any deeper.\n '
if (level < 0):
raise Exception('We have to go deeper')
out = ''
while s:
c = s[0]
if (s[:2] == '53'):
out += escape(f"$[{comp('0')}$[{comp('0101')}]]", level)
s = s[2:]
continue
elif (c == '0'):
out += escape('$[$$<$$]', level)
elif (c == '1'):
out += escape(f"$[{comp('0')}<$$]", level)
elif (c == '2'):
out += escape(f"$[{comp('1')}<<{comp('1')}]", level)
elif (c == '3'):
out += ((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('2')}]", level))
elif (c == '4'):
out += escape(f"$[{comp('2')}<<{comp('1')}]", level)
elif (c == '5'):
out += ((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c == '6'):
out += ((escape(f"$[{comp('2')}", level) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c == '7'):
out += ((((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('2')}", level)) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c in "\\$'<[]_"):
out += escape(c, level)
else:
out += ((escape("$'\\", level) + comp(f'{ord(c):03o}', (level - 1))) + escape("'", level))
s = s[1:]
return out | Compiles a string to use the given characters.
The return value will take `level+1` bash-expansions to collapse to the
given string.
The cases in this function are clearest if read in the following order:
0
1, 2, 4
53
else
3, 6, 7
Note that each case is a mix of recursive calls to comp(..., level-1), and
calls to escape(..., level). In particular, everything that needs to be
ignored by all previous levels we escape; while things that the previous
level needs to expand for us go through the recursive call to comp. We
also sometimes call comp(0), for things we know can be expanded without
going any deeper. | bashell.py | comp | benjaminjkraft/junk | 4 | python | def comp(s, level=0):
'Compiles a string to use the given characters.\n \n The return value will take `level+1` bash-expansions to collapse to the\n given string.\n\n The cases in this function are clearest if read in the following order:\n 0\n 1, 2, 4\n 53\n else\n 3, 6, 7\n\n Note that each case is a mix of recursive calls to comp(..., level-1), and\n calls to escape(..., level). In particular, everything that needs to be\n ignored by all previous levels we escape; while things that the previous\n level needs to expand for us go through the recursive call to comp. We\n also sometimes call comp(0), for things we know can be expanded without\n going any deeper.\n '
if (level < 0):
raise Exception('We have to go deeper')
out =
while s:
c = s[0]
if (s[:2] == '53'):
out += escape(f"$[{comp('0')}$[{comp('0101')}]]", level)
s = s[2:]
continue
elif (c == '0'):
out += escape('$[$$<$$]', level)
elif (c == '1'):
out += escape(f"$[{comp('0')}<$$]", level)
elif (c == '2'):
out += escape(f"$[{comp('1')}<<{comp('1')}]", level)
elif (c == '3'):
out += ((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('2')}]", level))
elif (c == '4'):
out += escape(f"$[{comp('2')}<<{comp('1')}]", level)
elif (c == '5'):
out += ((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c == '6'):
out += ((escape(f"$[{comp('2')}", level) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c == '7'):
out += ((((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('2')}", level)) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c in "\\$'<[]_"):
out += escape(c, level)
else:
out += ((escape("$'\\", level) + comp(f'{ord(c):03o}', (level - 1))) + escape("'", level))
s = s[1:]
return out | def comp(s, level=0):
'Compiles a string to use the given characters.\n \n The return value will take `level+1` bash-expansions to collapse to the\n given string.\n\n The cases in this function are clearest if read in the following order:\n 0\n 1, 2, 4\n 53\n else\n 3, 6, 7\n\n Note that each case is a mix of recursive calls to comp(..., level-1), and\n calls to escape(..., level). In particular, everything that needs to be\n ignored by all previous levels we escape; while things that the previous\n level needs to expand for us go through the recursive call to comp. We\n also sometimes call comp(0), for things we know can be expanded without\n going any deeper.\n '
if (level < 0):
raise Exception('We have to go deeper')
out =
while s:
c = s[0]
if (s[:2] == '53'):
out += escape(f"$[{comp('0')}$[{comp('0101')}]]", level)
s = s[2:]
continue
elif (c == '0'):
out += escape('$[$$<$$]', level)
elif (c == '1'):
out += escape(f"$[{comp('0')}<$$]", level)
elif (c == '2'):
out += escape(f"$[{comp('1')}<<{comp('1')}]", level)
elif (c == '3'):
out += ((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('2')}]", level))
elif (c == '4'):
out += escape(f"$[{comp('2')}<<{comp('1')}]", level)
elif (c == '5'):
out += ((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c == '6'):
out += ((escape(f"$[{comp('2')}", level) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c == '7'):
out += ((((escape(f"$[{comp('1')}", level) + comp('+', (level - 1))) + escape(f"{comp('2')}", level)) + comp('+', (level - 1))) + escape(f"{comp('4')}]", level))
elif (c in "\\$'<[]_"):
out += escape(c, level)
else:
out += ((escape("$'\\", level) + comp(f'{ord(c):03o}', (level - 1))) + escape("'", level))
s = s[1:]
return out<|docstring|>Compiles a string to use the given characters.
The return value will take `level+1` bash-expansions to collapse to the
given string.
The cases in this function are clearest if read in the following order:
0
1, 2, 4
53
else
3, 6, 7
Note that each case is a mix of recursive calls to comp(..., level-1), and
calls to escape(..., level). In particular, everything that needs to be
ignored by all previous levels we escape; while things that the previous
level needs to expand for us go through the recursive call to comp. We
also sometimes call comp(0), for things we know can be expanded without
going any deeper.<|endoftext|> |
2119ec33bbde59404bf160bfee77f32afa0fbc5047031434757d77e540f19507 | def __init__(self, currency_code=None, client_id=None, end_date=None, currency_conversion=None, start_date=None, transaction_status_scope=None, aggregation_account_ids=None, _configuration=None):
'FeeAnalysisRequest - a model defined in Swagger'
if (_configuration is None):
_configuration = Configuration()
self._configuration = _configuration
self._currency_code = None
self._client_id = None
self._end_date = None
self._currency_conversion = None
self._start_date = None
self._transaction_status_scope = None
self._aggregation_account_ids = None
self.discriminator = None
if (currency_code is not None):
self.currency_code = currency_code
if (client_id is not None):
self.client_id = client_id
if (end_date is not None):
self.end_date = end_date
if (currency_conversion is not None):
self.currency_conversion = currency_conversion
if (start_date is not None):
self.start_date = start_date
if (transaction_status_scope is not None):
self.transaction_status_scope = transaction_status_scope
if (aggregation_account_ids is not None):
self.aggregation_account_ids = aggregation_account_ids | FeeAnalysisRequest - a model defined in Swagger | atom/proton/python/proton_api/models/fee_analysis_request.py | __init__ | AbhiGupta03/SDK | 11 | python | def __init__(self, currency_code=None, client_id=None, end_date=None, currency_conversion=None, start_date=None, transaction_status_scope=None, aggregation_account_ids=None, _configuration=None):
if (_configuration is None):
_configuration = Configuration()
self._configuration = _configuration
self._currency_code = None
self._client_id = None
self._end_date = None
self._currency_conversion = None
self._start_date = None
self._transaction_status_scope = None
self._aggregation_account_ids = None
self.discriminator = None
if (currency_code is not None):
self.currency_code = currency_code
if (client_id is not None):
self.client_id = client_id
if (end_date is not None):
self.end_date = end_date
if (currency_conversion is not None):
self.currency_conversion = currency_conversion
if (start_date is not None):
self.start_date = start_date
if (transaction_status_scope is not None):
self.transaction_status_scope = transaction_status_scope
if (aggregation_account_ids is not None):
self.aggregation_account_ids = aggregation_account_ids | def __init__(self, currency_code=None, client_id=None, end_date=None, currency_conversion=None, start_date=None, transaction_status_scope=None, aggregation_account_ids=None, _configuration=None):
if (_configuration is None):
_configuration = Configuration()
self._configuration = _configuration
self._currency_code = None
self._client_id = None
self._end_date = None
self._currency_conversion = None
self._start_date = None
self._transaction_status_scope = None
self._aggregation_account_ids = None
self.discriminator = None
if (currency_code is not None):
self.currency_code = currency_code
if (client_id is not None):
self.client_id = client_id
if (end_date is not None):
self.end_date = end_date
if (currency_conversion is not None):
self.currency_conversion = currency_conversion
if (start_date is not None):
self.start_date = start_date
if (transaction_status_scope is not None):
self.transaction_status_scope = transaction_status_scope
if (aggregation_account_ids is not None):
self.aggregation_account_ids = aggregation_account_ids<|docstring|>FeeAnalysisRequest - a model defined in Swagger<|endoftext|> |
5b1609aab4fbb31e0bf50a796bc926267b56fadf3b544a55dc59572ff60c68f1 | @property
def currency_code(self):
'Gets the currency_code of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The currency_code of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._currency_code | Gets the currency_code of this FeeAnalysisRequest. # noqa: E501
:return: The currency_code of this FeeAnalysisRequest. # noqa: E501
:rtype: str | atom/proton/python/proton_api/models/fee_analysis_request.py | currency_code | AbhiGupta03/SDK | 11 | python | @property
def currency_code(self):
'Gets the currency_code of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The currency_code of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._currency_code | @property
def currency_code(self):
'Gets the currency_code of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The currency_code of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._currency_code<|docstring|>Gets the currency_code of this FeeAnalysisRequest. # noqa: E501
:return: The currency_code of this FeeAnalysisRequest. # noqa: E501
:rtype: str<|endoftext|> |
9a64991f770c4c1e6bc67ec799709cce8df3f8b4673e46c15de8049a88583f7e | @currency_code.setter
def currency_code(self, currency_code):
'Sets the currency_code of this FeeAnalysisRequest.\n\n\n :param currency_code: The currency_code of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._currency_code = currency_code | Sets the currency_code of this FeeAnalysisRequest.
:param currency_code: The currency_code of this FeeAnalysisRequest. # noqa: E501
:type: str | atom/proton/python/proton_api/models/fee_analysis_request.py | currency_code | AbhiGupta03/SDK | 11 | python | @currency_code.setter
def currency_code(self, currency_code):
'Sets the currency_code of this FeeAnalysisRequest.\n\n\n :param currency_code: The currency_code of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._currency_code = currency_code | @currency_code.setter
def currency_code(self, currency_code):
'Sets the currency_code of this FeeAnalysisRequest.\n\n\n :param currency_code: The currency_code of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._currency_code = currency_code<|docstring|>Sets the currency_code of this FeeAnalysisRequest.
:param currency_code: The currency_code of this FeeAnalysisRequest. # noqa: E501
:type: str<|endoftext|> |
7ceed830c135aa14eee13e4fda49696d9bdadde741288c681bcdb80cc0f69146 | @property
def client_id(self):
'Gets the client_id of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The client_id of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._client_id | Gets the client_id of this FeeAnalysisRequest. # noqa: E501
:return: The client_id of this FeeAnalysisRequest. # noqa: E501
:rtype: str | atom/proton/python/proton_api/models/fee_analysis_request.py | client_id | AbhiGupta03/SDK | 11 | python | @property
def client_id(self):
'Gets the client_id of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The client_id of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._client_id | @property
def client_id(self):
'Gets the client_id of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The client_id of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._client_id<|docstring|>Gets the client_id of this FeeAnalysisRequest. # noqa: E501
:return: The client_id of this FeeAnalysisRequest. # noqa: E501
:rtype: str<|endoftext|> |
3b68abb551dc55161b88fcfa9b3106aec0da17b2c3089bbf8d490761a11357fe | @client_id.setter
def client_id(self, client_id):
'Sets the client_id of this FeeAnalysisRequest.\n\n\n :param client_id: The client_id of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._client_id = client_id | Sets the client_id of this FeeAnalysisRequest.
:param client_id: The client_id of this FeeAnalysisRequest. # noqa: E501
:type: str | atom/proton/python/proton_api/models/fee_analysis_request.py | client_id | AbhiGupta03/SDK | 11 | python | @client_id.setter
def client_id(self, client_id):
'Sets the client_id of this FeeAnalysisRequest.\n\n\n :param client_id: The client_id of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._client_id = client_id | @client_id.setter
def client_id(self, client_id):
'Sets the client_id of this FeeAnalysisRequest.\n\n\n :param client_id: The client_id of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._client_id = client_id<|docstring|>Sets the client_id of this FeeAnalysisRequest.
:param client_id: The client_id of this FeeAnalysisRequest. # noqa: E501
:type: str<|endoftext|> |
90391f2615aa544a33cf96c80d6afaf5d288068e9acbe23095ff5512b45e6c34 | @property
def end_date(self):
'Gets the end_date of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The end_date of this FeeAnalysisRequest. # noqa: E501\n :rtype: date\n '
return self._end_date | Gets the end_date of this FeeAnalysisRequest. # noqa: E501
:return: The end_date of this FeeAnalysisRequest. # noqa: E501
:rtype: date | atom/proton/python/proton_api/models/fee_analysis_request.py | end_date | AbhiGupta03/SDK | 11 | python | @property
def end_date(self):
'Gets the end_date of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The end_date of this FeeAnalysisRequest. # noqa: E501\n :rtype: date\n '
return self._end_date | @property
def end_date(self):
'Gets the end_date of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The end_date of this FeeAnalysisRequest. # noqa: E501\n :rtype: date\n '
return self._end_date<|docstring|>Gets the end_date of this FeeAnalysisRequest. # noqa: E501
:return: The end_date of this FeeAnalysisRequest. # noqa: E501
:rtype: date<|endoftext|> |
34f4ae9f2a3bf3dd874cd5f95239b52460f5270c37ce0cfd23f934f14fb80d2a | @end_date.setter
def end_date(self, end_date):
'Sets the end_date of this FeeAnalysisRequest.\n\n\n :param end_date: The end_date of this FeeAnalysisRequest. # noqa: E501\n :type: date\n '
self._end_date = end_date | Sets the end_date of this FeeAnalysisRequest.
:param end_date: The end_date of this FeeAnalysisRequest. # noqa: E501
:type: date | atom/proton/python/proton_api/models/fee_analysis_request.py | end_date | AbhiGupta03/SDK | 11 | python | @end_date.setter
def end_date(self, end_date):
'Sets the end_date of this FeeAnalysisRequest.\n\n\n :param end_date: The end_date of this FeeAnalysisRequest. # noqa: E501\n :type: date\n '
self._end_date = end_date | @end_date.setter
def end_date(self, end_date):
'Sets the end_date of this FeeAnalysisRequest.\n\n\n :param end_date: The end_date of this FeeAnalysisRequest. # noqa: E501\n :type: date\n '
self._end_date = end_date<|docstring|>Sets the end_date of this FeeAnalysisRequest.
:param end_date: The end_date of this FeeAnalysisRequest. # noqa: E501
:type: date<|endoftext|> |
87181bf1848c42975e9d1cbdbc37d7509ddf8021b0bbb5614712780522fb0d28 | @property
def currency_conversion(self):
'Gets the currency_conversion of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The currency_conversion of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._currency_conversion | Gets the currency_conversion of this FeeAnalysisRequest. # noqa: E501
:return: The currency_conversion of this FeeAnalysisRequest. # noqa: E501
:rtype: str | atom/proton/python/proton_api/models/fee_analysis_request.py | currency_conversion | AbhiGupta03/SDK | 11 | python | @property
def currency_conversion(self):
'Gets the currency_conversion of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The currency_conversion of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._currency_conversion | @property
def currency_conversion(self):
'Gets the currency_conversion of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The currency_conversion of this FeeAnalysisRequest. # noqa: E501\n :rtype: str\n '
return self._currency_conversion<|docstring|>Gets the currency_conversion of this FeeAnalysisRequest. # noqa: E501
:return: The currency_conversion of this FeeAnalysisRequest. # noqa: E501
:rtype: str<|endoftext|> |
4b24013604548c1b83c6647d801afb6cb97f57fb06f02f0773c385b40575a439 | @currency_conversion.setter
def currency_conversion(self, currency_conversion):
'Sets the currency_conversion of this FeeAnalysisRequest.\n\n\n :param currency_conversion: The currency_conversion of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._currency_conversion = currency_conversion | Sets the currency_conversion of this FeeAnalysisRequest.
:param currency_conversion: The currency_conversion of this FeeAnalysisRequest. # noqa: E501
:type: str | atom/proton/python/proton_api/models/fee_analysis_request.py | currency_conversion | AbhiGupta03/SDK | 11 | python | @currency_conversion.setter
def currency_conversion(self, currency_conversion):
'Sets the currency_conversion of this FeeAnalysisRequest.\n\n\n :param currency_conversion: The currency_conversion of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._currency_conversion = currency_conversion | @currency_conversion.setter
def currency_conversion(self, currency_conversion):
'Sets the currency_conversion of this FeeAnalysisRequest.\n\n\n :param currency_conversion: The currency_conversion of this FeeAnalysisRequest. # noqa: E501\n :type: str\n '
self._currency_conversion = currency_conversion<|docstring|>Sets the currency_conversion of this FeeAnalysisRequest.
:param currency_conversion: The currency_conversion of this FeeAnalysisRequest. # noqa: E501
:type: str<|endoftext|> |
27e45ea1ff09b070df9543cf595e213bbdbf8712b7d5e6238da6cf980cf4640e | @property
def start_date(self):
'Gets the start_date of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The start_date of this FeeAnalysisRequest. # noqa: E501\n :rtype: date\n '
return self._start_date | Gets the start_date of this FeeAnalysisRequest. # noqa: E501
:return: The start_date of this FeeAnalysisRequest. # noqa: E501
:rtype: date | atom/proton/python/proton_api/models/fee_analysis_request.py | start_date | AbhiGupta03/SDK | 11 | python | @property
def start_date(self):
'Gets the start_date of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The start_date of this FeeAnalysisRequest. # noqa: E501\n :rtype: date\n '
return self._start_date | @property
def start_date(self):
'Gets the start_date of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The start_date of this FeeAnalysisRequest. # noqa: E501\n :rtype: date\n '
return self._start_date<|docstring|>Gets the start_date of this FeeAnalysisRequest. # noqa: E501
:return: The start_date of this FeeAnalysisRequest. # noqa: E501
:rtype: date<|endoftext|> |
53253cbf432069d288cb01ad782f71f6e23bc363e72a598787070a47801292b1 | @start_date.setter
def start_date(self, start_date):
'Sets the start_date of this FeeAnalysisRequest.\n\n\n :param start_date: The start_date of this FeeAnalysisRequest. # noqa: E501\n :type: date\n '
self._start_date = start_date | Sets the start_date of this FeeAnalysisRequest.
:param start_date: The start_date of this FeeAnalysisRequest. # noqa: E501
:type: date | atom/proton/python/proton_api/models/fee_analysis_request.py | start_date | AbhiGupta03/SDK | 11 | python | @start_date.setter
def start_date(self, start_date):
'Sets the start_date of this FeeAnalysisRequest.\n\n\n :param start_date: The start_date of this FeeAnalysisRequest. # noqa: E501\n :type: date\n '
self._start_date = start_date | @start_date.setter
def start_date(self, start_date):
'Sets the start_date of this FeeAnalysisRequest.\n\n\n :param start_date: The start_date of this FeeAnalysisRequest. # noqa: E501\n :type: date\n '
self._start_date = start_date<|docstring|>Sets the start_date of this FeeAnalysisRequest.
:param start_date: The start_date of this FeeAnalysisRequest. # noqa: E501
:type: date<|endoftext|> |
5ef6abb71fca30f247469471d00952a1a9b01a0d73ed1a22b3370f6cf116a101 | @property
def transaction_status_scope(self):
'Gets the transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n :rtype: list[str]\n '
return self._transaction_status_scope | Gets the transaction_status_scope of this FeeAnalysisRequest. # noqa: E501
:return: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501
:rtype: list[str] | atom/proton/python/proton_api/models/fee_analysis_request.py | transaction_status_scope | AbhiGupta03/SDK | 11 | python | @property
def transaction_status_scope(self):
'Gets the transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n :rtype: list[str]\n '
return self._transaction_status_scope | @property
def transaction_status_scope(self):
'Gets the transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n :rtype: list[str]\n '
return self._transaction_status_scope<|docstring|>Gets the transaction_status_scope of this FeeAnalysisRequest. # noqa: E501
:return: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501
:rtype: list[str]<|endoftext|> |
21f7a0a6b20d8cba20ea65f564df75d7d4edbd8fc362b5ca698123c221051a10 | @transaction_status_scope.setter
def transaction_status_scope(self, transaction_status_scope):
'Sets the transaction_status_scope of this FeeAnalysisRequest.\n\n\n :param transaction_status_scope: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n :type: list[str]\n '
self._transaction_status_scope = transaction_status_scope | Sets the transaction_status_scope of this FeeAnalysisRequest.
:param transaction_status_scope: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501
:type: list[str] | atom/proton/python/proton_api/models/fee_analysis_request.py | transaction_status_scope | AbhiGupta03/SDK | 11 | python | @transaction_status_scope.setter
def transaction_status_scope(self, transaction_status_scope):
'Sets the transaction_status_scope of this FeeAnalysisRequest.\n\n\n :param transaction_status_scope: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n :type: list[str]\n '
self._transaction_status_scope = transaction_status_scope | @transaction_status_scope.setter
def transaction_status_scope(self, transaction_status_scope):
'Sets the transaction_status_scope of this FeeAnalysisRequest.\n\n\n :param transaction_status_scope: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501\n :type: list[str]\n '
self._transaction_status_scope = transaction_status_scope<|docstring|>Sets the transaction_status_scope of this FeeAnalysisRequest.
:param transaction_status_scope: The transaction_status_scope of this FeeAnalysisRequest. # noqa: E501
:type: list[str]<|endoftext|> |
78881335458e712c024f46d331a8dc3d193a9cf15ce1791061f668770333fa33 | @property
def aggregation_account_ids(self):
'Gets the aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n :rtype: list[str]\n '
return self._aggregation_account_ids | Gets the aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501
:return: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501
:rtype: list[str] | atom/proton/python/proton_api/models/fee_analysis_request.py | aggregation_account_ids | AbhiGupta03/SDK | 11 | python | @property
def aggregation_account_ids(self):
'Gets the aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n :rtype: list[str]\n '
return self._aggregation_account_ids | @property
def aggregation_account_ids(self):
'Gets the aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n\n\n :return: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n :rtype: list[str]\n '
return self._aggregation_account_ids<|docstring|>Gets the aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501
:return: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501
:rtype: list[str]<|endoftext|> |
98ada8cfb1d940f105208964a4a4aaa43b1f86255d24f6a66a78f1019b1458b8 | @aggregation_account_ids.setter
def aggregation_account_ids(self, aggregation_account_ids):
'Sets the aggregation_account_ids of this FeeAnalysisRequest.\n\n\n :param aggregation_account_ids: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n :type: list[str]\n '
self._aggregation_account_ids = aggregation_account_ids | Sets the aggregation_account_ids of this FeeAnalysisRequest.
:param aggregation_account_ids: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501
:type: list[str] | atom/proton/python/proton_api/models/fee_analysis_request.py | aggregation_account_ids | AbhiGupta03/SDK | 11 | python | @aggregation_account_ids.setter
def aggregation_account_ids(self, aggregation_account_ids):
'Sets the aggregation_account_ids of this FeeAnalysisRequest.\n\n\n :param aggregation_account_ids: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n :type: list[str]\n '
self._aggregation_account_ids = aggregation_account_ids | @aggregation_account_ids.setter
def aggregation_account_ids(self, aggregation_account_ids):
'Sets the aggregation_account_ids of this FeeAnalysisRequest.\n\n\n :param aggregation_account_ids: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501\n :type: list[str]\n '
self._aggregation_account_ids = aggregation_account_ids<|docstring|>Sets the aggregation_account_ids of this FeeAnalysisRequest.
:param aggregation_account_ids: The aggregation_account_ids of this FeeAnalysisRequest. # noqa: E501
:type: list[str]<|endoftext|> |
8885702a7c34b5b3ec25cfbb7b70f960e55ea296cbd152b9173158e1fea7438e | def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(FeeAnalysisRequest, dict):
for (key, value) in self.items():
result[key] = value
return result | Returns the model properties as a dict | atom/proton/python/proton_api/models/fee_analysis_request.py | to_dict | AbhiGupta03/SDK | 11 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(FeeAnalysisRequest, dict):
for (key, value) in self.items():
result[key] = value
return result | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(FeeAnalysisRequest, dict):
for (key, value) in self.items():
result[key] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99 | def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | Returns the string representation of the model | atom/proton/python/proton_api/models/fee_analysis_request.py | to_str | AbhiGupta03/SDK | 11 | python | def to_str(self):
return pprint.pformat(self.to_dict()) | def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703 | def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | For `print` and `pprint` | atom/proton/python/proton_api/models/fee_analysis_request.py | __repr__ | AbhiGupta03/SDK | 11 | python | def __repr__(self):
return self.to_str() | def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
fb493b5dfbc923e2c0bccde42188909c38d223e80077f024226a5f1d74fcd38b | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, FeeAnalysisRequest)):
return False
return (self.to_dict() == other.to_dict()) | Returns true if both objects are equal | atom/proton/python/proton_api/models/fee_analysis_request.py | __eq__ | AbhiGupta03/SDK | 11 | python | def __eq__(self, other):
if (not isinstance(other, FeeAnalysisRequest)):
return False
return (self.to_dict() == other.to_dict()) | def __eq__(self, other):
if (not isinstance(other, FeeAnalysisRequest)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|> |
3ab7ff3caf4e1adafeee3be819682dc5ec004695f0c85b6333c6aa5590b5fbc3 | def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, FeeAnalysisRequest)):
return True
return (self.to_dict() != other.to_dict()) | Returns true if both objects are not equal | atom/proton/python/proton_api/models/fee_analysis_request.py | __ne__ | AbhiGupta03/SDK | 11 | python | def __ne__(self, other):
if (not isinstance(other, FeeAnalysisRequest)):
return True
return (self.to_dict() != other.to_dict()) | def __ne__(self, other):
if (not isinstance(other, FeeAnalysisRequest)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.