function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def a_function():
    """Do nothing; exists only as a schedulable placeholder job."""
    return None
PyBossa/pybossa
[ 716, 269, 716, 21, 1321773782 ]
def setUp(self):
    """Connect to the Redis master via Sentinel and build a test scheduler.

    Flushes the database so each test starts from a clean slate.
    """
    sentinel = Sentinel(settings_test.REDIS_SENTINEL)
    redis_db = getattr(settings_test, 'REDIS_DB', 0)
    self.connection = sentinel.master_for('mymaster', db=redis_db)
    self.connection.flushall()
    self.scheduler = Scheduler('test_queue', connection=self.connection)
PyBossa/pybossa
[ 716, 269, 716, 21, 1321773782 ]
def test_adds_several_jobs_(self):
    """Scheduling two distinct jobs registers both with the scheduler."""
    schedule_job(a_job, self.scheduler)
    schedule_job(another_job, self.scheduler)
    scheduled = self.scheduler.get_jobs()
    names = [job.func_name for job in scheduled]
    module_name = 'test_jobs.test_schedule_jobs'
    assert len(scheduled) == 2, scheduled
    assert module_name + '.a_function' in names, names
    assert module_name + '.another_function' in names, names
PyBossa/pybossa
[ 716, 269, 716, 21, 1321773782 ]
def test_returns_log_messages(self):
    """schedule_job reports success first, then a duplicate warning."""
    first = schedule_job(a_job, self.scheduler)
    second = schedule_job(a_job, self.scheduler)
    assert first == 'Scheduled a_function([], {}) to run every 1 seconds'
    assert second == 'WARNING: Job a_function([], {}) is already scheduled'
PyBossa/pybossa
[ 716, 269, 716, 21, 1321773782 ]
def utcNow( cls ):
    """Return the current UTC time as a FILETIME value.

    Output: An unsigned long integer representing the current time in
            FILETIME format.

    Notes:  The value is already a few nanoseconds stale by the time it
            is returned; that is unavoidable and harmless.
    """
    # FILETIME counts 100ns ticks; cls._EPOCH_DELTA_SECS shifts the
    # Unix epoch to the FILETIME epoch.
    ticks = long( round( time(), 7 ) * 10000000 )
    return ticks + cls._EPOCH_DELTA_SECS
ubiqx-org/Carnaval
[ 11, 5, 11, 1, 1396561525 ]
def SMB_Pad8( msglen=0 ):
    """Return the number of padding octets needed for 8-octet alignment.

    Input:  msglen - The length of the bytestream that may need padding;
                     assumed to start on an 8-octet boundary (otherwise
                     the result is somewhat meaningless).

    Output: The number of octets required to pad to a multiple of 8;
            always in the range 0..7.

    Doctest:
      >>> for i in [-9, -2, 0, 3, 8, 9]:
      ...   print "%2d ==> %d" % (i, SMB_Pad8( i ))
      -9 ==> 1
      -2 ==> 2
       0 ==> 0
       3 ==> 5
       8 ==> 0
       9 ==> 7
    """
    # Masking with 0x7 maps the "already aligned" case from 8 back to 0.
    remainder = msglen % 8
    return (8 - remainder) & 0x7
ubiqx-org/Carnaval
[ 11, 5, 11, 1, 1396561525 ]
def test_roster_generation(self, config_override, extra_module_engagement_metric_ranges, extra_module_engagement_rows):
    """End-to-end check: build the roster, then verify the search index."""
    self._validate_roster_generation(
        config_override,
        extra_module_engagement_metric_ranges,
        extra_module_engagement_rows,
    )
    self._validate_elasticsearch_index()
edx/edx-analytics-pipeline
[ 91, 120, 91, 29, 1407959837 ]
# Runs the CourseEnrollmentPartitionTask and ModuleEngagementWorkflowTask
# pipelines over fixture tracking logs (events only on Apr 13 and 16, 2015),
# then compares the resulting module_engagement_metric_ranges and
# module_engagement rows against large literal expectations, extended by the
# caller-supplied extra_* fixtures.
# NOTE(review): the expected tuples are fixture data tied to the SQL/log
# fixtures loaded above — verify against those fixtures before editing.
def _validate_roster_generation(self, config_override, extra_module_engagement_metric_ranges, extra_module_engagement_rows): """Validates the module engagement roster data generated by ModuleEngagementWorkflowTask""" for day in range(2, 17): fake_date = datetime.date(2015, 4, day) if day in (13, 16): self.upload_tracking_log(self.INPUT_FILE.format(date=fake_date.strftime('%Y%m%d')), fake_date) else: self.upload_tracking_log(self.EMPTY_INPUT_FILE, fake_date) self.execute_sql_fixture_file('load_auth_user_for_internal_reporting_user.sql') self.execute_sql_fixture_file('load_auth_userprofile.sql') self.execute_sql_fixture_file('load_course_groups_courseusergroup.sql') self.execute_sql_fixture_file('load_course_groups_courseusergroup_users.sql') self.task.launch( [ 'CourseEnrollmentPartitionTask', '--interval-end', '2015-04-17', '--n-reduce-tasks', str(self.NUM_REDUCERS), ] ) self.task.launch( [ 'ModuleEngagementWorkflowTask', '--date', '2015-04-17', '--n-reduce-tasks', str(self.NUM_REDUCERS), ], config_override=config_override, ) with self.export_db.cursor() as cursor: cursor.execute('SELECT course_id, start_date, end_date, metric, range_type, low_value, high_value FROM module_engagement_metric_ranges') results = cursor.fetchall() self.assertItemsEqual( [row for row in results], [ ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'low', 0, 6.0), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'normal', 6.0, None), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts', 'low', 0, 6.0), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts', 'normal', 6.0, None), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts_per_completed', 'low', 0, 3.0), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), 
datetime.date(2015, 4, 17), 'problem_attempts_per_completed', 'normal', 3.0, None), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_attempted', 'low', 0, 3.0), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_attempted', 'normal', 3.0, None), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_completed', 'low', 0, 2.0), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_completed', 'normal', 2.0, None), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'videos_viewed', 'low', 0, 2.0), ('edX/DemoX/Demo_Course', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'videos_viewed', 'normal', 2.0, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'low', 0, 0.45), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'normal', 0.45, 2.55), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'high', 2.55, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts', 'low', 0, 0.15), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts', 'normal', 0.15, 0.85), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts', 'high', 0.85, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts_per_completed', 'low', 0, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts_per_completed', 'normal', None, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_attempted', 'low', 0, 0.15), 
('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_attempted', 'normal', 0.15, 0.85), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_attempted', 'high', 0.85, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_completed', 'normal', 0.0, None), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'videos_viewed', 'low', 0, 1.0), ('edX/DemoX/Demo_Course_2', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'videos_viewed', 'normal', 1.0, None), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'low', 0, 6.0), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'discussion_contributions', 'normal', 6.0, None), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts_per_completed', 'normal', 0, None), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'videos_viewed', 'low', 0, 1.0), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'videos_viewed', 'normal', 1.0, None), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problem_attempts', 'normal', 0.0, None), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_attempted', 'normal', 0.0, None), ('course-v1:edX+DemoX+Demo_Course_2015', datetime.date(2015, 4, 10), datetime.date(2015, 4, 17), 'problems_completed', 'normal', 0.0, None) ] + extra_module_engagement_metric_ranges ) with self.export_db.cursor() as cursor: cursor.execute('SELECT date, course_id, username, entity_type, entity_id, event, count FROM module_engagement') results = cursor.fetchall() april_thirteenth = 
datetime.date(2015, 4, 13) april_sixteenth = datetime.date(2015, 4, 16) self.assertItemsEqual( [row for row in results], [ (april_thirteenth, 'course-v1:edX+DemoX+Demo_Course_2015', 'honor', 'discussion', 'cba3e4cd91d0466b9ac50926e495b76f', 'contributed', 3), (april_thirteenth, 'course-v1:edX+DemoX+Demo_Course_2015', 'honor', 'video', '8c0028eb2a724f48a074bc184cd8635f', 'viewed', 1), (april_thirteenth, 'edX/DemoX/Demo_Course', 'honor', 'video', 'i4x-edX-DemoX-video-8c0028eb2a724f48a074bc184cd8635f', 'viewed', 2), (april_thirteenth, 'edX/DemoX/Demo_Course_2', 'honor', 'video', 'i4x-edX-DemoX-video-8c0028eb2a724f48a074bc184cd8635f', 'viewed', 1), (april_thirteenth, 'edX/DemoX/Demo_Course_2', 'staff', 'video', 'i4x-edX-DemoX-video-8c0028eb2a724f48a074bc184cd8635f', 'viewed', 1), (april_sixteenth, 'course-v1:edX+DemoX+Demo_Course_2015', 'honor', 'discussion', 'cba3e4cd91d0466b9ac50926e495b76f', 'contributed', 3), (april_sixteenth, 'course-v1:edX+DemoX+Demo_Course_2015', 'honor', 'video', '8c0028eb2a724f48a074bc184cd8635f', 'viewed', 1), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'discussion', 'cba3e4cd91d0466b9ac50926e495b76f', 'contributed', 6), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'problem', 'i4x://edX/DemoX/problem/0d759dee4f9d459c8956136dbde55f02', 'attempted', 1), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'problem', 'i4x://edX/DemoX/problem/75f9562c77bc4858b61f907bb810d974', 'attempted', 2), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'problem', 'i4x://edX/DemoX/problem/75f9562c77bc4858b61f907bb810d974', 'completed', 1), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'problem', 'i4x://edX/DemoX/problem/a0effb954cca4759994f1ac9e9434bf4', 'attempted', 3), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'problem', 'i4x://edX/DemoX/problem/a0effb954cca4759994f1ac9e9434bf4', 'completed', 1), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'video', 'i4x-edX-DemoX-video-0b9e39477cf34507a7a48f74be381fdd', 'viewed', 
2), (april_sixteenth, 'edX/DemoX/Demo_Course', 'honor', 'video', 'i4x-edX-DemoX-video-8c0028eb2a724f48a074bc184cd8635f', 'viewed', 1), (april_sixteenth, 'edX/DemoX/Demo_Course_2', 'honor', 'discussion', 'cba3e4cd91d0466b9ac50926e495b76f', 'contributed', 3), (april_sixteenth, 'edX/DemoX/Demo_Course_2', 'honor', 'problem', 'i4x://edX/DemoX/problem/a0effb954cca4759994f1ac9e9434bf4', 'attempted', 1), ] + extra_module_engagement_rows )
edx/edx-analytics-pipeline
[ 91, 120, 91, 29, 1407959837 ]
def __init__(self, user_id, post_id):
    """Store the post id after letting BaseAlert handle the user id."""
    super(PostingAlert, self).__init__(user_id)
    self.post_id = post_id
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def verify(self):
    """Overwrites BaseAlert.verify(): both the user and the post must exist.

    Short-circuits: the post lookup is skipped when the user is gone.
    """
    return (m.db.users.find_one({'_id': self.user_id}, {}) and
            m.db.posts.find_one({'_id': self.post_id}, {}))
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def prettify(self, for_uid=None):
    """Render the "tagged you in a post" alert as an HTML snippet."""
    username = self.user.get('username')
    return '<a href="{0}">{1}</a> tagged you in a <a href="{2}">post</a>' \
        .format(url_for('users.profile', username=username),
                do_capitalize(username),
                self.url())
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def prettify(self, for_uid=None):
    """Render the comment alert, explaining why the user is subscribed."""
    reason = subscription_reason(for_uid, self.post_id)
    # Map the subscription reason to a human-readable clause; the fallback
    # should never really happen, but let's play ball eh?
    clauses = {
        SubscriptionReasons.POSTER: 'posted',
        SubscriptionReasons.COMMENTER: 'commented on',
        SubscriptionReasons.TAGEE: 'were tagged in',
    }
    sr = clauses.get(reason, 'are subscribed to')
    return '<a href="{0}">{1}</a> ' \
           'commented on a <a href="{2}">post</a> you {3}' \
           .format(url_for('users.profile',
                           username=self.user.get('username')),
                   do_capitalize(self.user.get('username')),
                   self.url(), sr)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def populate_followers_feeds(user_id, post_id, timestamp):
    """Fan out a post_id to all the users followers.

    This can be run on a worker to speed the process up.
    """
    # Every follower of the user, regardless of count.
    follower_ids = r.zrange(k.USER_FOLLOWERS.format(user_id), 0, -1)
    # Deliberately not transactional so Redis is never held up.
    for follower_id in follower_ids:
        feed_key = k.USER_FEED.format(follower_id)
        r.zadd(feed_key, {str(post_id): timestamp})
        # Cap the feed size; trimming a missing key is harmless.
        r.zremrangebyrank(feed_key, 0, -1000)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def populate_approved_followers_feeds(user_id, post_id, timestamp):
    """Fan out a post_id to all the users approved followers."""
    # Not transactional on purpose, so Redis is never held up.
    for follower_id in r.zrange(k.USER_APPROVED.format(user_id), 0, -1):
        feed_key = k.USER_FEED.format(follower_id)
        r.zadd(feed_key, {str(post_id): timestamp})
        # Cap the feed size; trimming a missing key is harmless.
        r.zremrangebyrank(feed_key, 0, -1000)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def back_feed(who_id, whom_id):
    """Copy the five latest posts of ``whom_id`` into ``who_id``'s feed.

    New users may follow someone yet still have an empty feed, which makes
    them sad :( — so we seed it. Posts that are too old will simply be
    removed the next time the feed is trimmed; they may enter the feed but
    not at the top.

    :param who_id: user who just followed ``whom_id``
    :type who_id: str
    :param whom_id: user who was just followed by ``who_id``
    :type whom_id: str
    :returns: None
    """
    # Only the ids and created times of the latest five top-level posts
    # visible at the PJUU permission level are needed.
    recent_posts = m.db.posts.find(
        {'user_id': whom_id,
         'reply_to': None,
         'permission': {'$lte': k.PERM_PJUU}},
        {'_id': True, 'created': True},
    ).sort('created', -1).limit(5)
    feed_key = k.USER_FEED.format(who_id)
    for post in recent_posts:
        # Place on the feed, then trim it to the 1000 newest entries.
        r.zadd(feed_key, {str(post.get('_id')): post.get('created')})
        r.zremrangebyrank(feed_key, 0, -1000)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def get_post(post_id):
    """Fetch one post, attaching the author's avatar/donated details.

    Simple helper function; the attachment will be removed with image
    uploads.
    """
    post = m.db.posts.find_one({'_id': post_id})
    if post is None:
        return None
    author = m.db.users.find_one({'_id': post.get('user_id')},
                                 {'avatar': True, 'donated': True})
    if author is not None:
        post['user_avatar'] = author.get('avatar')
        post['user_donated'] = author.get('donated', False)
    return post
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def get_posts(user_id, page=1, per_page=None, perm=0):
    """Returns a users posts as a pagination object."""
    if per_page is None:
        per_page = app.config.get('FEED_ITEMS_PER_PAGE')
    # The author document supplies avatar/donated for every post.
    author = m.db.users.find_one({'_id': user_id},
                                 {'avatar': True, 'donated': True})
    query = {
        'user_id': user_id,
        'reply_to': {'$exists': False},
        'permission': {'$lte': perm},
    }
    total = m.db.posts.find(query).count()
    cursor = m.db.posts.find(query).sort('created', -1) \
        .skip((page - 1) * per_page).limit(per_page)
    posts = []
    for post in cursor:
        post['user_avatar'] = author.get('avatar')
        post['user_donated'] = author.get('donated', False)
        posts.append(post)
    return Pagination(posts, total, page, per_page)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def get_hashtagged_posts(hashtag, page=1, per_page=None):
    """Return all posts with `hashtag`, newest first, as a Pagination.

    :param hashtag: hashtag to search for
    :param page: 1-based page number
    :param per_page: page size; defaults to app config FEED_ITEMS_PER_PAGE
    """
    if per_page is None:
        per_page = app.config.get('FEED_ITEMS_PER_PAGE')
    query = {'hashtags.hashtag': hashtag, 'reply_to': {'$exists': False}}
    total = m.db.posts.find(query).count()
    cursor = m.db.posts.find(query).sort(
        'created', -1).skip((page - 1) * per_page).limit(per_page)
    posts = []
    for post in cursor:
        user = m.db.users.find_one(
            {'_id': post.get('user_id')}, {'avatar': True})
        # BUG FIX: the guard previously tested ``post`` (which a cursor
        # never yields as None) instead of the looked-up ``user``, so a
        # deleted author crashed on ``user.get``. Guard the user lookup
        # and still include the post either way.
        if user is not None:
            post['user_avatar'] = user.get('avatar')
        posts.append(post)
    return Pagination(posts, total, page, per_page)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
# Vote handling: first vote stores a signed timestamp in a Redis zset and
# bumps the post's and author's Mongo scores; voting again within
# k.VOTE_TIMEOUT either reverses the vote (result 0) or flips it
# (result +/-1, score adjusted by 2). Raises CantVoteOnOwn for self-votes
# and AlreadyVoted once the timeout has passed.
# NOTE(review): the zset member is written with ``amount * timestamp()``
# (a fresh clock read) rather than the ``ts`` parameter — presumably
# intentional since ts is documented as "ONLY FOR TESTING"; confirm.
def vote_post(user_id, post_id, amount=1, ts=None): """Handles voting on posts :param user_id: User who is voting :type user_id: str :param post_id: ID of the post the user is voting on :type post_id: int :param amount: The way to vote (-1 or 1) :type amount: int :param ts: Timestamp to use for vote (ONLY FOR TESTING) :type ts: int :returns: -1 if downvote, 0 if reverse vote and +1 if upvote """ if ts is None: ts = timestamp() # Get the comment so we can check who the author is author_uid = get_post(post_id).get('user_id') # Votes can ONLY ever be -1 or 1 and nothing else # we use the sign to store the time and score in one zset score amount = 1 if amount >= 0 else -1 voted = has_voted(user_id, post_id) if not voted: if author_uid != user_id: # Store the timestamp of the vote with the sign of the vote r.zadd(k.POST_VOTES.format(post_id), { str(user_id): amount * timestamp() }) # Update post score m.db.posts.update({'_id': post_id}, {'$inc': {'score': amount}}) # Update user score m.db.users.update({'_id': author_uid}, {'$inc': {'score': amount}}) return amount else: raise CantVoteOnOwn elif voted and abs(voted) + k.VOTE_TIMEOUT > ts: # No need to check if user is current user because it can't # happen in the first place # Remove the vote from Redis r.zrem(k.POST_VOTES.format(post_id), user_id) previous_vote = -1 if voted < 0 else 1 # Calculate how much to increment/decrement the scores by # Saves multiple trips to Mongo if amount == previous_vote: if previous_vote < 0: amount = 1 result = 0 else: amount = -1 result = 0 else: # We will only register the new vote if it is NOT a vote reversal. r.zadd(k.POST_VOTES.format(post_id), { str(user_id): amount * timestamp() }) if previous_vote < 0: amount = 2 result = 1 else: amount = -2 result = -1 # Update post score m.db.posts.update({'_id': post_id}, {'$inc': {'score': amount}}) # Update user score m.db.users.update({'_id': author_uid}, {'$inc': {'score': amount}}) return result else: raise AlreadyVoted
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def delete_post_replies(post_id):
    """Delete ALL comments on post with pid.

    Done reply-by-reply rather than in one Mongo call because each
    reply's votes must also be purged from Redis.
    """
    for reply in m.db.posts.find({'reply_to': post_id}):
        reply_id = reply.get('_id')
        # Remove the reply document itself.
        m.db.posts.remove({'_id': reply_id})
        # Remove any file the reply uploaded.
        if 'upload' in reply:
            storage.delete(reply['upload'])
        # Purge the reply's votes from Redis.
        r.delete(k.POST_VOTES.format(reply_id))
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def unsubscribe(user_id, post_id):
    """Remove ``user_id`` from a post's subscribers; True when removed."""
    removed = r.zrem(k.POST_SUBSCRIBERS.format(post_id), user_id)
    return bool(removed)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def unflag_post(post_id):
    """Resets the flag count on a post to 0.

    .. note: This is an OP user only action from the dashboard.
    """
    reset = {'$set': {'flags': 0}}
    return m.db.posts.update({'_id': post_id}, reset)
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
def is_subscribed(user_id, post_id):
    """True when ``user_id`` is subscribed to ``post_id``."""
    rank = r.zrank(k.POST_SUBSCRIBERS.format(post_id), user_id)
    return rank is not None
pjuu/pjuu
[ 57, 7, 57, 27, 1374656999 ]
# Builds the whole two-company fixture set for the payment-mode tests:
# account types, a second company with its own chart of accounts, manual
# in/out payment methods, sale/purchase/bank journals, supplier/customer
# payment modes (cross-linked as each other's refund mode), partners with
# per-company payment modes and bank accounts, and a draft supplier invoice.
# NOTE(review): ordering matters — e.g. the chart is loaded for company_2
# under a temporary company switch, and refund_payment_mode_id on the
# supplier mode is written only after the customer mode exists.
def setUpClass(cls): super().setUpClass() cls.res_users_model = cls.env["res.users"] cls.move_model = cls.env["account.move"] cls.journal_model = cls.env["account.journal"] cls.payment_mode_model = cls.env["account.payment.mode"] cls.partner_bank_model = cls.env["res.partner.bank"] # Refs cls.company = cls.env.ref("base.main_company") cls.acct_type_payable = cls.env.ref("account.data_account_type_payable") cls.acct_type_receivable = cls.env.ref("account.data_account_type_receivable") cls.acct_type_expenses = cls.env.ref("account.data_account_type_expenses") cls.company_2 = cls.env["res.company"].create({"name": "Company 2"}) charts = cls.env["account.chart.template"].search([]) if charts: cls.chart = charts[0] else: raise ValidationError(_("No Chart of Account Template has been defined !")) old_company = cls.env.user.company_id cls.env.user.company_id = cls.company_2.id cls.chart.try_loading() cls.env.user.company_id = old_company.id # refs cls.manual_out = cls.env.ref("account.account_payment_method_manual_out") cls.manual_out.bank_account_required = True cls.manual_in = cls.env.ref("account.account_payment_method_manual_in") cls.journal_sale = cls.env["account.journal"].create( { "name": "Test Sales Journal", "code": "tSAL", "type": "sale", "company_id": cls.company.id, } ) cls.journal_purchase = cls.env["account.journal"].create( { "name": "Test Purchases Journal", "code": "tPUR", "type": "purchase", "company_id": cls.company.id, } ) cls.journal_c1 = cls.journal_model.create( { "name": "J1", "code": "J1", "type": "bank", "company_id": cls.company.id, "bank_acc_number": "123456", } ) cls.journal_c2 = cls.journal_model.create( { "name": "J2", "code": "J2", "type": "bank", "company_id": cls.company_2.id, "bank_acc_number": "552344", } ) cls.supplier_payment_mode = cls.payment_mode_model.create( { "name": "Suppliers Bank 1", "bank_account_link": "variable", "payment_method_id": cls.manual_out.id, "show_bank_account_from_journal": True, "company_id": cls.company.id, 
"fixed_journal_id": cls.journal_c1.id, "variable_journal_ids": [(6, 0, [cls.journal_c1.id])], } ) cls.supplier_payment_mode_c2 = cls.payment_mode_model.create( { "name": "Suppliers Bank 2", "bank_account_link": "variable", "payment_method_id": cls.manual_out.id, "company_id": cls.company_2.id, "fixed_journal_id": cls.journal_c2.id, "variable_journal_ids": [(6, 0, [cls.journal_c2.id])], } ) cls.customer_payment_mode = cls.payment_mode_model.create( { "name": "Customers to Bank 1", "bank_account_link": "fixed", "payment_method_id": cls.manual_in.id, "company_id": cls.company.id, "fixed_journal_id": cls.journal_c1.id, "refund_payment_mode_id": cls.supplier_payment_mode.id, "variable_journal_ids": [(6, 0, [cls.journal_c1.id])], } ) cls.supplier_payment_mode.write( {"refund_payment_mode_id": cls.customer_payment_mode.id} ) cls.customer = ( cls.env["res.partner"] .with_company(cls.company.id) .create( { "name": "Test customer", "customer_payment_mode_id": cls.customer_payment_mode, } ) ) cls.supplier = ( cls.env["res.partner"] .with_company(cls.company.id) .create( { "name": "Test supplier", "supplier_payment_mode_id": cls.supplier_payment_mode, } ) ) cls.supplier_bank = cls.env["res.partner.bank"].create( { "acc_number": "5345345", "partner_id": cls.supplier.id, "company_id": cls.company.id, } ) cls.supplier_bank_2 = cls.env["res.partner.bank"].create( { "acc_number": "3452342", "partner_id": cls.supplier.id, "company_id": cls.company_2.id, } ) cls.supplier.with_company( cls.company_2.id ).supplier_payment_mode_id = cls.supplier_payment_mode_c2 cls.invoice_account = cls.env["account.account"].search( [ ("user_type_id", "=", cls.acct_type_payable.id), ("company_id", "=", cls.company.id), ], limit=1, ) cls.invoice_line_account = cls.env["account.account"].search( [ ("user_type_id", "=", cls.acct_type_expenses.id), ("company_id", "=", cls.company.id), ], limit=1, ) cls.journal_bank = cls.env["res.partner.bank"].create( { "acc_number": "GB95LOYD87430237296288", 
"partner_id": cls.env.user.company_id.partner_id.id, } ) cls.journal = cls.env["account.journal"].create( { "name": "BANK TEST", "code": "TEST", "type": "bank", "bank_account_id": cls.journal_bank.id, } ) cls.supplier_invoice = cls.move_model.create( { "partner_id": cls.supplier.id, "invoice_date": fields.Date.today(), "move_type": "in_invoice", "journal_id": cls.journal_purchase.id, } )
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_create_partner(self):
    """A partner's customer payment mode is stored per company."""
    partner = (
        self.env["res.partner"]
        .with_company(self.company.id)
        .create(
            {
                "name": "Test customer",
                "customer_payment_mode_id": self.customer_payment_mode,
            }
        )
    )
    self.assertEqual(
        partner.with_company(self.company.id).customer_payment_mode_id,
        self.customer_payment_mode,
    )
    # No mode was set for the second company, so the empty recordset
    # (the model itself) is expected there.
    self.assertEqual(
        partner.with_company(self.company_2.id).customer_payment_mode_id,
        self.payment_mode_model,
    )
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_out_invoice_onchange(self):
    """Onchange behaviour: payment mode follows partner and company."""
    invoice = self.move_model.new(
        {
            "partner_id": self.customer.id,
            "move_type": "out_invoice",
            "company_id": self.company.id,
        }
    )
    self.assertEqual(invoice.payment_mode_id, self.customer_payment_mode)
    # Switching company drops the company-specific payment mode.
    invoice.company_id = self.company_2
    self.assertEqual(invoice.payment_mode_id, self.payment_mode_model)
    # Clearing the mode also clears the partner bank.
    invoice.payment_mode_id = False
    self.assertFalse(invoice.partner_bank_id)
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_invoice_create_in_invoice(self):
    """Posting a vendor bill copies the payment mode onto the payable line."""
    bill = self._create_invoice(
        default_move_type="in_invoice", partner=self.supplier
    )
    bill.action_post()
    payable_lines = bill.line_ids.filtered(
        lambda line: line.account_id.user_type_id == self.acct_type_payable
    )
    self.assertEqual(bill.payment_mode_id, payable_lines[0].payment_mode_id)
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_invoice_create_out_refund(self):
    """A customer refund uses the partner mode's refund payment mode."""
    self.manual_out.bank_account_required = False
    refund = self._create_invoice(
        default_move_type="out_refund", partner=self.customer
    )
    refund.action_post()
    expected_mode = (
        self.customer.customer_payment_mode_id.refund_payment_mode_id
    )
    self.assertEqual(refund.payment_mode_id, expected_mode)
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_invoice_constrains(self):
    """A payment mode belonging to another company is rejected."""
    values = {
        "partner_id": self.supplier.id,
        "move_type": "in_invoice",
        "invoice_date": fields.Date.today(),
        "company_id": self.company.id,
        "payment_mode_id": self.supplier_payment_mode_c2.id,
    }
    with self.assertRaises(UserError):
        self.move_model.create(values)
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_payment_mode_constrains_02(self):
    """Changing a used payment mode's company raises a UserError."""
    balanced_lines = [
        (0, 0, {
            "account_id": self.invoice_account.id,
            "credit": 1000,
            "debit": 0,
            "name": "Test",
            "ref": "reference",
        }),
        (0, 0, {
            "account_id": self.invoice_line_account.id,
            "credit": 0,
            "debit": 1000,
            "name": "Test",
            "ref": "reference",
        }),
    ]
    self.move_model.create(
        {
            "date": fields.Date.today(),
            "journal_id": self.journal_sale.id,
            "name": "/",
            "ref": "reference",
            "state": "draft",
            "invoice_line_ids": balanced_lines,
        }
    )
    with self.assertRaises(UserError):
        self.supplier_payment_mode.company_id = self.company_2
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_invoice_out_refund(self):
    """Refunds made via the reversal wizard inherit mode and bank account."""
    invoice = self._create_invoice(
        default_move_type="out_invoice", partner=self.customer
    )
    invoice.partner_bank_id = False
    invoice.action_post()
    # Reverse the posted invoice through the standard refund wizard.
    wizard = (
        self.env["account.move.reversal"]
        .with_context(
            {
                "active_ids": [invoice.id],
                "active_id": invoice.id,
                "active_model": "account.move",
            }
        )
        .create({"refund_method": "refund", "reason": "reason test create"})
    )
    refund = self.move_model.browse(wizard.reverse_moves()["res_id"])
    self.assertEqual(
        refund.payment_mode_id,
        invoice.payment_mode_id.refund_payment_mode_id,
    )
    self.assertEqual(refund.partner_bank_id, invoice.partner_bank_id)
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_partner_onchange(self):
    """Payment mode / bank account defaults for every move type."""
    customer_invoice = self.move_model.create(
        {"partner_id": self.customer.id, "move_type": "out_invoice"}
    )
    self.assertEqual(customer_invoice.payment_mode_id, self.customer_payment_mode)
    self.assertEqual(self.supplier_invoice.partner_bank_id, self.supplier_bank)
    # Refund move types swap to the counterpart refund payment mode.
    invoice = self.move_model.new(
        {"partner_id": self.customer.id, "move_type": "out_refund"}
    )
    self.assertEqual(invoice.payment_mode_id, self.supplier_payment_mode)
    invoice = self.move_model.new(
        {"partner_id": self.supplier.id, "move_type": "in_refund"}
    )
    self.assertEqual(invoice.payment_mode_id, self.customer_payment_mode)
    # Without a partner, nothing should be proposed.
    invoice = self.move_model.new(
        {"partner_id": False, "move_type": "out_invoice"}
    )
    self.assertFalse(invoice.payment_mode_id)
    for move_type in ("out_refund", "in_invoice", "in_refund"):
        invoice = self.move_model.new(
            {"partner_id": False, "move_type": move_type}
        )
        self.assertFalse(invoice.partner_bank_id)
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def test_print_report(self):
    """The rendered invoice report shows the expected bank account."""
    self.supplier_invoice.partner_bank_id = self.supplier_bank.id
    report = self.env.ref("account.account_invoices")

    def render():
        # Render the qweb report for the supplier invoice as a string.
        return str(report._render_qweb_html(self.supplier_invoice.ids)[0])

    self.assertIn(self.supplier_bank.acc_number, render())
    # Without a partner bank, fall back to the journal of the mode.
    payment_mode = self.supplier_payment_mode
    payment_mode.show_bank_account_from_journal = True
    self.supplier_invoice.payment_mode_id = payment_mode.id
    self.supplier_invoice.partner_bank_id = False
    self.assertIn(self.journal_c1.bank_acc_number, render())
    # A variable link should use the variable journal's bank account.
    payment_mode.bank_account_link = "variable"
    payment_mode.variable_journal_ids = [(6, 0, self.journal.ids)]
    self.assertIn(self.journal_bank.acc_number, render())
OCA/bank-payment
[ 152, 470, 152, 36, 1402917389 ]
def setUp(self):
    """Create a fresh Curl handle for each test."""
    self.curl = pycurl.Curl()
p/pycurl-archived
[ 2, 6, 2, 5, 1361076307 ]
def tearDown(self):
    """Release the Curl handle created in setUp."""
    self.curl.close()
p/pycurl-archived
[ 2, 6, 2, 5, 1361076307 ]
def __init__(self):
    """Empty constructor; there is nothing to initialise."""
    pass
xalt/xalt
[ 36, 10, 36, 1, 1410559911 ]
def execute(self):
    """Specify command line arguments and parse the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--dbname", dest='dbname', action="store",
                        default="xalt", help="xalt")
    return parser.parse_args()
xalt/xalt
[ 36, 10, 36, 1, 1410559911 ]
def __init__(self):
    """Empty Ctor — no state is needed before execute() runs."""
    pass
xalt/xalt
[ 36, 10, 36, 1, 1410559911 ]
def execute(self):
    """Specify command line arguments and parse the command line.

    Returns the parsed argparse namespace holding confFn, xaltCFG,
    input and output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--confFn", dest='confFn', action="store",
                        help="python config file")
    parser.add_argument("--xalt_cfg", dest='xaltCFG', action="store",
                        help="XALT std config")
    parser.add_argument("--input", dest='input', action="store",
                        help="input template file")
    parser.add_argument("--output", dest='output', action="store",
                        help="output file")
    args = parser.parse_args()
    # Bug fix: the parsed namespace was previously dropped — callers do
    # `args = CmdLineOptions().execute()` (see main()) and got None.
    return args
xalt/xalt
[ 36, 10, 36, 1, 1410559911 ]
def convert_template(pattern, replaceA, inputFn, outputFn):
    """Open *inputFn* for template conversion, aborting on failure.

    Exits the process with status -1 when the input file cannot be read.
    """
    try:
        f = open(inputFn, "r")
    except IOError:
        print("Unable to open \"%s\", aborting!" % (inputFn))
        sys.exit(-1)
xalt/xalt
[ 36, 10, 36, 1, 1410559911 ]
def main():
    """Merge python_pkg_patterns from both config files into the template."""
    key = "python_pkg_patterns"
    args = CmdLineOptions().execute()
    # NOTE(review): exec() of the config files assumes they are trusted,
    # locally-controlled input.
    ns = {}
    exec(open(args.confFn).read(), ns)
    patterns = ns.get(key, [])
    ns = {}
    exec(open(args.xaltCFG).read(), ns)
    patterns.extend(ns.get(key, []))
    convert_template("@" + key + "@", patterns, args.input, args.output)
xalt/xalt
[ 36, 10, 36, 1, 1410559911 ]
def start_agent():
    """Launch the agent daemon and report whether startup looked clean.

    Returns True only when the start command produced no output at all
    (the success signature); any output or exception yields False.
    """
    started = False
    try:
        proc = subprocess.Popen(start_command,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if out == '' and err == '':
            logger.log('Agent started.')
            started = True
        elif 'No such process' in err:
            logger.log('Agent not found.')
        else:
            logger.log('Unknown output: "%s"' % err)
    except Exception as e:
        logger.log("Could not start agent.", logger.LogLevel.Error)
        logger.log_exception(e)
    return started
vFense/vFenseAgent-nix
[ 6, 2, 6, 10, 1393869099 ]
def load_agent():
    """Load the agent daemon; logs the outcome, returns nothing.

    Silence on stdout/stderr means success; 'Already loaded' is treated
    as benign; anything else is logged as unknown output.
    """
    try:
        proc = subprocess.Popen(load_command,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if out == '' and err == '':
            logger.log('Agent loaded.')
        elif 'Already loaded' in err:
            logger.log('Agent is already loaded.')
        else:
            logger.log('Unknown output: "%s"' % err)
    except Exception as e:
        logger.log("Could not load agent.", logger.LogLevel.Error)
        logger.log_exception(e)
vFense/vFenseAgent-nix
[ 6, 2, 6, 10, 1393869099 ]
def agent_running_stats():
    """Derive the agent's loaded/running state from the process listing.

    Parses the list command's tab-separated output ("pid\\tstatus\\tname");
    'loaded' means the daemon label appeared at all, 'running' means its
    status column was '-' rather than '0'. Returns an AgentStatus.
    """
    loaded = False
    running = False
    proc = subprocess.Popen(list_command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()

    # Each line: "<pid>\t<status>\t<process name>"
    entries = []
    for line in out.splitlines():
        pid, state, label = line.split('\t')
        entries.append((label, state, pid))

    for label, state, pid in entries:
        if daemon_label == label:
            # state can either be:
            #  '0' meaning not running,
            #  '-' meaning it is running.
            loaded = True
            if state == '-':
                running = True
                break
            elif state == '0':
                running = False

    status = AgentStatus(loaded, running)
    logger.log(str(status), logger.LogLevel.Debug)
    return status
vFense/vFenseAgent-nix
[ 6, 2, 6, 10, 1393869099 ]
def setUp(self, *args, **kwargs):
    """Create two ZBeacon actors bound to the same UDP port (9999).

    Two beacon frames are pre-packed ('ZRE', version 1, a random UUID,
    port 9999 in network byte order) for the tests to publish; both
    actors are made VERBOSE and CONFIGUREd onto the port.
    """
    # BUG FIX: the zmq context used to be created twice; the first
    # instance was immediately shadowed and leaked.
    ctx = zmq.Context()
    # two beacon frames
    self.transmit1 = struct.pack('cccb16sH', b'Z', b'R', b'E', 1,
                                 uuid.uuid4().bytes, socket.htons(9999))
    self.transmit2 = struct.pack('cccb16sH', b'Z', b'R', b'E', 1,
                                 uuid.uuid4().bytes, socket.htons(9999))
    self.node1 = ZActor(ctx, ZBeacon)
    self.node1.send_unicode("VERBOSE")
    self.node1.send_unicode("CONFIGURE", zmq.SNDMORE)
    self.node1.send(struct.pack("I", 9999))
    print("Hostname 1:", self.node1.recv_unicode())
    self.node2 = ZActor(ctx, ZBeacon)
    self.node2.send_unicode("VERBOSE")
    self.node2.send_unicode("CONFIGURE", zmq.SNDMORE)
    self.node2.send(struct.pack("I", 9999))
    print("Hostname 2:", self.node2.recv_unicode())
zeromq/pyre
[ 116, 50, 116, 20, 1382777149 ]
def tearDown(self):
    """Destroy both beacon actors to release their sockets."""
    self.node1.destroy()
    self.node2.destroy()
zeromq/pyre
[ 116, 50, 116, 20, 1382777149 ]
def test_node1(self):
    """node1 accepts a PUBLISH command followed by a beacon frame."""
    self.node1.send_unicode("PUBLISH", zmq.SNDMORE)
    self.node1.send(self.transmit1)
zeromq/pyre
[ 116, 50, 116, 20, 1382777149 ]
def test_recv_beacon1(self):
    """After both nodes publish, node1 receives node2's beacon frame."""
    self.node1.send_unicode("PUBLISH", zmq.SNDMORE)
    self.node1.send(self.transmit1)
    self.node2.send_unicode("PUBLISH", zmq.SNDMORE)
    self.node2.send(self.transmit2)
    # req[1] carries the raw beacon payload; req[0] is presumably the
    # sender's address frame — confirm against the ZBeacon protocol.
    req = self.node1.recv_multipart()
    self.assertEqual(self.transmit2, req[1])
zeromq/pyre
[ 116, 50, 116, 20, 1382777149 ]
def generate_color_handler(cls, stream=sys.stdout):
    """Return a StreamHandler writing to *stream* that uses the class's
    color formatter (``cls.FORMATTER_COLOR``)."""
    color_handler = logging.StreamHandler(stream)
    color_handler.setFormatter(cls.FORMATTER_COLOR)
    return color_handler
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def get_script_name(cls):
    """Name of the running script: basename of sys.argv[0], extension
    stripped."""
    base = os.path.basename(sys.argv[0])
    stem, _ = os.path.splitext(base)
    return stem
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def generate_simple_rotating_file_handler(cls, path_log_file=None, when='midnight', files_count=7):
    """Create a DEBUG-level TimedRotatingFileHandler.

    When *path_log_file* is omitted, a default '<script>.log' next to
    the running script is used (via suggest_script_log_name). Rotation
    interval and the number of kept backups are configurable.
    """
    if path_log_file is None:
        script_dir = os.path.dirname(sys.argv[0])
        path_log_file = cls.suggest_script_log_name(script_dir)

    file_handler = logging.handlers.TimedRotatingFileHandler(
        path_log_file, when=when, backupCount=files_count)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(cls.FORMATTER)
    return file_handler
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def suggest_script_log_name(cls, path_dir):
    """Return '<path_dir>/<script name>.log'."""
    log_name = cls.get_script_name() + '.log'
    return os.path.join(path_dir, log_name)
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def timestamp(with_ms=False, time=None):
    """Format *time* (default: now) as 'YYYYMMDD_HHMMSS', optionally
    with a millisecond suffix ('.mmm')."""
    moment = datetime.datetime.now() if time is None else time
    base_fmt = '%Y%m%d_%H%M%S'
    if with_ms:
        # %f yields microseconds; drop the last three digits for ms.
        return moment.strftime(base_fmt + '.%f')[:-3]
    return moment.strftime(base_fmt)
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def __init__(self, *, n_samples=1000, units_suffix='', units_format='.2f', name=None):
    """Rolling-sample accumulator.

    :param n_samples: max samples retained in the rolling window
    :param units_suffix: unit label appended when rendering (e.g. 'sec')
    :param units_format: format spec used when rendering values
    :param name: optional display name, used as a '[name] ' prefix
    """
    super().__init__()
    self.name: str = name
    # Bounded FIFO holding the most recent samples.
    self.queue_samples = deque(maxlen=n_samples)
    # Running total and most recent sample value — presumably maintained
    # by submit_sample() (not visible here); confirm in the class body.
    self.total = 0
    self.last = 0
    self.units_str = units_suffix
    self.units_format = units_format
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def n_samples(self):
    """Number of samples currently held in the rolling window."""
    return len(self.queue_samples)
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def last_str(self):
    """Human-readable rendering of the most recent sample.

    Format: '[<name>] <last> <units>'; the bracketed name prefix is
    omitted when no name was set.
    """
    prefix = f'[{self.name}] ' if self.name else ''
    value = f'{self.last:{self.units_format}}'
    return f'{prefix}{value} {self.units_str}'
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def average(self):
    """Mean of the retained samples, or None when the window is empty."""
    count = self.n_samples
    if count == 0:
        return None
    return self.total / count
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def __init__(self, n_samples=1000, units_format='.1f', **kwargs) -> None:
    """Timer sampler: a rolling sampler whose units are fixed to 'sec'.

    :param n_samples: rolling-window size forwarded to the base sampler
    :param units_format: format spec used when rendering durations
    """
    super().__init__(n_samples=n_samples, units_suffix='sec', units_format=units_format, **kwargs)
    # Start timestamp of the measurement in progress (0 = none yet).
    self.time_last_start = 0
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def __exit__(self, t, value, tb):
    """Context-manager exit: stop the timer and record the sample.

    Returns None implicitly, so exceptions are not suppressed.
    """
    self.end()
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def end(self):
    """Finish the current measurement and record it as a sample.

    peek() presumably returns the elapsed time since the matching
    start — confirm in the class definition.
    """
    self.submit_sample(self.peek())
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def __init__(self, stream=None):
    """Wrap *stream*; defaults to a fresh in-memory StringIO buffer.

    Note: any falsy stream (not just None) is replaced by a StringIO.
    """
    if not stream:
        stream = io.StringIO()
    self.stream = stream
wolf1986/log_utils
[ 3, 2, 3, 1, 1497855595 ]
def PutAsync(entities, **kwargs):
    """
    Asynchronously store one or more entities in the data store.

    This function is identical to :func:`server.db.Put`, except that it
    returns an asynchronous object. Call ``get_result()`` on the return
    value to block on the call and get the results.
    """
    # Ensure over-long string properties are moved out of the index.
    if isinstance(entities, Entity):
        entities._fixUnindexedProperties()
    elif isinstance(entities, list):
        for entity in entities:
            assert isinstance(entity, Entity)
            entity._fixUnindexedProperties()
    if conf["viur.db.caching"] > 0:
        if isinstance(entities, Entity):  # Just one:
            if entities.is_saved():  # Its an update
                # Lock/evict the cached copy before overwriting, so a
                # stale read cannot be served while the write is in flight.
                memcache.delete(str(entities.key()), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__)
        elif isinstance(entities, list):
            for entity in entities:
                assert isinstance(entity, Entity)
                if entity.is_saved():  # Its an update
                    memcache.delete(str(entity.key()), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__)
    return (datastore.PutAsync(entities, **kwargs))
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def GetAsync(keys, **kwargs):
    """
    Asynchronously retrieves one or more entities from the data store.

    This function is identical to :func:`server.db.Get`, except that it
    returns an asynchronous object. Call ``get_result()`` on the return
    value to block on the call and get the results.
    """
    class AsyncResultWrapper:
        """
        Wraps a result that's already there into something looking
        like an RPC-Object.
        """
        def __init__(self, res):
            self.res = res

        def get_result(self):
            return (self.res)

    # Only single-key lookups outside a transaction can be served from
    # memcache; transactions must always see the datastore state.
    if conf["viur.db.caching"] > 0 and not datastore.IsInTransaction():
        if isinstance(keys, datastore_types.Key) or isinstance(keys, basestring):  # Just one:
            res = memcache.get(str(keys), namespace=__CacheKeyPrefix__)
            if res:
                return (AsyncResultWrapper(res))
    # Either the result wasnt found, or we got a list of keys to fetch;
    # --> no caching possible
    return (datastore.GetAsync(keys, **kwargs))
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def GetOrInsert(key, kindName=None, parent=None, **kwargs):
    """
    Either creates a new entity with the given key, or returns the existing one.

    Its guaranteed that there is no race-condition here; it will never
    overwrite an previously created entity. Extra keyword arguments
    passed to this function will be used to populate the entity if it
    has to be created; otherwise they are ignored.

    :param key: The key which will be fetched or created. \
    If key is a string, it will be used as the name for the new entity, therefore the \
    collectionName is required in this case.
    :type key: server.db.Key | str
    :param kindName: The data kind to use for that entity. Ignored if key is a db.Key.
    :type kindName: str

    :param parent: The parent entity of the entity.
    :type parent: db.Key or None

    :returns: Returns the wanted Entity.
    :rtype: server.db.Entity
    """
    def txn(key, kwargs):
        # Runs transactionally: fetch, or build + populate + persist on miss.
        try:
            res = Entity.FromDatastoreEntity(datastore.Get(key))
        except datastore_errors.EntityNotFoundError:
            res = Entity(kind=key.kind(), parent=key.parent(), name=key.name(), id=key.id())
            # kwargs only populate a *newly created* entity.
            for k, v in kwargs.items():
                res[k] = v
            datastore.Put(res)
        return (res)

    # Normalize string keys: try the encoded form first, then fall back
    # to building a key from (kindName, name[, parent]).
    if not isinstance(key, datastore_types.Key):
        try:
            key = datastore_types.Key(encoded=key)
        except:
            assert kindName
            key = datastore_types.Key.from_path(kindName, key, parent=parent)
    if datastore.IsInTransaction():
        # Already transactional - run inline instead of nesting.
        return txn(key, kwargs)

    return datastore.RunInTransaction(txn, key, kwargs)
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def Delete(keys, **kwargs):
    """
    Deletes one or more entities from the data store.

    :warning: Permanently deletes entities, use with care!

    Deletes the given entity or entities from the data store. You can
    only delete entities from your app. If there is an error, the
    function raises a subclass of :exc:`datastore_errors.Error`.

    :param keys: Key, str or list of keys or strings to be deleted.
    :type keys: Key | str | list of Key | list of str

    :param config: Optional configuration to use for this request. This must be specified\
    as a keyword argument.
    :type config: dict

    :raises: :exc:`TransactionFailedError`, if the deletion could not be committed.
    """
    if conf["viur.db.caching"] > 0:
        # Evict (lock) cached copies before the datastore delete so
        # stale entities cannot be served afterwards.
        if isinstance(keys, datastore_types.Key) or isinstance(keys, basestring):  # Just one:
            memcache.delete(str(keys), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__)
        elif isinstance(keys, list):
            for key in keys:
                assert isinstance(key, datastore_types.Key) or isinstance(key, basestring)
                memcache.delete(str(key), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__)
    return (datastore.Delete(keys, **kwargs))
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def __init__(self, kind, srcSkelClass=None, *args, **kwargs):
    """Wrap a datastore query over *kind*.

    :param kind: entity kind to query
    :param srcSkelClass: skeleton used by fetch() to deserialize results
        (queries built without one cannot use fetch())
    """
    super(Query, self).__init__()
    self.datastoreQuery = datastore.Query(kind, *args, **kwargs)
    self.srcSkel = srcSkelClass
    # Default result limit used when fetch()/run() get no explicit one.
    self.amount = 30
    self._filterHook = None
    self._orderHook = None
    self._origCursor = None
    self._customMultiQueryMerge = None  # Sometimes, the default merge functionality from MultiQuery is not sufficient
    self._calculateInternalMultiQueryAmount = None  # Some (Multi-)Queries need a different amount of results per subQuery than actually returned
    self.customQueryInfo = {}  # Allow carrying custom data along with the query. Currently only used by spartialBone to record the guranteed correctnes
    self.origKind = kind
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def setOrderHook(self, hook):
    """
    Installs *hook* as a callback function for new orderings.

    *hook* will be called each time a :func:`db.Query.order` is called
    on this query.

    :param hook: The function to register as callback. \
    A value of None removes the currently active hook.
    :type hook: callable

    :returns: The previously registered hook (if any), or None.
    """
    previous = self._orderHook
    self._orderHook = hook
    return previous
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def filter(self, filter, value=__undefinedC__):
    """
    Adds a filter to this query. #fixme: Better description required here...

    The following examples are equivalent: ``filter( "name", "John" )``
    and ``filter( {"name": "John"} )``.

    See also :func:`server.db.Query.mergeExternalFilter` for a safer filter
    implementation.

    :param filter: A dictionary to read the filters from, or a string (name of that filter)
    :type filter: dict | str

    :param value: The value of that filter. Only valid, if *key* is a string.
    :type: value: int | long | float | bytes | string | list | datetime

    :returns: Returns the query itself for chaining.
    :rtype: server.db.Query
    """
    if self.datastoreQuery is None:
        # This query is already unsatisfiable and adding more constrains to this won't change this
        return (self)
    if isinstance(filter, dict):
        # Dict form: apply each entry recursively.
        for k, v in filter.items():
            self.filter(k, v)
        return (self)
    if self._filterHook is not None:
        try:
            r = self._filterHook(self, filter, value)
        except RuntimeError:
            # Hook declared the query unsatisfiable.
            self.datastoreQuery = None
            return (self)
        if r is None:
            # The Hook did something special directly on 'self' to apply that filter,
            # no need for us to do anything
            return (self)
        filter, value = r
    # Cast keys into string
    if filter != datastore_types.KEY_SPECIAL_PROPERTY and isinstance(value, datastore_types.Key):
        value = str(value)
    if value != None and (filter.endswith(" !=") or filter.lower().endswith(" in")):
        # != and IN have no native support; emulate them with a MultiQuery
        # built from < /> (for !=) or one sub-query per value (for IN).
        if isinstance(self.datastoreQuery, datastore.MultiQuery):
            raise NotImplementedError("You cannot use multiple IN or != filter")
        origQuery = self.datastoreQuery
        queries = []
        if filter.endswith("!="):
            q = datastore.Query(kind=self.getKind())
            q["%s <" % filter.split(" ")[0]] = value
            queries.append(q)
            q = datastore.Query(kind=self.getKind())
            q["%s >" % filter.split(" ")[0]] = value
            queries.append(q)
        else:  # IN filter
            if not (isinstance(value, list) or isinstance(value, tuple)):
                raise NotImplementedError("Value must be list or tuple if using IN filter!")
            for val in value:
                q = datastore.Query(kind=self.getKind())
                q["%s =" % filter.split(" ")[0]] = val
                # NOTE(review): origQuery.__orderings relies on private
                # name-mangling lining up with datastore.Query's internal
                # attribute (both classes are named "Query") — confirm
                # before renaming this class or moving this code.
                q.Order(*origQuery.__orderings)
                queries.append(q)
        self.datastoreQuery = MultiQuery(queries, origQuery.__orderings)
        # Carry over all previously applied filters to the MultiQuery.
        for k, v in origQuery.items():
            self.datastoreQuery[k] = v
    elif filter and value is not __undefinedC__:
        self.datastoreQuery[filter] = value
    else:
        raise NotImplementedError("Incorrect call to query.filter()!")
    return (self)
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def ancestor(self, ancestor):
    """
    Sets an ancestor for this query.

    Restricts the result set to entities descended from *ancestor*
    (their parent, parent's parent, and so on). Raises BadArgumentError
    or BadKeyError if *ancestor* is not an existing Entity or Key in
    the data store.

    :param ancestor: Entity or Key. The key must be complete.
    :type ancestor: server.db.Entity | Key

    :returns: Returns the query itself for chaining.
    :rtype: server.db.Query
    """
    self.datastoreQuery.Ancestor(ancestor)
    return self
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def limit(self, amount):
    """
    Sets the query limit to *amount* entities in the result.

    Specifying an amount of 0 disables the limit (use with care!).

    :param amount: The maximum number of entities.
    :type amount: int

    :returns: Returns the query itself for chaining.
    :rtype: server.db.Query
    """
    self.amount = amount
    return self
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def getQueryOptions(self):
    """
    Returns a datastore_query.QueryOptions for the current instance.

    :rtype: datastore_query.QueryOptions
    """
    options = self.datastoreQuery.GetQueryOptions()
    return options
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def getOrder(self):
    """
    Gets a datastore_query.Order for the current instance.

    :returns: The sort orders set on the current query, or None
        (also None when the query is already unsatisfiable).
    :rtype: datastore_query.Order or None
    """
    query = self.datastoreQuery
    if query is None:
        return None
    return query.GetOrder()
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def getOrders(self):
    """
    Returns a list of orders applied to this query.

    Each element is a (property, direction) tuple; direction is falsy
    for ascending, truthy for descending. Returns [] when the orderings
    cannot be read (any error is swallowed deliberately).

    :rtype: list
    """
    try:
        orderings = self.datastoreQuery.__orderings
        return [(prop, dir) for (prop, dir) in orderings]
    except:
        return []
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def getKind(self):
    """
    Returns the kind of this query, or None when the query is
    unsatisfiable.

    :rtype: str
    """
    query = self.datastoreQuery
    if query is None:
        return None
    return query.__kind
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def getAncestor(self):
    """
    Returns the ancestor of this query (if any).

    :rtype: str | None
    """
    return self.datastoreQuery.ancestor
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def fetch(self, limit=-1, **kwargs):
    """
    Run this query and fetch results as :class:`server.skeleton.SkelList`.

    This function is similar to :func:`server.db.Query.run`, but returns a
    :class:`server.skeleton.SkelList` instance instead of Entities.

    :warning: The query must be limited!

    If queried data is wanted as instances of Entity,
    :func:`server.db.Query.run` should be used.

    :param limit: Limits the query to the defined maximum entities. \
    A maxiumum value of 99 entries can be fetched at once.
    :type limit: int

    NOTE(review): the docstring claims a maximum of 99, but the range
    check below allows up to 100 — confirm which is intended.

    :raises: :exc:`BadFilterError` if a filter string is invalid
    :raises: :exc:`BadValueError` if a filter value is invalid.
    :raises: :exc:`BadQueryError` if an IN filter in combination with a sort order on\
    another property is provided
    """
    if self.srcSkel is None:
        raise NotImplementedError("This query has not been created using skel.all()")
    # -1 means "use the query's default amount".
    amount = limit if limit != -1 else self.amount
    if amount < 1 or amount > 100:
        raise NotImplementedError("This query is not limited! You must specify an upper bound using limit() between 1 and 100")
    # Local import avoids a circular dependency with server.skeleton.
    from server.skeleton import SkelList
    res = SkelList(self.srcSkel)
    dbRes = self.run(amount)
    res.customQueryInfo = self.customQueryInfo
    if dbRes is None:
        return (res)
    for e in dbRes:
        # s = self.srcSkel.clone()
        # Reuse the source skeleton with a fresh value cache per entity.
        valueCache = {}
        self.srcSkel.setValuesCache(valueCache)
        self.srcSkel.setValues(e)
        res.append(self.srcSkel.getValuesCache())
    try:
        c = self.datastoreQuery.GetCursor()
        if c:
            res.cursor = c.urlsafe()
        else:
            res.cursor = None
    except AssertionError:
        # No Cursors available on MultiQueries ( in or != )
        res.cursor = None
    return (res)
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def get(self):
    """
    Returns only the first entity of the current query.

    :returns: dict on success, or None if the result-set is empty.
    :rtype: dict
    """
    try:
        return list(self.run(limit=1))[0]
    except (IndexError, TypeError):
        # IndexError: run() returned an empty list.
        # TypeError: run() returned None - also an empty result-set.
        return None
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def count(self, limit=1000, **kwargs):
    """
    Returns the number of entities that this query matches.

    :param limit: Limits the query to the defined maximum entities count.\
    If there are more results than this limit, stop short and just return this number.\
    Providing this argument makes the count operation more efficient.
    :type limit: int

    :param config: Optional configuration to use for this request. This must be specified\
    as a keyword argument.
    :type config: dict

    :returns: The number of results.
    :rtype: int
    """
    return self.datastoreQuery.Count(limit, **kwargs)
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def _fixUnindexedProperties(self):
    """
    Ensures that no property with strlen > 500 makes it into the index.

    The datastore rejects indexed string properties longer than 500
    characters; any such property (or list/tuple containing one) is
    moved to the unindexed set, with a warning logged.
    """
    unindexed = list(self.getUnindexedProperties())
    for k, v in self.items():
        if isinstance(v, basestring) and len(v) >= 500 and not k in unindexed:
            logging.warning("Your property %s cant be indexed!" % k)
            unindexed.append(k)
        # BUG FIX: was `isinstance(v, tuple())` - isinstance against an
        # *empty tuple of types* is always False, so over-long strings
        # inside tuple-valued properties were silently never checked.
        elif isinstance(v, list) or isinstance(v, tuple):
            if any([isinstance(x, basestring) and len(x) >= 500 for x in v]) and not k in unindexed:
                logging.warning("Your property %s cant be indexed!" % k)
                unindexed.append(k)
    self.set_unindexed_properties(unindexed)
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def entityGroup(self):
    """
    Returns this entity's entity group as a Key.

    Note that the returned Key will be incomplete if this is a root
    entity and its key is incomplete.
    """
    return (self.entity_group())
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def setUnindexedProperties(self, unindexed_properties):
    """
    Sets the list of unindexed properties.

    Properties listed here are *not* saved in an index; its impossible
    to use them in a query filter / sort. But it saves one db-write op
    per property listed here.
    """
    self.set_unindexed_properties(unindexed_properties)
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def set(self, key, value, indexed=True):
    """
    Sets a property.

    :param key: key of the property to set.
    :type key: str

    :param value: Any value to set tot the property.

    :param indexed: Defines if the value is indexed.
    :type indexed: bool

    :raises: :exc:`BadPropertyError` if the property name is the \
    empty string or not a string.
    :raises: :exc:`BadValueError` if the value is not a supported type.
    """
    unindexed = list(self.getUnindexedProperties())
    currently_unindexed = key in unindexed
    # Keep the unindexed set in sync with the requested index state.
    if not indexed and not currently_unindexed:
        unindexed.append(key)
        self.setUnindexedProperties(unindexed)
    elif indexed and currently_unindexed:
        unindexed.remove(key)
        self.setUnindexedProperties(unindexed)
    self[key] = value
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def FromDatastoreEntity(entity):
    """
    Converts a datastore.Entity into a :class:`db.server.Entity`.

    Required, as ``datastore.Get()`` always returns a datastore.Entity
    (and it seems that currently there is no valid way to change that).
    """
    src_key = entity.key()
    converted = Entity(entity.kind(),
                       parent=src_key.parent(),
                       _app=src_key.app(),
                       name=src_key.name(),
                       id=src_key.id(),
                       unindexed_properties=entity.unindexed_properties(),
                       namespace=entity.namespace())
    # Copy all property values across.
    converted.update(entity)
    return converted
viur-framework/server
[ 12, 5, 12, 31, 1496136499 ]
def doit(channel):
    """Sample CPU and memory usage and push both to the given channel.

    Field 1 receives the CPU utilisation percentage, field 2 the
    virtual-memory usage percentage; the values, the current local
    time, and the server response are printed. Any failure is reported
    as "connection failed".
    """
    cpu_pc = psutil.cpu_percent()
    mem_avail_mb = psutil.virtual_memory().percent
    try:
        # BUG FIX: previously sent the undefined name `mem_avail`,
        # which raised NameError inside the try block and was swallowed
        # by the bare except - every call printed "connection failed".
        response = channel.update({1: cpu_pc, 2: mem_avail_mb})
        print(cpu_pc)
        print(mem_avail_mb)
        print(strftime("%a, %d %b %Y %H:%M:%S", localtime()))
        print(response)
    except:
        print("connection failed")
mchwalisz/thingspeak
[ 32, 14, 32, 9, 1454869765 ]
def testCombinationPairs(self):
    """create_combination yields every unordered pair in input order."""
    items = ["A", "B", "C"]
    expected = [("A", "B"), ("A", "C"), ("B", "C")]
    actual = paraphrase_ms_coco.create_combination(items)
    self.assertEqual(actual, expected)
tensorflow/tensor2tensor
[ 13097, 3192, 13097, 587, 1497545859 ]
def testBidirectionalTrue(self, data, bidirectional):
    """With bidirectional=True each pair is emitted in both directions.

    `data` and `bidirectional` are presumably injected by mock/patch
    decorators not visible in this extract.
    """
    problem = paraphrase_ms_coco.ParaphraseGenerationProblem()
    problem.bidirectional = True
    expected = [{"inputs": "sentence1", "targets": "sentence2"},
                {"inputs": "sentence2", "targets": "sentence1"}]
    actual = list(problem.generate_samples("data_dir", "tmp_dir",
                                           "dataset_split"))
    self.assertEqual(actual, expected)
tensorflow/tensor2tensor
[ 13097, 3192, 13097, 587, 1497545859 ]
def testBidirectionalFalse(self, data, bidirectional):
    """With bidirectional=False each pair is emitted only once.

    `data` and `bidirectional` are presumably injected by mock/patch
    decorators not visible in this extract.
    """
    problem = paraphrase_ms_coco.ParaphraseGenerationProblem()
    problem.bidirectional = False
    expected = [{"inputs": "sentence1", "targets": "sentence2"}]
    actual = list(problem.generate_samples("data_dir", "tmp_dir",
                                           "dataset_split"))
    self.assertEqual(actual, expected)
tensorflow/tensor2tensor
[ 13097, 3192, 13097, 587, 1497545859 ]
def __init__(self, *args, **kwargs):
    """MongoClient wrapper with mlaunch-friendly defaults.

    Defaults to a direct connection with a very short (1 ms) server
    selection timeout so dead nodes fail fast, and advertises
    "mlaunch v<version>" as the client application name.
    """
    kwargs.setdefault('directConnection', True)
    kwargs.setdefault('serverSelectionTimeoutMS', 1)
    # Set client application name for MongoDB 3.4+ servers
    kwargs['appName'] = f'''mlaunch v{__version__}'''
    Connection.__init__(self, *args, **kwargs)
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def shutdown_host(port, username=None, password=None, authdb=None): """ Send the shutdown command to a mongod or mongos on given port. This function can be called as a separate thread. """ host = 'localhost:%i' % port try: if username and password and authdb: if authdb != "admin": raise RuntimeError("given username/password is not for " "admin database") mc = MongoConnection(host, username=username, password=password) else: mc = MongoConnection(host)
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def check_mongo_server_output(binary, argument): """Call mongo[d|s] with arguments such as --help or --version. This is used only to check the server's output. We expect the server to exit immediately. """ try: proc = subprocess.Popen(['%s' % binary, argument], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False) except OSError as exc: print('Failed to launch %s' % binary) raise exc out, err = proc.communicate() if proc.returncode: raise OSError(out or err) return out
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def __init__(self, test=False): BaseCmdLineTool.__init__(self) # arguments self.args = None # startup parameters for each port self.startup_info = {} # data structures for the discovery feature self.cluster_tree = {} self.cluster_tags = defaultdict(list) self.cluster_running = {} # memoize ignored arguments passed to different binaries self.ignored_arguments = {} # config docs for replica sets (key is replica set name) self.config_docs = {} # shard connection strings self.shard_connection_str = [] # ssl configuration to start mongod or mongos, or create a MongoClient self.ssl_server_args = '' self.ssl_pymongo_options = {} # tls configuration to start mongod or mongos, or create a MongoClient self.tls_server_args = '' self.tls_pymongo_options = {} # indicate if running in testing mode self.test = test # version of MongoDB server self.current_version = self.getMongoDVersion()
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def is_file(arg):
    """argparse type-checker: the (user-expanded) path must exist.

    Reports through init_parser.error() when missing; returns the
    original, unexpanded argument on success.
    """
    expanded = os.path.expanduser(arg)
    if not os.path.exists(expanded):
        init_parser.error("The file [%s] does not exist" % arg)
    return arg
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def init(self):
    """
    Sub-command init.

    Branches out to sharded, replicaset or single node methods.

    NOTE(review): this body was reconstructed from a whitespace-mangled
    extract; statement nesting should be verified against the original
    mtools source before relying on it.
    """
    # check for existing environment. Only allow subsequent
    # 'mlaunch init' if they are identical.
    if self._load_parameters():
        if self.loaded_args != self.args:
            raise SystemExit('A different environment already exists '
                             'at %s.' % self.dir)
        first_init = False
    else:
        first_init = True

    self.ssl_pymongo_options = self._get_ssl_pymongo_options(self.args)
    self.tls_pymongo_options = self._get_tls_pymongo_options(self.args)

    # Warn when the server demands client certs but none were supplied.
    if (self._get_ssl_server_args() and
            not self.args['sslAllowConnectionsWithoutCertificates'] and
            not self.args['sslClientCertificate'] and
            not self.args['sslClientPEMKeyFile']):
        sys.stderr.write('warning: server requires certificates but no'
                         ' --sslClientCertificate provided\n')

    if (self._get_tls_server_args() and
            not self.args['tlsAllowConnectionsWithoutCertificates'] and
            not self.args['tlsClientCertificate'] and
            not self.args['tlsClientCertificateKeyFile']):
        sys.stderr.write('warning: server requires certificates but no'
                         ' --tlsClientCertificate provided\n')

    # number of default config servers
    if self.args['config'] == -1:
        self.args['config'] = 1

    # Exit with error if --csrs is set and MongoDB < 3.1.0
    if (self.args['csrs'] and
            LooseVersion(self.current_version) < LooseVersion("3.1.0") and
            LooseVersion(self.current_version) != LooseVersion("0.0.0")):
        errmsg = (" \n * The '--csrs' option requires MongoDB version "
                  "3.2.0 or greater, the current version is %s.\n"
                  % self.current_version)
        raise SystemExit(errmsg)

    # add the 'csrs' parameter as default for MongoDB >= 3.3.0
    if (LooseVersion(self.current_version) >= LooseVersion("3.3.0") or
            LooseVersion(self.current_version) == LooseVersion("0.0.0")):
        self.args['csrs'] = True

    # construct startup strings
    self._construct_cmdlines()

    # write out parameters
    if self.args['verbose']:
        print("writing .mlaunch_startup file.")
    self._store_parameters()

    # exit if running in testing mode
    if self.test:
        return

    # check if authentication is enabled, make key file
    if self.args['auth'] and first_init:
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        if '--keyFile' in self.unknown_args:
            # Check if keyfile is readable
            keyfile = None
            try:
                keyfile_idx = self.unknown_args.index('--keyFile') + 1
                keyfile_path = self.unknown_args[keyfile_idx]
                keyfile = self._read_key_file(keyfile_path)
            except:
                print(f'\n WARNING: Specified keyFile does not appear readable: {keyfile_path}\n')
        else:
            keyfile = os.path.join(self.dir, "keyfile")
            print(f'Generating keyfile: {keyfile}')
            os.system('openssl rand -base64 753 > "%s"' % keyfile)
            if os.name != 'nt':
                os.system(f'chmod 600 "{keyfile}"')

    # if not all ports are free, complain and suggest alternatives.
    all_ports = self.get_tagged(['all'])
    ports_avail = self.wait_for(all_ports, 1, 1, to_start=False)

    if not all(map(itemgetter(1), ports_avail)):
        dir_addon = (' --dir %s' % self.relative_dir
                     if self.relative_dir != './data'
                     else '')
        errmsg = ('\nThe following ports are not available: %s\n\n'
                  % ', '.join([str(p[0])
                               for p in ports_avail if not p[1]]))
        errmsg += (" * If you want to restart nodes from this "
                   "environment, use 'mlaunch start%s' instead.\n"
                   % dir_addon)
        errmsg += (" * If the ports are used by a different mlaunch "
                   "environment, stop those first with 'mlaunch stop "
                   "--dir <env>'.\n")
        errmsg += (" * You can also specify a different port range with "
                   "an additional '--port <startport>'\n")
        raise SystemExit(errmsg)

    if self.args['sharded']:
        shard_names = self._get_shard_names(self.args)

        # start mongod (shard and config) nodes and wait
        nodes = self.get_tagged(['mongod', 'down'])
        self._start_on_ports(nodes, wait=True, override_auth=True)

        # initiate replica sets if init is called for the first time
        if first_init:
            if self.args['csrs']:
                # Initiate config servers in a replicaset
                if self.args['verbose']:
                    print('Initiating config server replica set.')
                members = sorted(self.get_tagged(["config"]))
                self._initiate_replset(members[0], "configRepl")
            for shard in shard_names:
                # initiate replica set on first member
                if self.args['verbose']:
                    print('Initiating shard replica set %s.' % shard)
                members = sorted(self.get_tagged([shard]))
                self._initiate_replset(members[0], shard)

        # add mongos
        mongos = sorted(self.get_tagged(['mongos', 'down']))
        self._start_on_ports(mongos, wait=True, override_auth=True)

        if first_init:
            # add shards
            mongos = sorted(self.get_tagged(['mongos']))
            con = self.client('localhost:%i' % mongos[0])

            shards_to_add = len(self.shard_connection_str)
            nshards = con['config']['shards'].count_documents({})
            if nshards < shards_to_add:
                if self.args['replicaset']:
                    print("adding shards. can take up to 30 seconds...")
                else:
                    print("adding shards.")

            shard_conns_and_names = list(zip(self.shard_connection_str,
                                             shard_names))
            # Retry adding shards until the config server reports all
            # of them registered; mongos may not be ready immediately.
            while True:
                try:
                    nshards = con['config']['shards'].count_documents({})
                except Exception:
                    nshards = 0
                if nshards >= shards_to_add:
                    break

                for conn_str, name in shard_conns_and_names:
                    try:
                        res = con['admin'].command(SON([('addShard', conn_str),
                                                        ('name', name)]))
                    except Exception as e:
                        if self.args['verbose']:
                            print('%s will retry in a moment.' % e)
                        continue

                    if res['ok']:
                        if self.args['verbose']:
                            print("shard %s added successfully" % conn_str)
                        shard_conns_and_names.remove((conn_str, name))
                        break
                    else:
                        if self.args['verbose']:
                            print(res + ' - will retry')

                time.sleep(1)

    elif self.args['single']:
        # just start node
        nodes = self.get_tagged(['single', 'down'])
        self._start_on_ports(nodes, wait=False)

    elif self.args['replicaset']:
        # start nodes and wait
        nodes = sorted(self.get_tagged(['mongod', 'down']))
        self._start_on_ports(nodes, wait=True)

        # initiate replica set
        if first_init:
            self._initiate_replset(nodes[0], self.args['name'])

    # wait for all nodes to be running
    nodes = self.get_tagged(['all'])
    self.wait_for(nodes)

    # now that nodes are running, add admin user if authentication enabled
    if self.args['auth'] and self.args['initial-user'] and first_init:
        self.discover()
        nodes = []

        if self.args['sharded']:
            nodes = self.get_tagged(['mongos', 'running'])
        elif self.args['single']:
            nodes = self.get_tagged(['single', 'running'])
        elif self.args['replicaset']:
            print("waiting for primary to add a user.")
            if self._wait_for_primary():
                nodes = self.get_tagged(['primary', 'running'])
            else:
                raise RuntimeError("failed to find a primary, so adding "
                                   "admin user isn't possible")

        if not nodes:
            raise RuntimeError("can't connect to server, so adding admin "
                               "user isn't possible")

        roles = []
        found_cluster_admin = False

        if self.args['auth_role_docs']:
            # Roles given as JSON documents.
            for role_str in self.args['auth_roles']:
                role_doc = json.loads(role_str)
                roles.append(role_doc)
                if role_doc['role'] == "clusterAdmin":
                    found_cluster_admin = True
        else:
            roles = self.args['auth_roles']
            found_cluster_admin = "clusterAdmin" in roles

        if not found_cluster_admin:
            warnings.warn("the stop command will not work with auth "
                          "because the user does not have the "
                          "clusterAdmin role")

        self._add_user(sorted(nodes)[0], name=self.args['username'],
                       password=self.args['password'],
                       database=self.args['auth_db'],
                       roles=roles)

        if self.args['sharded']:
            for shard in shard_names:
                members = sorted(self.get_tagged([shard]))
                if self.args['verbose']:
                    print("adding users to %s" % shard)
                self._add_user(members[0],
                               name=self.args['username'],
                               password=self.args['password'],
                               database=self.args['auth_db'],
                               roles=roles)

        if self.args['verbose']:
            print("added user %s on %s database"
                  % (self.args['username'], self.args['auth_db']))

    # in sharded env, if --mongos 0, kill the dummy mongos
    if self.args['sharded'] and self.args['mongos'] == 0:
        port = self.args['port']
        print("shutting down temporary mongos on localhost:%s" % port)
        username = self.args['username'] if self.args['auth'] else None
        password = self.args['password'] if self.args['auth'] else None
        authdb = self.args['auth_db'] if self.args['auth'] else None
        shutdown_host(port, username, password, authdb)

    # discover again, to get up-to-date info
    self.discover()

    # for sharded authenticated clusters, restart after first_init
    # to enable auth
    if self.args['sharded'] and self.args['auth'] and first_init:
        if self.args['verbose']:
            print("restarting cluster to enable auth...")
        self.restart()

    if self.args['auth'] and self.args['initial-user']:
        print('Username "%s", password "%s"'
              % (self.args['username'], self.args['password']))

    if self.args['verbose']:
        print("done.")
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def getMongoDVersion(self):
    """
    Return the version of the mongod binary as a string, e.g. "4.2.0".

    Runs ``mongod --version`` (using the 'binarypath' from self.args when
    set), parses the first line of output, strips the leading
    "db version v" prefix and any "-<suffix>" (release-candidate) tail.
    Returns "0.0" when the binary cannot be executed, so callers can
    still do version comparisons.
    """
    binary = "mongod"
    if self.args and self.args.get('binarypath'):
        binary = os.path.join(self.args['binarypath'], binary)

    try:
        out = check_mongo_server_output(binary, '--version')
    except Exception:
        # binary missing or not executable: report a version that
        # compares lower than any real release
        return "0.0"

    buf = BytesIO(out)
    current_version = buf.readline().strip().decode('utf-8')

    # remove prefix "db version v".
    # Fix: guard this rindex like the one below — a version line without
    # any 'v' previously raised an uncaught ValueError here.
    try:
        if current_version.rindex('v') > 0:
            current_version = current_version.rpartition('v')[2]
    except ValueError:
        pass

    # remove suffix making assumption that all release candidates
    # equal revision 0
    try:
        if current_version.rindex('-') > 0:
            # release candidate?
            current_version = current_version.rpartition('-')[0]
    except Exception:
        pass

    # use .get to avoid KeyError when 'verbose' is absent (matches the
    # .get('binarypath') access above)
    if self.args and self.args.get('verbose'):
        print("Detected mongod version: %s" % current_version)
    return current_version
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def stop(self):
    """
    Sub-command stop.

    Interprets the supplied tags and shuts down every node that matches
    all of them (each tag selects a set of nodes; only the intersection
    is stopped).

    At present this simply delegates to kill().
    """
    self.kill()
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def list(self):
    """
    Sub-command list.

    Takes no further parameters. Will discover the current configuration
    and print a table of all the nodes with status and port. Builds a
    list of OrderedDict rows (None entries act as table separators,
    plain strings as shard headings) and hands it to print_table(),
    or dumps it as JSON when --json was given.
    """
    self.discover()
    print_docs = []

    # mongos: one row per mongos process
    for node in sorted(self.get_tagged(['mongos'])):
        doc = OrderedDict([('process', 'mongos'), ('port', node),
                           ('status', 'running'
                            if self.cluster_running[node] else 'down')])
        print_docs.append(doc)

    # separator row after the mongos section (None = blank line)
    if len(self.get_tagged(['mongos'])) > 0:
        print_docs.append(None)

    # configs: one row per config server
    for node in sorted(self.get_tagged(['config'])):
        doc = OrderedDict([('process', 'config server'), ('port', node),
                           ('status', 'running'
                            if self.cluster_running[node] else 'down')])
        print_docs.append(doc)

    if len(self.get_tagged(['config'])) > 0:
        print_docs.append(None)

    # mongod: grouped per shard (shard name may be None for
    # non-sharded setups)
    for shard in self._get_shard_names(self.loaded_args):
        tags = []
        replicaset = ('replicaset' in self.loaded_args and
                      self.loaded_args['replicaset'])
        padding = ''

        if shard:
            # shard heading row (a bare string, not an OrderedDict),
            # and indent the member rows below it
            print_docs.append(shard)
            tags.append(shard)
            padding = ' '

        if replicaset:
            # primary
            primary = self.get_tagged(tags + ['primary', 'running'])
            if len(primary) > 0:
                # NOTE: 'list' here resolves to the builtin, not this method
                node = list(primary)[0]
                print_docs.append(OrderedDict
                                  ([('process', padding + 'primary'),
                                    ('port', node),
                                    ('status', 'running'
                                     if self.cluster_running[node]
                                     else 'down')]))

            # secondaries
            secondaries = self.get_tagged(tags + ['secondary', 'running'])
            for node in sorted(secondaries):
                print_docs.append(OrderedDict
                                  ([('process', padding + 'secondary'),
                                    ('port', node),
                                    ('status', 'running'
                                     if self.cluster_running[node]
                                     else 'down')]))

            # data-bearing nodes that are down or not in the
            # replica set yet
            mongods = self.get_tagged(tags + ['mongod'])
            arbiters = self.get_tagged(tags + ['arbiter'])

            nodes = sorted(mongods - primary - secondaries - arbiters)
            for node in nodes:
                print_docs.append(OrderedDict
                                  ([('process', padding + 'mongod'),
                                    ('port', node),
                                    ('status', 'running'
                                     if self.cluster_running[node]
                                     else 'down')]))

            # arbiters
            for node in arbiters:
                print_docs.append(OrderedDict
                                  ([('process', padding + 'arbiter'),
                                    ('port', node),
                                    ('status', 'running'
                                     if self.cluster_running[node]
                                     else 'down')]))

        else:
            # standalone (single) node for this shard / setup
            nodes = self.get_tagged(tags + ['mongod'])
            if len(nodes) > 0:
                node = nodes.pop()
                print_docs.append(OrderedDict
                                  ([('process', padding + 'single'),
                                    ('port', node),
                                    ('status', 'running'
                                     if self.cluster_running[node]
                                     else 'down')]))
        if shard:
            print_docs.append(None)

    processes = self._get_processes()
    startup = self.startup_info

    # enrich rows with pid / tags / startup command; skip separators and
    # shard-heading strings
    for doc in [x for x in print_docs if type(x) == OrderedDict]:
        try:
            doc['pid'] = processes[doc['port']].pid
        except KeyError:
            # process not running, no pid available
            doc['pid'] = '-'
        # NOTE(review): direct ['verbose'] / ['tags'] indexing assumes
        # these keys always exist in self.args for this sub-command —
        # confirm against the argument parser defaults
        if self.args['verbose'] or self.args['tags']:
            tags = self.get_tags_of_port(doc['port'])
            doc['tags'] = ', '.join(tags)
        if self.args['startup']:
            try:
                # first try running process (startup may be modified
                # via start command)
                doc['startup command'] = ' '.join(processes[doc['port']]
                                                  .cmdline())
            except KeyError:
                # if not running, use stored startup_info
                doc['startup command'] = startup[str(doc['port'])]

    if (self.args['json']):
        print(json.dumps(print_docs))
    else:
        print()
        print_docs.append(None)
        print_table(print_docs)
        if self.loaded_args.get('auth'):
            print('\tauth: "%s:%s"' % (self.loaded_args.get('username'),
                                       self.loaded_args.get('password')))
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def restart(self):
    """
    Sub-command restart: stop all running nodes, then start them again.

    Blocks until every previously running process has actually exited
    before issuing the start command, so the ports are free again.
    """
    # snapshot the currently running processes before shutting down
    running = self._get_processes()
    running_procs = list(running.values())

    # stop nodes via stop command
    self.stop()

    # wait until all of the old processes have terminated
    psutil.wait_procs(running_procs)

    # bring everything back up via the start command
    self.start()
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def discover(self):
    """
    Fetch state for each processes.

    Build the self.cluster_tree, self.cluster_tags, self.cluster_running
    data structures, needed for sub-commands start, stop, list.

    Ports are assigned deterministically from self.loaded_args['port']:
    first the mongos processes, then the shard/replica-set/single mongod
    nodes, then the config servers.
    """
    # need self.args['command'] so fail if it's not available
    if (not self.args or 'command' not in self.args or
            not self.args['command']):
        return

    # load .mlaunch_startup file for start, stop, list, use current
    # parameters for init
    if self.args['command'] == 'init':
        self.loaded_args = self.args
        self.loaded_unknown_args = self.unknown_args
    else:
        if not self._load_parameters():
            startup_file = os.path.join(self.dir, ".mlaunch_startup")
            raise SystemExit("Can't read %s, use 'mlaunch init ...' first."
                             % startup_file)

    self.ssl_pymongo_options = self._get_ssl_pymongo_options(self.loaded_args)
    self.tls_pymongo_options = self._get_tls_pymongo_options(self.loaded_args)

    # reset cluster_* variables
    self.cluster_tree = {}
    self.cluster_tags = defaultdict(list)
    self.cluster_running = {}

    # get shard names
    shard_names = self._get_shard_names(self.loaded_args)

    # some shortcut variables
    is_sharded = ('sharded' in self.loaded_args and
                  self.loaded_args['sharded'] is not None)
    is_replicaset = ('replicaset' in self.loaded_args and
                     self.loaded_args['replicaset'])
    is_single = 'single' in self.loaded_args and self.loaded_args['single']
    has_arbiter = ('arbiter' in self.loaded_args and
                   self.loaded_args['arbiter'])

    # determine number of nodes to inspect
    if is_sharded:
        num_config = self.loaded_args['config']
        # at least one temp. mongos for adding shards, will be
        # killed later on
        num_mongos = max(1, self.loaded_args['mongos'])
        num_shards = len(shard_names)
    else:
        num_shards = 1
        num_config = 0
        num_mongos = 0

    num_nodes_per_shard = self.loaded_args['nodes'] if is_replicaset else 1
    if has_arbiter:
        num_nodes_per_shard += 1

    num_nodes = num_shards * num_nodes_per_shard + num_config + num_mongos

    current_port = self.loaded_args['port']

    # tag all nodes with 'all'
    self.cluster_tags['all'].extend(list(range(current_port,
                                               current_port + num_nodes)))

    # tag all nodes with their port number (as string) and whether
    # they are running
    for port in range(current_port, current_port + num_nodes):
        self.cluster_tags[str(port)].append(port)
        running = self.is_running(port)
        self.cluster_running[port] = running
        self.cluster_tags['running' if running else 'down'].append(port)

    # find all mongos
    for i in range(num_mongos):
        port = i + current_port

        # add mongos to cluster tree
        self.cluster_tree.setdefault('mongos', []).append(port)
        # add mongos to tags
        self.cluster_tags['mongos'].append(port)

    current_port += num_mongos

    # find all mongods (sharded, replicaset or single)
    if shard_names is None:
        shard_names = [None]

    for shard in shard_names:
        port_range = list(range(current_port,
                                current_port + num_nodes_per_shard))

        # all of these are mongod nodes
        self.cluster_tags['mongod'].extend(port_range)

        if shard:
            # if this is a shard, store in cluster_tree and tag shard name
            self.cluster_tree.setdefault('shard', []).append(port_range)
            self.cluster_tags[shard].extend(port_range)

        if is_replicaset:
            # get replica set states
            rs_name = shard if shard else self.loaded_args['name']

            try:
                mrsc = self.client(
                    ','.join('localhost:%i' % i for i in port_range),
                    replicaSet=rs_name)

                # primary, secondaries, arbiters
                # @todo: this is no longer working because MongoClient
                # is now non-blocking
                if mrsc.primary:
                    self.cluster_tags['primary'].append(mrsc.primary[1])
                self.cluster_tags['secondary'].extend(list(map
                                                          (itemgetter(1),
                                                           mrsc.secondaries)))
                self.cluster_tags['arbiter'].extend(list(map(itemgetter(1),
                                                             mrsc.arbiters)))

                # secondaries in cluster_tree (order is now important)
                self.cluster_tree.setdefault('secondary', [])
                for i, secondary in enumerate(sorted(map
                                                     (itemgetter(1),
                                                      mrsc.secondaries))):
                    if len(self.cluster_tree['secondary']) <= i:
                        self.cluster_tree['secondary'].append([])
                    self.cluster_tree['secondary'][i].append(secondary)
            except (ConnectionFailure, ConfigurationError):
                # replica set not reachable / not configured yet; leave
                # primary/secondary/arbiter tags unset for this shard
                pass
        elif is_single:
            self.cluster_tags['single'].append(current_port)

        # increase current_port
        current_port += num_nodes_per_shard

    # add config server to cluster tree
    # NOTE(review): 'port' here is left over from an earlier loop, so
    # this appends the last previously visited port under 'config' —
    # looks suspicious, confirm intent (config ports are also appended
    # again in the loop below)
    self.cluster_tree.setdefault('config', []).append(port)

    # If not CSRS, set the number of config servers to be 1 or 3
    # This is needed, otherwise `mlaunch init --sharded 2 --replicaset
    # --config 2` on <3.3.0 will crash
    if not self.args.get('csrs') and self.args['command'] == 'init':
        if num_config >= 3:
            num_config = 3
        else:
            num_config = 1

    for i in range(num_config):
        port = i + current_port

        try:
            mc = self.client('localhost:%i' % port)
            mc.admin.command('ping')
            running = True

        except ConnectionFailure:
            # node not reachable
            running = False

        # add config server to cluster tree
        self.cluster_tree.setdefault('config', []).append(port)
        # add config server to tags
        self.cluster_tags['config'].append(port)
        self.cluster_tags['mongod'].append(port)

    # NOTE(review): advances by num_mongos rather than num_config after
    # iterating the config servers — verify this is intentional
    current_port += num_mongos
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def get_tagged(self, tags):
    """
    Return the set of ports whose nodes match ALL of the given tags.

    Tag format.

    The format for the tags list is tuples for tags: mongos, config,
    shard, secondary tags of the form (tag, number), e.g. ('mongos', 2)
    which references the second mongos in the list. For all other tags,
    it is simply the string, e.g. 'primary'.
    """
    # if tags is a simple string, make it a list (note: tuples like
    # ('mongos', 2) must be in a surrounding list).
    # Fix: the former check `not hasattr(tags, '__iter__') and
    # type(tags) == str` never matched in Python 3 (str has __iter__),
    # so a bare string was iterated character by character.
    if isinstance(tags, str):
        tags = [tags]

    # start from every known node and intersect per tag
    nodes = set(self.cluster_tags['all'])

    for tag in tags:
        if re.match(r"\w+ \d{1,2}", tag):
            # special case for tuple tags: mongos, config, shard,
            # secondary. These can contain a number
            tag, number = tag.split()

            try:
                branch = self.cluster_tree[tag][int(number) - 1]
            except (IndexError, KeyError):
                # unknown group or out-of-range index: skip this tag
                continue

            if hasattr(branch, '__iter__'):
                subset = set(branch)
            else:
                subset = set([branch])
        else:
            # otherwise use tags dict to get the subset
            subset = set(self.cluster_tags[tag])

        nodes = nodes.intersection(subset)

    return nodes
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]
def wait_for(self, ports, interval=1.0, timeout=30, to_start=True):
    """
    Ping the given ports in parallel threads until they change state.

    One worker thread per port polls its host every `interval` seconds
    for at most `timeout` seconds. Blocks until all hosts are running
    (to_start=True) or shut down (to_start=False) and returns a tuple
    with one result per port.
    """
    results = Queue.Queue()
    workers = [
        threading.Thread(target=wait_for_host, args=(
            port, interval, timeout, to_start, results,
            self.ssl_pymongo_options, self.tls_pymongo_options))
        for port in ports
    ]

    if self.args and 'verbose' in self.args and self.args['verbose']:
        print("waiting for nodes %s..."
              % ('to start' if to_start else 'to shutdown'))

    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # drain one result per port and hand them back as a tuple
    return tuple(results.get_nowait() for _ in ports)
rueckstiess/mtools
[ 1782, 375, 1782, 74, 1347607696 ]