after_merge
stringlengths
28
79.6k
before_merge
stringlengths
20
79.6k
url
stringlengths
38
71
full_traceback
stringlengths
43
922k
traceback_type
stringclasses
555 values
async def copy_room_tags_and_direct_to_room( self, old_room_id, new_room_id, user_id ) -> None: """Copies the tags and direct room state from one room to another. Args: old_room_id: The room ID of the old room. new_room_id: The room ID of the new room. user_id: The user's ID. """ # Retrieve user account data for predecessor room user_account_data, _ = await self.store.get_account_data_for_user(user_id) # Copy direct message state if applicable direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {}) # Check which key this room is under if isinstance(direct_rooms, dict): for key, room_id_list in direct_rooms.items(): if old_room_id in room_id_list and new_room_id not in room_id_list: # Add new room_id to this key direct_rooms[key].append(new_room_id) # Save back to user's m.direct account data await self.store.add_account_data_for_user( user_id, AccountDataTypes.DIRECT, direct_rooms ) break # Copy room tags if applicable room_tags = await self.store.get_tags_for_room(user_id, old_room_id) # Copy each room tag to the new room for tag, tag_content in room_tags.items(): await self.store.add_tag_to_room(user_id, new_room_id, tag, tag_content)
async def copy_room_tags_and_direct_to_room( self, old_room_id, new_room_id, user_id ) -> None: """Copies the tags and direct room state from one room to another. Args: old_room_id: The room ID of the old room. new_room_id: The room ID of the new room. user_id: The user's ID. """ # Retrieve user account data for predecessor room user_account_data, _ = await self.store.get_account_data_for_user(user_id) # Copy direct message state if applicable direct_rooms = user_account_data.get("m.direct", {}) # Check which key this room is under if isinstance(direct_rooms, dict): for key, room_id_list in direct_rooms.items(): if old_room_id in room_id_list and new_room_id not in room_id_list: # Add new room_id to this key direct_rooms[key].append(new_room_id) # Save back to user's m.direct account data await self.store.add_account_data_for_user( user_id, "m.direct", direct_rooms ) break # Copy room tags if applicable room_tags = await self.store.get_tags_for_room(user_id, old_room_id) # Copy each room tag to the new room for tag, tag_content in room_tags.items(): await self.store.add_tag_to_room(user_id, new_room_id, tag, tag_content)
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
async def _generate_sync_entry_for_rooms( self, sync_result_builder: "SyncResultBuilder", account_data_by_room: Dict[str, Dict[str, JsonDict]], ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. Args: sync_result_builder account_data_by_room: Dictionary of per room account data Returns: Returns a 4-tuple of `(newly_joined_rooms, newly_joined_or_invited_users, newly_left_rooms, newly_left_users)` """ user_id = sync_result_builder.sync_config.user.to_string() block_all_room_ephemeral = ( sync_result_builder.since_token is None and sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() ) if block_all_room_ephemeral: ephemeral_by_room = {} # type: Dict[str, List[JsonDict]] else: now_token, ephemeral_by_room = await self.ephemeral_by_room( sync_result_builder, now_token=sync_result_builder.now_token, since_token=sync_result_builder.since_token, ) sync_result_builder.now_token = now_token # We check up front if anything has changed, if it hasn't then there is # no point in going further. since_token = sync_result_builder.since_token if not sync_result_builder.full_state: if since_token and not ephemeral_by_room and not account_data_by_room: have_changed = await self._have_rooms_changed(sync_result_builder) if not have_changed: tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) if not tags_by_room: logger.debug("no-oping sync") return set(), set(), set(), set() ignored_account_data = await self.store.get_global_account_data_by_type_for_user( AccountDataTypes.IGNORED_USER_LIST, user_id=user_id ) # If there is ignored users account data and it matches the proper type, # then use it. ignored_users = frozenset() # type: FrozenSet[str] if ignored_account_data: ignored_users_data = ignored_account_data.get("ignored_users", {}) if isinstance(ignored_users_data, dict): ignored_users = frozenset(ignored_users_data.keys()) if since_token: room_changes = await self._get_rooms_changed(sync_result_builder, ignored_users) tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) else: room_changes = await self._get_all_rooms(sync_result_builder, ignored_users) tags_by_room = await self.store.get_tags_for_user(user_id) room_entries = room_changes.room_entries invited = room_changes.invited newly_joined_rooms = room_changes.newly_joined_rooms newly_left_rooms = room_changes.newly_left_rooms async def handle_room_entries(room_entry): logger.debug("Generating room entry for %s", room_entry.room_id) res = await self._generate_room_entry( sync_result_builder, ignored_users, room_entry, ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), account_data=account_data_by_room.get(room_entry.room_id, {}), always_include=sync_result_builder.full_state, ) logger.debug("Generated room entry for %s", room_entry.room_id) return res await concurrently_execute(handle_room_entries, room_entries, 10) sync_result_builder.invited.extend(invited) # Now we want to get any newly joined or invited users newly_joined_or_invited_users = set() newly_left_users = set() if since_token: for joined_sync in sync_result_builder.joined: it = itertools.chain( joined_sync.timeline.events, joined_sync.state.values() ) for event in it: if event.type == EventTypes.Member: if ( event.membership == Membership.JOIN or event.membership == Membership.INVITE ): newly_joined_or_invited_users.add(event.state_key) else: prev_content = event.unsigned.get("prev_content", {}) prev_membership = prev_content.get("membership", None) if prev_membership == Membership.JOIN: newly_left_users.add(event.state_key) newly_left_users -= newly_joined_or_invited_users return ( set(newly_joined_rooms), newly_joined_or_invited_users, set(newly_left_rooms), newly_left_users, )
async def _generate_sync_entry_for_rooms( self, sync_result_builder: "SyncResultBuilder", account_data_by_room: Dict[str, Dict[str, JsonDict]], ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. Args: sync_result_builder account_data_by_room: Dictionary of per room account data Returns: Returns a 4-tuple of `(newly_joined_rooms, newly_joined_or_invited_users, newly_left_rooms, newly_left_users)` """ user_id = sync_result_builder.sync_config.user.to_string() block_all_room_ephemeral = ( sync_result_builder.since_token is None and sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() ) if block_all_room_ephemeral: ephemeral_by_room = {} # type: Dict[str, List[JsonDict]] else: now_token, ephemeral_by_room = await self.ephemeral_by_room( sync_result_builder, now_token=sync_result_builder.now_token, since_token=sync_result_builder.since_token, ) sync_result_builder.now_token = now_token # We check up front if anything has changed, if it hasn't then there is # no point in going further. since_token = sync_result_builder.since_token if not sync_result_builder.full_state: if since_token and not ephemeral_by_room and not account_data_by_room: have_changed = await self._have_rooms_changed(sync_result_builder) if not have_changed: tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) if not tags_by_room: logger.debug("no-oping sync") return set(), set(), set(), set() ignored_account_data = await self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id ) if ignored_account_data: ignored_users = ignored_account_data.get("ignored_users", {}).keys() else: ignored_users = frozenset() if since_token: room_changes = await self._get_rooms_changed(sync_result_builder, ignored_users) tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) else: room_changes = await self._get_all_rooms(sync_result_builder, ignored_users) tags_by_room = await self.store.get_tags_for_user(user_id) room_entries = room_changes.room_entries invited = room_changes.invited newly_joined_rooms = room_changes.newly_joined_rooms newly_left_rooms = room_changes.newly_left_rooms async def handle_room_entries(room_entry): logger.debug("Generating room entry for %s", room_entry.room_id) res = await self._generate_room_entry( sync_result_builder, ignored_users, room_entry, ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), account_data=account_data_by_room.get(room_entry.room_id, {}), always_include=sync_result_builder.full_state, ) logger.debug("Generated room entry for %s", room_entry.room_id) return res await concurrently_execute(handle_room_entries, room_entries, 10) sync_result_builder.invited.extend(invited) # Now we want to get any newly joined or invited users newly_joined_or_invited_users = set() newly_left_users = set() if since_token: for joined_sync in sync_result_builder.joined: it = itertools.chain( joined_sync.timeline.events, joined_sync.state.values() ) for event in it: if event.type == EventTypes.Member: if ( event.membership == Membership.JOIN or event.membership == Membership.INVITE ): newly_joined_or_invited_users.add(event.state_key) else: prev_content = event.unsigned.get("prev_content", {}) prev_membership = prev_content.get("membership", None) if prev_membership == Membership.JOIN: newly_left_users.add(event.state_key) newly_left_users -= newly_joined_or_invited_users return ( set(newly_joined_rooms), newly_joined_or_invited_users, set(newly_left_rooms), newly_left_users, )
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
async def _get_rooms_changed( self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str] ) -> _RoomChanges: """Gets the the changes that have happened since the last sync.""" user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config assert since_token # Get a list of membership change events that have happened. rooms_changed = await self.store.get_membership_changes_for_user( user_id, since_token.room_key, now_token.room_key ) mem_change_events_by_room_id = {} # type: Dict[str, List[EventBase]] for event in rooms_changed: mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) newly_joined_rooms = [] newly_left_rooms = [] room_entries = [] invited = [] for room_id, events in mem_change_events_by_room_id.items(): logger.debug( "Membership changes in %s: [%s]", room_id, ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)), ) non_joins = [e for e in events if e.membership != Membership.JOIN] has_join = len(non_joins) != len(events) # We want to figure out if we joined the room at some point since # the last sync (even if we have since left). This is to make sure # we do send down the room, and with full state, where necessary old_state_ids = None if room_id in sync_result_builder.joined_room_ids and non_joins: # Always include if the user (re)joined the room, especially # important so that device list changes are calculated correctly. # If there are non-join member events, but we are still in the room, # then the user must have left and joined newly_joined_rooms.append(room_id) # User is in the room so we don't need to do the invite/leave checks continue if room_id in sync_result_builder.joined_room_ids or has_join: old_state_ids = await self.get_state_at(room_id, since_token) old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None) old_mem_ev = None if old_mem_ev_id: old_mem_ev = await self.store.get_event(old_mem_ev_id, allow_none=True) # debug for #4422 if has_join: prev_membership = None if old_mem_ev: prev_membership = old_mem_ev.membership issue4422_logger.debug( "Previous membership for room %s with join: %s (event %s)", room_id, prev_membership, old_mem_ev_id, ) if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: newly_joined_rooms.append(room_id) # If user is in the room then we don't need to do the invite/leave checks if room_id in sync_result_builder.joined_room_ids: continue if not non_joins: continue # Check if we have left the room. This can either be because we were # joined before *or* that we since joined and then left. if events[-1].membership != Membership.JOIN: if has_join: newly_left_rooms.append(room_id) else: if not old_state_ids: old_state_ids = await self.get_state_at(room_id, since_token) old_mem_ev_id = old_state_ids.get( (EventTypes.Member, user_id), None ) old_mem_ev = None if old_mem_ev_id: old_mem_ev = await self.store.get_event( old_mem_ev_id, allow_none=True ) if old_mem_ev and old_mem_ev.membership == Membership.JOIN: newly_left_rooms.append(room_id) # Only bother if we're still currently invited should_invite = non_joins[-1].membership == Membership.INVITE if should_invite: if event.sender not in ignored_users: room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) if room_sync: invited.append(room_sync) # Always include leave/ban events. Just take the last one. # TODO: How do we handle ban -> leave in same batch? leave_events = [ e for e in non_joins if e.membership in (Membership.LEAVE, Membership.BAN) ] if leave_events: leave_event = leave_events[-1] leave_position = await self.store.get_position_for_event( leave_event.event_id ) # If the leave event happened before the since token then we # bail. if since_token and not leave_position.persisted_after(since_token.room_key): continue # We can safely convert the position of the leave event into a # stream token as it'll only be used in the context of this # room. (c.f. the docstring of `to_room_stream_token`). leave_token = since_token.copy_and_replace( "room_key", leave_position.to_room_stream_token() ) # If this is an out of band message, like a remote invite # rejection, we include it in the recents batch. Otherwise, we # let _load_filtered_recents handle fetching the correct # batches. # # This is all screaming out for a refactor, as the logic here is # subtle and the moving parts numerous. if leave_event.internal_metadata.is_out_of_band_membership(): batch_events = [leave_event] # type: Optional[List[EventBase]] else: batch_events = None room_entries.append( RoomSyncResultBuilder( room_id=room_id, rtype="archived", events=batch_events, newly_joined=room_id in newly_joined_rooms, full_state=False, since_token=since_token, upto_token=leave_token, ) ) timeline_limit = sync_config.filter_collection.timeline_limit() # Get all events for rooms we're currently joined to. room_to_events = await self.store.get_room_events_stream_for_rooms( room_ids=sync_result_builder.joined_room_ids, from_key=since_token.room_key, to_key=now_token.room_key, limit=timeline_limit + 1, ) # We loop through all room ids, even if there are no new events, in case # there are non room events that we need to notify about. for room_id in sync_result_builder.joined_room_ids: room_entry = room_to_events.get(room_id, None) newly_joined = room_id in newly_joined_rooms if room_entry: events, start_key = room_entry prev_batch_token = now_token.copy_and_replace("room_key", start_key) entry = RoomSyncResultBuilder( room_id=room_id, rtype="joined", events=events, newly_joined=newly_joined, full_state=False, since_token=None if newly_joined else since_token, upto_token=prev_batch_token, ) else: entry = RoomSyncResultBuilder( room_id=room_id, rtype="joined", events=[], newly_joined=newly_joined, full_state=False, since_token=since_token, upto_token=since_token, ) if newly_joined: # debugging for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "RoomSyncResultBuilder events for newly joined room %s: %r", room_id, entry.events, ) room_entries.append(entry) return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms)
async def _get_rooms_changed( self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] ) -> _RoomChanges: """Gets the the changes that have happened since the last sync.""" user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config assert since_token # Get a list of membership change events that have happened. rooms_changed = await self.store.get_membership_changes_for_user( user_id, since_token.room_key, now_token.room_key ) mem_change_events_by_room_id = {} # type: Dict[str, List[EventBase]] for event in rooms_changed: mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) newly_joined_rooms = [] newly_left_rooms = [] room_entries = [] invited = [] for room_id, events in mem_change_events_by_room_id.items(): logger.debug( "Membership changes in %s: [%s]", room_id, ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)), ) non_joins = [e for e in events if e.membership != Membership.JOIN] has_join = len(non_joins) != len(events) # We want to figure out if we joined the room at some point since # the last sync (even if we have since left). This is to make sure # we do send down the room, and with full state, where necessary old_state_ids = None if room_id in sync_result_builder.joined_room_ids and non_joins: # Always include if the user (re)joined the room, especially # important so that device list changes are calculated correctly. # If there are non-join member events, but we are still in the room, # then the user must have left and joined newly_joined_rooms.append(room_id) # User is in the room so we don't need to do the invite/leave checks continue if room_id in sync_result_builder.joined_room_ids or has_join: old_state_ids = await self.get_state_at(room_id, since_token) old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None) old_mem_ev = None if old_mem_ev_id: old_mem_ev = await self.store.get_event(old_mem_ev_id, allow_none=True) # debug for #4422 if has_join: prev_membership = None if old_mem_ev: prev_membership = old_mem_ev.membership issue4422_logger.debug( "Previous membership for room %s with join: %s (event %s)", room_id, prev_membership, old_mem_ev_id, ) if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: newly_joined_rooms.append(room_id) # If user is in the room then we don't need to do the invite/leave checks if room_id in sync_result_builder.joined_room_ids: continue if not non_joins: continue # Check if we have left the room. This can either be because we were # joined before *or* that we since joined and then left. if events[-1].membership != Membership.JOIN: if has_join: newly_left_rooms.append(room_id) else: if not old_state_ids: old_state_ids = await self.get_state_at(room_id, since_token) old_mem_ev_id = old_state_ids.get( (EventTypes.Member, user_id), None ) old_mem_ev = None if old_mem_ev_id: old_mem_ev = await self.store.get_event( old_mem_ev_id, allow_none=True ) if old_mem_ev and old_mem_ev.membership == Membership.JOIN: newly_left_rooms.append(room_id) # Only bother if we're still currently invited should_invite = non_joins[-1].membership == Membership.INVITE if should_invite: if event.sender not in ignored_users: room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) if room_sync: invited.append(room_sync) # Always include leave/ban events. Just take the last one. # TODO: How do we handle ban -> leave in same batch? leave_events = [ e for e in non_joins if e.membership in (Membership.LEAVE, Membership.BAN) ] if leave_events: leave_event = leave_events[-1] leave_position = await self.store.get_position_for_event( leave_event.event_id ) # If the leave event happened before the since token then we # bail. if since_token and not leave_position.persisted_after(since_token.room_key): continue # We can safely convert the position of the leave event into a # stream token as it'll only be used in the context of this # room. (c.f. the docstring of `to_room_stream_token`). leave_token = since_token.copy_and_replace( "room_key", leave_position.to_room_stream_token() ) # If this is an out of band message, like a remote invite # rejection, we include it in the recents batch. Otherwise, we # let _load_filtered_recents handle fetching the correct # batches. # # This is all screaming out for a refactor, as the logic here is # subtle and the moving parts numerous. if leave_event.internal_metadata.is_out_of_band_membership(): batch_events = [leave_event] # type: Optional[List[EventBase]] else: batch_events = None room_entries.append( RoomSyncResultBuilder( room_id=room_id, rtype="archived", events=batch_events, newly_joined=room_id in newly_joined_rooms, full_state=False, since_token=since_token, upto_token=leave_token, ) ) timeline_limit = sync_config.filter_collection.timeline_limit() # Get all events for rooms we're currently joined to. room_to_events = await self.store.get_room_events_stream_for_rooms( room_ids=sync_result_builder.joined_room_ids, from_key=since_token.room_key, to_key=now_token.room_key, limit=timeline_limit + 1, ) # We loop through all room ids, even if there are no new events, in case # there are non room events that we need to notify about. for room_id in sync_result_builder.joined_room_ids: room_entry = room_to_events.get(room_id, None) newly_joined = room_id in newly_joined_rooms if room_entry: events, start_key = room_entry prev_batch_token = now_token.copy_and_replace("room_key", start_key) entry = RoomSyncResultBuilder( room_id=room_id, rtype="joined", events=events, newly_joined=newly_joined, full_state=False, since_token=None if newly_joined else since_token, upto_token=prev_batch_token, ) else: entry = RoomSyncResultBuilder( room_id=room_id, rtype="joined", events=[], newly_joined=newly_joined, full_state=False, since_token=since_token, upto_token=since_token, ) if newly_joined: # debugging for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "RoomSyncResultBuilder events for newly joined room %s: %r", room_id, entry.events, ) room_entries.append(entry) return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms)
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
async def _get_all_rooms( self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str] ) -> _RoomChanges: """Returns entries for all rooms for the user. Args: sync_result_builder ignored_users: Set of users ignored by user. """ user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config membership_list = ( Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN, ) room_list = await self.store.get_rooms_for_local_user_where_membership_is( user_id=user_id, membership_list=membership_list ) room_entries = [] invited = [] for event in room_list: if event.membership == Membership.JOIN: room_entries.append( RoomSyncResultBuilder( room_id=event.room_id, rtype="joined", events=None, newly_joined=False, full_state=True, since_token=since_token, upto_token=now_token, ) ) elif event.membership == Membership.INVITE: if event.sender in ignored_users: continue invite = await self.store.get_event(event.event_id) invited.append(InvitedSyncResult(room_id=event.room_id, invite=invite)) elif event.membership in (Membership.LEAVE, Membership.BAN): # Always send down rooms we were banned or kicked from. if not sync_config.filter_collection.include_leave: if event.membership == Membership.LEAVE: if user_id == event.sender: continue leave_token = now_token.copy_and_replace( "room_key", RoomStreamToken(None, event.stream_ordering) ) room_entries.append( RoomSyncResultBuilder( room_id=event.room_id, rtype="archived", events=None, newly_joined=False, full_state=True, since_token=since_token, upto_token=leave_token, ) ) return _RoomChanges(room_entries, invited, [], [])
async def _get_all_rooms( self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] ) -> _RoomChanges: """Returns entries for all rooms for the user. Args: sync_result_builder ignored_users: Set of users ignored by user. """ user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config membership_list = ( Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN, ) room_list = await self.store.get_rooms_for_local_user_where_membership_is( user_id=user_id, membership_list=membership_list ) room_entries = [] invited = [] for event in room_list: if event.membership == Membership.JOIN: room_entries.append( RoomSyncResultBuilder( room_id=event.room_id, rtype="joined", events=None, newly_joined=False, full_state=True, since_token=since_token, upto_token=now_token, ) ) elif event.membership == Membership.INVITE: if event.sender in ignored_users: continue invite = await self.store.get_event(event.event_id) invited.append(InvitedSyncResult(room_id=event.room_id, invite=invite)) elif event.membership in (Membership.LEAVE, Membership.BAN): # Always send down rooms we were banned or kicked from. if not sync_config.filter_collection.include_leave: if event.membership == Membership.LEAVE: if user_id == event.sender: continue leave_token = now_token.copy_and_replace( "room_key", RoomStreamToken(None, event.stream_ordering) ) room_entries.append( RoomSyncResultBuilder( room_id=event.room_id, rtype="archived", events=None, newly_joined=False, full_state=True, since_token=since_token, upto_token=leave_token, ) ) return _RoomChanges(room_entries, invited, [], [])
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], account_data: Dict[str, JsonDict], always_include: bool = False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: sync_result_builder ignored_users: Set of users ignored by user. room_builder ephemeral: List of new ephemeral events for room tags: List of *all* tags for room, or None if there has been no change. account_data: List of new account data for room always_include: Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) events = room_builder.events # We want to shortcut out as early as possible. if not (always_include or account_data or ephemeral or full_state): if events == [] and tags is None: return now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token batch = await self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, potential_recents=events, newly_joined_room=newly_joined, ) # Note: `batch` can be both empty and limited here in the case where # `_load_filtered_recents` can't find any events the user should see # (e.g. due to having ignored the sender of the last 50 events). if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "Timeline events after filtering in newly-joined room %s: %r", room_id, batch, ) # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the # tag was added by synapse e.g. for server notice rooms. if full_state: user_id = sync_result_builder.sync_config.user.to_string() tags = await self.store.get_tags_for_room(user_id, room_id) # If there aren't any tags, don't send the empty tags list down # sync if not tags: tags = None account_data_events = [] if tags is not None: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) account_data_events = sync_config.filter_collection.filter_room_account_data( account_data_events ) ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) if not (always_include or batch or account_data_events or ephemeral or full_state): return state = await self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, full_state=full_state ) summary = {} # type: Optional[JsonDict] # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( # we recalulate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or ( # XXX: this may include false positives in the form of LL # members which have snuck into state batch.limited and any(t == EventTypes.Member for (t, k) in state) ) or since_token is None ): summary = await self.compute_summary( room_id, sync_config, batch, state, now_token ) if room_builder.rtype == "joined": unread_notifications = {} # type: Dict[str, int] room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, ephemeral=ephemeral, account_data=account_data_events, unread_notifications=unread_notifications, summary=summary, unread_count=0, ) if room_sync or always_include: notifs = await self.unread_notifs_for_room_id(room_id, sync_config) unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] room_sync.unread_count = notifs["unread_count"] sync_result_builder.joined.append(room_sync) if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.debug( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": archived_room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) if archived_room_sync or always_include: sync_result_builder.archived.append(archived_room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype)
async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], account_data: Dict[str, JsonDict], always_include: bool = False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: sync_result_builder ignored_users: Set of users ignored by user. room_builder ephemeral: List of new ephemeral events for room tags: List of *all* tags for room, or None if there has been no change. account_data: List of new account data for room always_include: Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) events = room_builder.events # We want to shortcut out as early as possible. if not (always_include or account_data or ephemeral or full_state): if events == [] and tags is None: return now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token batch = await self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, potential_recents=events, newly_joined_room=newly_joined, ) # Note: `batch` can be both empty and limited here in the case where # `_load_filtered_recents` can't find any events the user should see # (e.g. due to having ignored the sender of the last 50 events). if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "Timeline events after filtering in newly-joined room %s: %r", room_id, batch, ) # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the # tag was added by synapse e.g. for server notice rooms. if full_state: user_id = sync_result_builder.sync_config.user.to_string() tags = await self.store.get_tags_for_room(user_id, room_id) # If there aren't any tags, don't send the empty tags list down # sync if not tags: tags = None account_data_events = [] if tags is not None: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) account_data_events = sync_config.filter_collection.filter_room_account_data( account_data_events ) ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) if not (always_include or batch or account_data_events or ephemeral or full_state): return state = await self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, full_state=full_state ) summary = {} # type: Optional[JsonDict] # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( # we recalulate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or ( # XXX: this may include false positives in the form of LL # members which have snuck into state batch.limited and any(t == EventTypes.Member for (t, k) in state) ) or since_token is None ): summary = await self.compute_summary( room_id, sync_config, batch, state, now_token ) if room_builder.rtype == "joined": unread_notifications = {} # type: Dict[str, int] room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, ephemeral=ephemeral, account_data=account_data_events, unread_notifications=unread_notifications, summary=summary, unread_count=0, ) if room_sync or always_include: notifs = await self.unread_notifs_for_room_id(room_id, sync_config) unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] room_sync.unread_count = notifs["unread_count"] sync_result_builder.joined.append(room_sync) if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.debug( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": archived_room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) if archived_room_sync or always_include: sync_result_builder.archived.append(archived_room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype)
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
async def is_ignored_by( self, ignored_user_id: str, ignorer_user_id: str, cache_context: _CacheContext ) -> bool: ignored_account_data = await self.get_global_account_data_by_type_for_user( AccountDataTypes.IGNORED_USER_LIST, ignorer_user_id, on_invalidate=cache_context.invalidate, ) if not ignored_account_data: return False try: return ignored_user_id in ignored_account_data.get("ignored_users", {}) except TypeError: # The type of the ignored_users field is invalid. return False
async def is_ignored_by( self, ignored_user_id: str, ignorer_user_id: str, cache_context: _CacheContext ) -> bool: ignored_account_data = await self.get_global_account_data_by_type_for_user( "m.ignored_user_list", ignorer_user_id, on_invalidate=cache_context.invalidate, ) if not ignored_account_data: return False return ignored_user_id in ignored_account_data.get("ignored_users", {})
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
async def filter_events_for_client( storage: Storage, user_id, events, is_peeking=False, always_include_ids=frozenset(), filter_send_to_client=True, ): """ Check which events a user is allowed to see. If the user can see the event but its sender asked for their data to be erased, prune the content of the event. Args: storage user_id(str): user id to be checked events(list[synapse.events.EventBase]): sequence of events to be checked is_peeking(bool): should be True if: * the user is not currently a member of the room, and: * the user has not been a member of the room since the given events always_include_ids (set(event_id)): set of event ids to specifically include (unless sender is ignored) filter_send_to_client (bool): Whether we're checking an event that's going to be sent to a client. This might not always be the case since this function can also be called to check whether a user can see the state at a given point. Returns: list[synapse.events.EventBase] """ # Filter out events that have been soft failed so that we don't relay them # to clients. events = [e for e in events if not e.internal_metadata.is_soft_failed()] types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id)) event_id_to_state = await storage.state.get_state_for_events( frozenset(e.event_id for e in events), state_filter=StateFilter.from_types(types), ) ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( AccountDataTypes.IGNORED_USER_LIST, user_id ) ignore_list = frozenset() if ignore_dict_content: ignored_users_dict = ignore_dict_content.get("ignored_users", {}) if isinstance(ignored_users_dict, dict): ignore_list = frozenset(ignored_users_dict.keys()) erased_senders = await storage.main.are_users_erased((e.sender for e in events)) if filter_send_to_client: room_ids = {e.room_id for e in events} retention_policies = {} for room_id in room_ids: retention_policies[ room_id ] = await storage.main.get_retention_policy_for_room(room_id) def allowed(event): """ Args: event (synapse.events.EventBase): event to check Returns: None|EventBase: None if the user cannot see this event at all a redacted copy of the event if they can only see a redacted version the original event if they can see it as normal. """ # Only run some checks if these events aren't about to be sent to clients. This is # because, if this is not the case, we're probably only checking if the users can # see events in the room at that point in the DAG, and that shouldn't be decided # on those checks. if filter_send_to_client: if event.type == "org.matrix.dummy_event": return None if not event.is_state() and event.sender in ignore_list: return None # Until MSC2261 has landed we can't redact malicious alias events, so for # now we temporarily filter out m.room.aliases entirely to mitigate # abuse, while we spec a better solution to advertising aliases # on rooms. if event.type == EventTypes.Aliases: return None # Don't try to apply the room's retention policy if the event is a state # event, as MSC1763 states that retention is only considered for non-state # events. if not event.is_state(): retention_policy = retention_policies[event.room_id] max_lifetime = retention_policy.get("max_lifetime") if max_lifetime is not None: oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime if event.origin_server_ts < oldest_allowed_ts: return None if event.event_id in always_include_ids: return event state = event_id_to_state[event.event_id] # get the room_visibility at the time of the event. visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) if visibility_event: visibility = visibility_event.content.get("history_visibility", "shared") else: visibility = "shared" if visibility not in VISIBILITY_PRIORITY: visibility = "shared" # Always allow history visibility events on boundaries. This is done # by setting the effective visibility to the least restrictive # of the old vs new. if event.type == EventTypes.RoomHistoryVisibility: prev_content = event.unsigned.get("prev_content", {}) prev_visibility = prev_content.get("history_visibility", None) if prev_visibility not in VISIBILITY_PRIORITY: prev_visibility = "shared" new_priority = VISIBILITY_PRIORITY.index(visibility) old_priority = VISIBILITY_PRIORITY.index(prev_visibility) if old_priority < new_priority: visibility = prev_visibility # likewise, if the event is the user's own membership event, use # the 'most joined' membership membership = None if event.type == EventTypes.Member and event.state_key == user_id: membership = event.content.get("membership", None) if membership not in MEMBERSHIP_PRIORITY: membership = "leave" prev_content = event.unsigned.get("prev_content", {}) prev_membership = prev_content.get("membership", None) if prev_membership not in MEMBERSHIP_PRIORITY: prev_membership = "leave" # Always allow the user to see their own leave events, otherwise # they won't see the room disappear if they reject the invite if membership == "leave" and ( prev_membership == "join" or prev_membership == "invite" ): return event new_priority = MEMBERSHIP_PRIORITY.index(membership) old_priority = MEMBERSHIP_PRIORITY.index(prev_membership) if old_priority < new_priority: membership = prev_membership # otherwise, get the user's membership at the time of the event. if membership is None: membership_event = state.get((EventTypes.Member, user_id), None) if membership_event: membership = membership_event.membership # if the user was a member of the room at the time of the event, # they can see it. if membership == Membership.JOIN: return event # otherwise, it depends on the room visibility. if visibility == "joined": # we weren't a member at the time of the event, so we can't # see this event. return None elif visibility == "invited": # user can also see the event if they were *invited* at the time # of the event. return event if membership == Membership.INVITE else None elif visibility == "shared" and is_peeking: # if the visibility is shared, users cannot see the event unless # they have *subequently* joined the room (or were members at the # time, of course) # # XXX: if the user has subsequently joined and then left again, # ideally we would share history up to the point they left. But # we don't know when they left. We just treat it as though they # never joined, and restrict access. return None # the visibility is either shared or world_readable, and the user was # not a member at the time. We allow it, provided the original sender # has not requested their data to be erased, in which case, we return # a redacted version. if erased_senders[event.sender]: return prune_event(event) return event # check each event: gives an iterable[None|EventBase] filtered_events = map(allowed, events) # remove the None entries filtered_events = filter(operator.truth, filtered_events) # we turn it into a list before returning it. return list(filtered_events)
async def filter_events_for_client( storage: Storage, user_id, events, is_peeking=False, always_include_ids=frozenset(), filter_send_to_client=True, ): """ Check which events a user is allowed to see. If the user can see the event but its sender asked for their data to be erased, prune the content of the event. Args: storage user_id(str): user id to be checked events(list[synapse.events.EventBase]): sequence of events to be checked is_peeking(bool): should be True if: * the user is not currently a member of the room, and: * the user has not been a member of the room since the given events always_include_ids (set(event_id)): set of event ids to specifically include (unless sender is ignored) filter_send_to_client (bool): Whether we're checking an event that's going to be sent to a client. This might not always be the case since this function can also be called to check whether a user can see the state at a given point. Returns: list[synapse.events.EventBase] """ # Filter out events that have been soft failed so that we don't relay them # to clients. events = [e for e in events if not e.internal_metadata.is_soft_failed()] types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id)) event_id_to_state = await storage.state.get_state_for_events( frozenset(e.event_id for e in events), state_filter=StateFilter.from_types(types), ) ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id ) # FIXME: This will explode if people upload something incorrect. ignore_list = frozenset( ignore_dict_content.get("ignored_users", {}).keys() if ignore_dict_content else [] ) erased_senders = await storage.main.are_users_erased((e.sender for e in events)) if filter_send_to_client: room_ids = {e.room_id for e in events} retention_policies = {} for room_id in room_ids: retention_policies[ room_id ] = await storage.main.get_retention_policy_for_room(room_id) def allowed(event): """ Args: event (synapse.events.EventBase): event to check Returns: None|EventBase: None if the user cannot see this event at all a redacted copy of the event if they can only see a redacted version the original event if they can see it as normal. """ # Only run some checks if these events aren't about to be sent to clients. This is # because, if this is not the case, we're probably only checking if the users can # see events in the room at that point in the DAG, and that shouldn't be decided # on those checks. if filter_send_to_client: if event.type == "org.matrix.dummy_event": return None if not event.is_state() and event.sender in ignore_list: return None # Until MSC2261 has landed we can't redact malicious alias events, so for # now we temporarily filter out m.room.aliases entirely to mitigate # abuse, while we spec a better solution to advertising aliases # on rooms. if event.type == EventTypes.Aliases: return None # Don't try to apply the room's retention policy if the event is a state # event, as MSC1763 states that retention is only considered for non-state # events. if not event.is_state(): retention_policy = retention_policies[event.room_id] max_lifetime = retention_policy.get("max_lifetime") if max_lifetime is not None: oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime if event.origin_server_ts < oldest_allowed_ts: return None if event.event_id in always_include_ids: return event state = event_id_to_state[event.event_id] # get the room_visibility at the time of the event. visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) if visibility_event: visibility = visibility_event.content.get("history_visibility", "shared") else: visibility = "shared" if visibility not in VISIBILITY_PRIORITY: visibility = "shared" # Always allow history visibility events on boundaries. This is done # by setting the effective visibility to the least restrictive # of the old vs new. if event.type == EventTypes.RoomHistoryVisibility: prev_content = event.unsigned.get("prev_content", {}) prev_visibility = prev_content.get("history_visibility", None) if prev_visibility not in VISIBILITY_PRIORITY: prev_visibility = "shared" new_priority = VISIBILITY_PRIORITY.index(visibility) old_priority = VISIBILITY_PRIORITY.index(prev_visibility) if old_priority < new_priority: visibility = prev_visibility # likewise, if the event is the user's own membership event, use # the 'most joined' membership membership = None if event.type == EventTypes.Member and event.state_key == user_id: membership = event.content.get("membership", None) if membership not in MEMBERSHIP_PRIORITY: membership = "leave" prev_content = event.unsigned.get("prev_content", {}) prev_membership = prev_content.get("membership", None) if prev_membership not in MEMBERSHIP_PRIORITY: prev_membership = "leave" # Always allow the user to see their own leave events, otherwise # they won't see the room disappear if they reject the invite if membership == "leave" and ( prev_membership == "join" or prev_membership == "invite" ): return event new_priority = MEMBERSHIP_PRIORITY.index(membership) old_priority = MEMBERSHIP_PRIORITY.index(prev_membership) if old_priority < new_priority: membership = prev_membership # otherwise, get the user's membership at the time of the event. if membership is None: membership_event = state.get((EventTypes.Member, user_id), None) if membership_event: membership = membership_event.membership # if the user was a member of the room at the time of the event, # they can see it. if membership == Membership.JOIN: return event # otherwise, it depends on the room visibility. if visibility == "joined": # we weren't a member at the time of the event, so we can't # see this event. return None elif visibility == "invited": # user can also see the event if they were *invited* at the time # of the event. return event if membership == Membership.INVITE else None elif visibility == "shared" and is_peeking: # if the visibility is shared, users cannot see the event unless # they have *subequently* joined the room (or were members at the # time, of course) # # XXX: if the user has subsequently joined and then left again, # ideally we would share history up to the point they left. But # we don't know when they left. We just treat it as though they # never joined, and restrict access. return None # the visibility is either shared or world_readable, and the user was # not a member at the time. We allow it, provided the original sender # has not requested their data to be erased, in which case, we return # a redacted version. if erased_senders[event.sender]: return prune_event(event) return event # check each event: gives an iterable[None|EventBase] filtered_events = map(allowed, events) # remove the None entries filtered_events = filter(operator.truth, filtered_events) # we turn it into a list before returning it. return list(filtered_events)
https://github.com/matrix-org/synapse/issues/8357
2020-09-20 07:53:32,780 - synapse.http.server - 80 - ERROR - GET-8896 - Failed handle request via 'SyncRestServlet': <XForwardedForRequest at 0xffff882be940 method='GET' uri='/_matrix/client/r0/sync?fil> Traceback (most recent call last): File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper callback_return = await self._async_render(request) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/http/server.py", line 406, in _async_render callback_return = await raw_callback_return File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/rest/client/v2_alpha/sync.py", line 174, in on_GET sync_result = await self.sync_handler.wait_for_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 278, in wait_for_sync_for_user res = await self.response_cache.wrap( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 310, in _wait_for_sync_for_user result = await self.current_sync_for_user( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 342, in current_sync_for_user return await self.generate_sync_result(sync_config, since_token, full_state) File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 998, in generate_sync_result res = await self._generate_sync_entry_for_rooms( File "/home/ubuntu/synapse/env/lib/python3.8/site-packages/synapse/handlers/sync.py", line 1395, in _generate_sync_entry_for_rooms ignored_users = ignored_account_data.get("ignored_users", {}).keys() AttributeError: 'list' object has no attribute 'keys'
AttributeError
def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database.""" def str_or_none(val: Any) -> Optional[str]: return val if isinstance(val, str) else None self.db_pool.simple_insert_many_txn( txn, table="room_memberships", values=[ { "event_id": event.event_id, "user_id": event.state_key, "sender": event.user_id, "room_id": event.room_id, "membership": event.membership, "display_name": str_or_none(event.content.get("displayname")), "avatar_url": str_or_none(event.content.get("avatar_url")), } for event in events ], ) for event in events: txn.call_after( self.store._membership_stream_cache.entity_has_changed, event.state_key, event.internal_metadata.stream_ordering, ) txn.call_after( self.store.get_invited_rooms_for_local_user.invalidate, (event.state_key,), ) # We update the local_current_membership table only if the event is # "current", i.e., its something that has just happened. # # This will usually get updated by the `current_state_events` handling, # unless its an outlier, and an outlier is only "current" if it's an "out of # band membership", like a remote invite or a rejection of a remote invite. if ( self.is_mine_id(event.state_key) and not backfilled and event.internal_metadata.is_outlier() and event.internal_metadata.is_out_of_band_membership() ): self.db_pool.simple_upsert_txn( txn, table="local_current_membership", keyvalues={"room_id": event.room_id, "user_id": event.state_key}, values={ "event_id": event.event_id, "membership": event.membership, }, )
def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database.""" self.db_pool.simple_insert_many_txn( txn, table="room_memberships", values=[ { "event_id": event.event_id, "user_id": event.state_key, "sender": event.user_id, "room_id": event.room_id, "membership": event.membership, "display_name": event.content.get("displayname", None), "avatar_url": event.content.get("avatar_url", None), } for event in events ], ) for event in events: txn.call_after( self.store._membership_stream_cache.entity_has_changed, event.state_key, event.internal_metadata.stream_ordering, ) txn.call_after( self.store.get_invited_rooms_for_local_user.invalidate, (event.state_key,), ) # We update the local_current_membership table only if the event is # "current", i.e., its something that has just happened. # # This will usually get updated by the `current_state_events` handling, # unless its an outlier, and an outlier is only "current" if it's an "out of # band membership", like a remote invite or a rejection of a remote invite. if ( self.is_mine_id(event.state_key) and not backfilled and event.internal_metadata.is_outlier() and event.internal_metadata.is_out_of_band_membership() ): self.db_pool.simple_upsert_txn( txn, table="local_current_membership", keyvalues={"room_id": event.room_id, "user_id": event.state_key}, values={ "event_id": event.event_id, "membership": event.membership, }, )
https://github.com/matrix-org/synapse/issues/8340
synapse_1 | 2020-09-17 15:44:59,053 - synapse.http.server - 84 - ERROR - POST-1715 - Failed handle request via 'JoinRoomAliasServlet': <XForwardedForRequest at 0x7f175a2b9550 method='POST' uri='/_matrix/client/r0/join/%23matrix-spec%3Amatrix.org' clientproto='HTTP/1.1' site=8008> synapse_1 | Traceback (most recent call last): synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/http/server.py", line 229, in _async_render_wrapper synapse_1 | callback_return = await self._async_render(request) synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/http/server.py", line 406, in _async_render synapse_1 | callback_return = await raw_callback_return synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/rest/client/v1/room.py", line 318, in on_POST synapse_1 | third_party_signed=content.get("third_party_signed", None), synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/handlers/room_member.py", line 317, in update_membership synapse_1 | require_consent=require_consent, synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/handlers/room_member.py", line 503, in _update_membership synapse_1 | requester, remote_room_hosts, room_id, target, content synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/handlers/room_member.py", line 1032, in _remote_join synapse_1 | remote_room_hosts, room_id, user.to_string(), content synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/handlers/federation.py", line 1360, in do_invite_join synapse_1 | origin, auth_chain, state, event, room_version_obj synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/handlers/federation.py", line 2045, in _persist_auth_tree synapse_1 | for e in itertools.chain(auth_events, state) synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/handlers/federation.py", line 2923, in persist_events_and_notify synapse_1 | event_and_contexts, backfilled=backfilled synapse_1 | File "/usr/local/lib/python3.7/site-packages/synapse/storage/persist_events.py", line 226, in persist_events synapse_1 | defer.gatherResults(deferreds, consumeErrors=True) synapse_1 | twisted.internet.defer.FirstError: FirstError[#0, [Failure instance: Traceback: <class 'sqlite3.InterfaceError'>: Error binding parameter 1 - probably unsupported type. synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/internet/defer.py:654:_runCallbacks synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/internet/defer.py:1475:gotResult synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator synapse_1 | --- <exception caught here> --- synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/persist_events.py:148:handle_queue_loop synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/persist_events.py:254:persisting_queue synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/persist_events.py:430:_persist_events synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/databases/main/events.py:177:_persist_events_and_state_updates synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:527:runInteraction synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:575:runWithConnection synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/threadpool.py:250:inContext synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/threadpool.py:266:<lambda> synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/context.py:122:callWithContext synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/context.py:85:callWithContext synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/enterprise/adbapi.py:306:_runWithConnection synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/python/compat.py:464:reraise synapse_1 | /usr/local/lib/python3.7/site-packages/twisted/enterprise/adbapi.py:297:_runWithConnection synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:572:inner_func synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:416:new_transaction synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/logging/utils.py:71:wrapped synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/databases/main/events.py:409:_persist_events_txn synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/databases/main/events.py:947:_update_metadata_tables_txn synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/databases/main/events.py:1116:_store_room_members_txn synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:685:simple_insert_many_txn synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:211:executemany synapse_1 | /usr/local/lib/python3.7/site-packages/synapse/storage/database.py:234:_do_execute synapse_1 | ]]
consumeError
async def on_POST(self, request): body = parse_json_object_from_request(request) client_addr = request.getClientIP() self.ratelimiter.ratelimit(client_addr, update=False) kind = b"user" if b"kind" in request.args: kind = request.args[b"kind"][0] if kind == b"guest": ret = await self._do_guest_registration(body, address=client_addr) return ret elif kind != b"user": raise UnrecognizedRequestError( "Do not understand membership kind: %s" % (kind.decode("utf8"),) ) # Pull out the provided username and do basic sanity checks early since # the auth layer will store these in sessions. desired_username = None if "username" in body: if not isinstance(body["username"], str) or len(body["username"]) > 512: raise SynapseError(400, "Invalid username") desired_username = body["username"] appservice = None if self.auth.has_access_token(request): appservice = self.auth.get_appservice_by_req(request) # fork off as soon as possible for ASes which have completely # different registration flows to normal users # == Application Service Registration == if appservice: # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. desired_username = body.get("user", desired_username) # XXX we should check that desired_username is valid. Currently # we give appservices carte blanche for any insanity in mxids, # because the IRC bridges rely on being able to register stupid # IDs. access_token = self.auth.get_access_token_from_request(request) if not isinstance(desired_username, str): raise SynapseError(400, "Desired Username is missing or not a string") result = await self._do_appservice_registration( desired_username, access_token, body ) return 200, result # == Normal User Registration == (everyone else) if not self._registration_enabled: raise SynapseError(403, "Registration has been disabled") # For regular registration, convert the provided username to lowercase # before attempting to register it. This should mean that people who try # to register with upper-case in their usernames don't get a nasty surprise. # # Note that we treat usernames case-insensitively in login, so they are # free to carry on imagining that their username is CrAzYh4cKeR if that # keeps them happy. if desired_username is not None: desired_username = desired_username.lower() # Check if this account is upgrading from a guest account. guest_access_token = body.get("guest_access_token", None) # Pull out the provided password and do basic sanity checks early. # # Note that we remove the password from the body since the auth layer # will store the body in the session and we don't want a plaintext # password store there. password = body.pop("password", None) if password is not None: if not isinstance(password, str) or len(password) > 512: raise SynapseError(400, "Invalid password") self.password_policy_handler.validate_password(password) if "initial_device_display_name" in body and password is None: # ignore 'initial_device_display_name' if sent without # a password to work around a client bug where it sent # the 'initial_device_display_name' param alone, wiping out # the original registration params logger.warning("Ignoring initial_device_display_name without password") del body["initial_device_display_name"] session_id = self.auth_handler.get_session_id(body) registered_user_id = None password_hash = None if session_id: # if we get a registered user id out of here, it means we previously # registered a user for this session, so we could just return the # user here. We carry on and go through the auth checks though, # for paranoia. registered_user_id = await self.auth_handler.get_session_data( session_id, "registered_user_id", None ) # Extract the previously-hashed password from the session. password_hash = await self.auth_handler.get_session_data( session_id, "password_hash", None ) # Ensure that the username is valid. if desired_username is not None: await self.registration_handler.check_username( desired_username, guest_access_token=guest_access_token, assigned_user_id=registered_user_id, ) # Check if the user-interactive authentication flows are complete, if # not this will raise a user-interactive auth error. try: auth_result, params, session_id = await self.auth_handler.check_ui_auth( self._registration_flows, request, body, self.hs.get_ip_from_request(request), "register a new account", ) except InteractiveAuthIncompleteError as e: # The user needs to provide more steps to complete auth. # # Hash the password and store it with the session since the client # is not required to provide the password again. # # If a password hash was previously stored we will not attempt to # re-hash and store it for efficiency. This assumes the password # does not change throughout the authentication flow, but this # should be fine since the data is meant to be consistent. if not password_hash and password: password_hash = await self.auth_handler.hash(password) await self.auth_handler.set_session_data( e.session_id, "password_hash", password_hash ) raise # Check that we're not trying to register a denied 3pid. # # the user-facing checks will probably already have happened in # /register/email/requestToken when we requested a 3pid, but that's not # guaranteed. if auth_result: for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: if login_type in auth_result: medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( 403, "Third party identifiers (email/phone numbers)" + " are not authorized on this server", Codes.THREEPID_DENIED, ) if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", registered_user_id ) # don't re-register the threepids registered = False else: # If we have a password in this request, prefer it. Otherwise, there # might be a password hash from an earlier request. if password: password_hash = await self.auth_handler.hash(password) if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) desired_username = params.get("username", None) guest_access_token = params.get("guest_access_token", None) if desired_username is not None: desired_username = desired_username.lower() threepid = None if auth_result: threepid = auth_result.get(LoginType.EMAIL_IDENTITY) # Also check that we're not trying to register a 3pid that's already # been registered. # # This has probably happened in /register/email/requestToken as well, # but if a user hits this endpoint twice then clicks on each link from # the two activation emails, they would register the same 3pid twice. for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: if login_type in auth_result: medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] # For emails, canonicalise the address. # We store all email addresses canonicalised in the DB. # (See on_POST in EmailThreepidRequestTokenRestServlet # in synapse/rest/client/v2_alpha/account.py) if medium == "email": try: address = canonicalise_email(address) except ValueError as e: raise SynapseError(400, str(e)) existing_user_id = await self.store.get_user_id_by_threepid( medium, address ) if existing_user_id is not None: raise SynapseError( 400, "%s is already in use" % medium, Codes.THREEPID_IN_USE, ) entries = await self.store.get_user_agents_ips_to_ui_auth_session(session_id) registered_user_id = await self.registration_handler.register_user( localpart=desired_username, password_hash=password_hash, guest_access_token=guest_access_token, threepid=threepid, address=client_addr, user_agent_ips=entries, ) # Necessary due to auth checks prior to the threepid being # written to the db if threepid: if is_threepid_reserved( self.hs.config.mau_limits_reserved_threepids, threepid ): await self.store.upsert_monthly_active_user(registered_user_id) # Remember that the user account has been registered (and the user # ID it was registered with, since it might not have been specified). await self.auth_handler.set_session_data( session_id, "registered_user_id", registered_user_id ) registered = True return_dict = await self._create_registration_details(registered_user_id, params) if registered: await self.registration_handler.post_registration_actions( user_id=registered_user_id, auth_result=auth_result, access_token=return_dict.get("access_token"), ) return 200, return_dict
async def on_POST(self, request): body = parse_json_object_from_request(request) client_addr = request.getClientIP() self.ratelimiter.ratelimit(client_addr, update=False) kind = b"user" if b"kind" in request.args: kind = request.args[b"kind"][0] if kind == b"guest": ret = await self._do_guest_registration(body, address=client_addr) return ret elif kind != b"user": raise UnrecognizedRequestError( "Do not understand membership kind: %s" % (kind.decode("utf8"),) ) # Pull out the provided username and do basic sanity checks early since # the auth layer will store these in sessions. desired_username = None if "username" in body: if not isinstance(body["username"], str) or len(body["username"]) > 512: raise SynapseError(400, "Invalid username") desired_username = body["username"] appservice = None if self.auth.has_access_token(request): appservice = self.auth.get_appservice_by_req(request) # fork off as soon as possible for ASes which have completely # different registration flows to normal users # == Application Service Registration == if appservice: # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. desired_username = body.get("user", desired_username) # XXX we should check that desired_username is valid. Currently # we give appservices carte blanche for any insanity in mxids, # because the IRC bridges rely on being able to register stupid # IDs. access_token = self.auth.get_access_token_from_request(request) if isinstance(desired_username, str): result = await self._do_appservice_registration( desired_username, access_token, body ) return 200, result # we throw for non 200 responses # == Normal User Registration == (everyone else) if not self._registration_enabled: raise SynapseError(403, "Registration has been disabled") # For regular registration, convert the provided username to lowercase # before attempting to register it. This should mean that people who try # to register with upper-case in their usernames don't get a nasty surprise. # # Note that we treat usernames case-insensitively in login, so they are # free to carry on imagining that their username is CrAzYh4cKeR if that # keeps them happy. if desired_username is not None: desired_username = desired_username.lower() # Check if this account is upgrading from a guest account. guest_access_token = body.get("guest_access_token", None) # Pull out the provided password and do basic sanity checks early. # # Note that we remove the password from the body since the auth layer # will store the body in the session and we don't want a plaintext # password store there. password = body.pop("password", None) if password is not None: if not isinstance(password, str) or len(password) > 512: raise SynapseError(400, "Invalid password") self.password_policy_handler.validate_password(password) if "initial_device_display_name" in body and password is None: # ignore 'initial_device_display_name' if sent without # a password to work around a client bug where it sent # the 'initial_device_display_name' param alone, wiping out # the original registration params logger.warning("Ignoring initial_device_display_name without password") del body["initial_device_display_name"] session_id = self.auth_handler.get_session_id(body) registered_user_id = None password_hash = None if session_id: # if we get a registered user id out of here, it means we previously # registered a user for this session, so we could just return the # user here. We carry on and go through the auth checks though, # for paranoia. registered_user_id = await self.auth_handler.get_session_data( session_id, "registered_user_id", None ) # Extract the previously-hashed password from the session. password_hash = await self.auth_handler.get_session_data( session_id, "password_hash", None ) # Ensure that the username is valid. if desired_username is not None: await self.registration_handler.check_username( desired_username, guest_access_token=guest_access_token, assigned_user_id=registered_user_id, ) # Check if the user-interactive authentication flows are complete, if # not this will raise a user-interactive auth error. try: auth_result, params, session_id = await self.auth_handler.check_ui_auth( self._registration_flows, request, body, self.hs.get_ip_from_request(request), "register a new account", ) except InteractiveAuthIncompleteError as e: # The user needs to provide more steps to complete auth. # # Hash the password and store it with the session since the client # is not required to provide the password again. # # If a password hash was previously stored we will not attempt to # re-hash and store it for efficiency. This assumes the password # does not change throughout the authentication flow, but this # should be fine since the data is meant to be consistent. if not password_hash and password: password_hash = await self.auth_handler.hash(password) await self.auth_handler.set_session_data( e.session_id, "password_hash", password_hash ) raise # Check that we're not trying to register a denied 3pid. # # the user-facing checks will probably already have happened in # /register/email/requestToken when we requested a 3pid, but that's not # guaranteed. if auth_result: for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: if login_type in auth_result: medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( 403, "Third party identifiers (email/phone numbers)" + " are not authorized on this server", Codes.THREEPID_DENIED, ) if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", registered_user_id ) # don't re-register the threepids registered = False else: # If we have a password in this request, prefer it. Otherwise, there # might be a password hash from an earlier request. if password: password_hash = await self.auth_handler.hash(password) if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) desired_username = params.get("username", None) guest_access_token = params.get("guest_access_token", None) if desired_username is not None: desired_username = desired_username.lower() threepid = None if auth_result: threepid = auth_result.get(LoginType.EMAIL_IDENTITY) # Also check that we're not trying to register a 3pid that's already # been registered. # # This has probably happened in /register/email/requestToken as well, # but if a user hits this endpoint twice then clicks on each link from # the two activation emails, they would register the same 3pid twice. for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: if login_type in auth_result: medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] # For emails, canonicalise the address. # We store all email addresses canonicalised in the DB. # (See on_POST in EmailThreepidRequestTokenRestServlet # in synapse/rest/client/v2_alpha/account.py) if medium == "email": try: address = canonicalise_email(address) except ValueError as e: raise SynapseError(400, str(e)) existing_user_id = await self.store.get_user_id_by_threepid( medium, address ) if existing_user_id is not None: raise SynapseError( 400, "%s is already in use" % medium, Codes.THREEPID_IN_USE, ) entries = await self.store.get_user_agents_ips_to_ui_auth_session(session_id) registered_user_id = await self.registration_handler.register_user( localpart=desired_username, password_hash=password_hash, guest_access_token=guest_access_token, threepid=threepid, address=client_addr, user_agent_ips=entries, ) # Necessary due to auth checks prior to the threepid being # written to the db if threepid: if is_threepid_reserved( self.hs.config.mau_limits_reserved_threepids, threepid ): await self.store.upsert_monthly_active_user(registered_user_id) # Remember that the user account has been registered (and the user # ID it was registered with, since it might not have been specified). await self.auth_handler.set_session_data( session_id, "registered_user_id", registered_user_id ) registered = True return_dict = await self._create_registration_details(registered_user_id, params) if registered: await self.registration_handler.post_registration_actions( user_id=registered_user_id, auth_result=auth_result, access_token=return_dict.get("access_token"), ) return 200, return_dict
https://github.com/matrix-org/synapse/issues/2832
янв 27 20:08:26 tad python[11795]: 2018-01-27 20:08:26,131 - synapse.http.server - 145 - ERROR - POST-2331- Failed handle request synapse.http.server._async_render on <synapse.rest.ClientRestResource object at 0x7f9e3d39a550>: <SynapseRequest at 0x7f9e3419b368 method=POST uri=/_matrix/client/r0/register?user_id=%40telegram_94477076%3Ahomeserver.ru&amp;access_token=<redacted> clientproto=HTTP/1.0 site=8008>: Traceback (most recent call last): янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 588, in _runCallbacks янв 27 20:08:26 tad python[11795]: current.result = callback(current.result, *args, **kw) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1184, in gotResult янв 27 20:08:26 tad python[11795]: _inlineCallbacks(r, g, deferred) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1126, in _inlineCallbacks янв 27 20:08:26 tad python[11795]: result = result.throwExceptionIntoGenerator(g) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/python/failure.py", line 389, in throwExceptionIntoGenerator янв 27 20:08:26 tad python[11795]: return g.throw(self.type, self.value, self.tb) янв 27 20:08:26 tad python[11795]: --- <exception caught here> --- янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/synapse/http/server.py", line 117, in wrapped_request_handler янв 27 20:08:26 tad python[11795]: yield request_handler(self, request, request_metrics) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1126, in _inlineCallbacks янв 27 20:08:26 tad python[11795]: result = result.throwExceptionIntoGenerator(g) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/python/failure.py", line 389, in throwExceptionIntoGenerator янв 27 20:08:26 tad python[11795]: return g.throw(self.type, self.value, self.tb) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/synapse/http/server.py", line 263, in _async_render янв 27 20:08:26 tad python[11795]: callback_return = yield callback(request, **kwargs) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1128, in _inlineCallbacks янв 27 20:08:26 tad python[11795]: result = g.send(result) янв 27 20:08:26 tad python[11795]: File "/usr/lib/python2.7/dist-packages/synapse/rest/client/v2_alpha/register.py", line 240, in on_POST янв 27 20:08:26 tad python[11795]: defer.returnValue((200, result)) # we throw for non 200 responses янв 27 20:08:26 tad python[11795]: exceptions.UnboundLocalError: local variable 'result' referenced before assignment
exceptions.UnboundLocalError
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): """ Set up Python stdlib logging. """ if log_config is None: log_format = ( "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" " - %(message)s" ) logger = logging.getLogger("") logger.setLevel(logging.INFO) logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) formatter = logging.Formatter(log_format) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) else: logging.config.dictConfig(log_config) # We add a log record factory that runs all messages through the # LoggingContextFilter so that we get the context *at the time we log* # rather than when we write to a handler. This can be done in config using # filter options, but care must when using e.g. MemoryHandler to buffer # writes. log_filter = LoggingContextFilter(request="") old_factory = logging.getLogRecordFactory() def factory(*args, **kwargs): record = old_factory(*args, **kwargs) log_filter.filter(record) return record logging.setLogRecordFactory(factory) # Route Twisted's native logging through to the standard library logging # system. observer = STDLibLogObserver() threadlocal = threading.local() def _log(event): if "log_text" in event: if event["log_text"].startswith("DNSDatagramProtocol starting on "): return if event["log_text"].startswith("(UDP Port "): return if event["log_text"].startswith("Timing out client"): return # this is a workaround to make sure we don't get stack overflows when the # logging system raises an error which is written to stderr which is redirected # to the logging system, etc. if getattr(threadlocal, "active", False): # write the text of the event, if any, to the *real* stderr (which may # be redirected to /dev/null, but there's not much we can do) try: event_text = eventAsText(event) print("logging during logging: %s" % event_text, file=sys.__stderr__) except Exception: # gah. pass return try: threadlocal.active = True return observer(event) finally: threadlocal.active = False logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio) if not config.no_redirect_stdio: print("Redirected stdout/stderr to logs") return observer
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): """ Set up Python stdlib logging. """ if log_config is None: log_format = ( "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" " - %(message)s" ) logger = logging.getLogger("") logger.setLevel(logging.INFO) logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) formatter = logging.Formatter(log_format) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) else: logging.config.dictConfig(log_config) # We add a log record factory that runs all messages through the # LoggingContextFilter so that we get the context *at the time we log* # rather than when we write to a handler. This can be done in config using # filter options, but care must when using e.g. MemoryHandler to buffer # writes. log_filter = LoggingContextFilter(request="") old_factory = logging.getLogRecordFactory() def factory(*args, **kwargs): record = old_factory(*args, **kwargs) log_filter.filter(record) return record logging.setLogRecordFactory(factory) # Route Twisted's native logging through to the standard library logging # system. observer = STDLibLogObserver() def _log(event): if "log_text" in event: if event["log_text"].startswith("DNSDatagramProtocol starting on "): return if event["log_text"].startswith("(UDP Port "): return if event["log_text"].startswith("Timing out client"): return return observer(event) logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio) if not config.no_redirect_stdio: print("Redirected stdout/stderr to logs") return observer
https://github.com/matrix-org/synapse/issues/4240
2018-10-25 15:58:33,973 - twisted - 243 - ERROR - POST-299240- Traceback (most recent call last): 2018-10-25 15:58:33,973 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/handlers.py", line 76, in emit 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- if self.shouldRollover(record): 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/handlers.py", line 156, in shouldRollover 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- msg = "%s\n" % self.format(record) 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 734, in format 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- return fmt.format(record) 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 476, in format 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- raise e 2018-10-25 15:58:33,977 - twisted - 243 - ERROR - POST-299240- UnicodeDecodeError: 'ascii' codec can't decode byte 0xd0 in position 46: ordinal not in range(128) 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- Logged from file _base.py, line 254 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- Traceback (most recent call last): 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 861, in emit 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- msg = self.format(record) 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 734, in format 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- return fmt.format(record) 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 476, in format 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- raise e 2018-10-25 15:58:33,980 - twisted - 243 - ERROR - POST-299240- UnicodeDecodeError: 'ascii' codec can't decode byte 0xd0 in position 46: ordinal not in range(128) 2018-10-25 15:58:33,980 - twisted - 243 - ERROR - POST-299240- Logged from file _base.py, line 254 (etc)
UnicodeDecodeError
def _log(event): if "log_text" in event: if event["log_text"].startswith("DNSDatagramProtocol starting on "): return if event["log_text"].startswith("(UDP Port "): return if event["log_text"].startswith("Timing out client"): return # this is a workaround to make sure we don't get stack overflows when the # logging system raises an error which is written to stderr which is redirected # to the logging system, etc. if getattr(threadlocal, "active", False): # write the text of the event, if any, to the *real* stderr (which may # be redirected to /dev/null, but there's not much we can do) try: event_text = eventAsText(event) print("logging during logging: %s" % event_text, file=sys.__stderr__) except Exception: # gah. pass return try: threadlocal.active = True return observer(event) finally: threadlocal.active = False
def _log(event): if "log_text" in event: if event["log_text"].startswith("DNSDatagramProtocol starting on "): return if event["log_text"].startswith("(UDP Port "): return if event["log_text"].startswith("Timing out client"): return return observer(event)
https://github.com/matrix-org/synapse/issues/4240
2018-10-25 15:58:33,973 - twisted - 243 - ERROR - POST-299240- Traceback (most recent call last): 2018-10-25 15:58:33,973 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/handlers.py", line 76, in emit 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- if self.shouldRollover(record): 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/handlers.py", line 156, in shouldRollover 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- msg = "%s\n" % self.format(record) 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 734, in format 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- return fmt.format(record) 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 476, in format 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- raise e 2018-10-25 15:58:33,977 - twisted - 243 - ERROR - POST-299240- UnicodeDecodeError: 'ascii' codec can't decode byte 0xd0 in position 46: ordinal not in range(128) 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- Logged from file _base.py, line 254 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- Traceback (most recent call last): 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 861, in emit 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- msg = self.format(record) 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 734, in format 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- return fmt.format(record) 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 476, in format 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- raise e 2018-10-25 15:58:33,980 - twisted - 243 - ERROR - POST-299240- UnicodeDecodeError: 'ascii' codec can't decode byte 0xd0 in position 46: ordinal not in range(128) 2018-10-25 15:58:33,980 - twisted - 243 - ERROR - POST-299240- Logged from file _base.py, line 254 (etc)
UnicodeDecodeError
def _purge_history_txn(self, txn, room_id, token_str, delete_local_events): token = RoomStreamToken.parse(token_str) # Tables that should be pruned: # event_auth # event_backward_extremities # event_edges # event_forward_extremities # event_json # event_push_actions # event_reference_hashes # event_relations # event_search # event_to_state_groups # events # rejections # room_depth # state_groups # state_groups_state # we will build a temporary table listing the events so that we don't # have to keep shovelling the list back and forth across the # connection. Annoyingly the python sqlite driver commits the # transaction on CREATE, so let's do this first. # # furthermore, we might already have the table from a previous (failed) # purge attempt, so let's drop the table first. txn.execute("DROP TABLE IF EXISTS events_to_purge") txn.execute( "CREATE TEMPORARY TABLE events_to_purge (" " event_id TEXT NOT NULL," " should_delete BOOLEAN NOT NULL" ")" ) # First ensure that we're not about to delete all the forward extremeties txn.execute( "SELECT e.event_id, e.depth FROM events as e " "INNER JOIN event_forward_extremities as f " "ON e.event_id = f.event_id " "AND e.room_id = f.room_id " "WHERE f.room_id = ?", (room_id,), ) rows = txn.fetchall() max_depth = max(row[1] for row in rows) if max_depth < token.topological: # We need to ensure we don't delete all the events from the database # otherwise we wouldn't be able to send any events (due to not # having any backwards extremeties) raise SynapseError( 400, "topological_ordering is greater than forward extremeties" ) logger.info("[purge] looking for events to delete") should_delete_expr = "state_key IS NULL" should_delete_params = () # type: Tuple[Any, ...] if not delete_local_events: should_delete_expr += " AND event_id NOT LIKE ?" # We include the parameter twice since we use the expression twice should_delete_params += ("%:" + self.hs.hostname, "%:" + self.hs.hostname) should_delete_params += (room_id, token.topological) # Note that we insert events that are outliers and aren't going to be # deleted, as nothing will happen to them. txn.execute( "INSERT INTO events_to_purge" " SELECT event_id, %s" " FROM events AS e LEFT JOIN state_events USING (event_id)" " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?" % (should_delete_expr, should_delete_expr), should_delete_params, ) # We create the indices *after* insertion as that's a lot faster. # create an index on should_delete because later we'll be looking for # the should_delete / shouldn't_delete subsets txn.execute( "CREATE INDEX events_to_purge_should_delete ON events_to_purge(should_delete)" ) # We do joins against events_to_purge for e.g. calculating state # groups to purge, etc., so lets make an index. txn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)") txn.execute("SELECT event_id, should_delete FROM events_to_purge") event_rows = txn.fetchall() logger.info( "[purge] found %i events before cutoff, of which %i can be deleted", len(event_rows), sum(1 for e in event_rows if e[1]), ) logger.info("[purge] Finding new backward extremities") # We calculate the new entries for the backward extremeties by finding # events to be purged that are pointed to by events we're not going to # purge. txn.execute( "SELECT DISTINCT e.event_id FROM events_to_purge AS e" " INNER JOIN event_edges AS ed ON e.event_id = ed.prev_event_id" " LEFT JOIN events_to_purge AS ep2 ON ed.event_id = ep2.event_id" " WHERE ep2.event_id IS NULL" ) new_backwards_extrems = txn.fetchall() logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems) txn.execute("DELETE FROM event_backward_extremities WHERE room_id = ?", (room_id,)) # Update backward extremeties txn.executemany( "INSERT INTO event_backward_extremities (room_id, event_id) VALUES (?, ?)", [(room_id, event_id) for (event_id,) in new_backwards_extrems], ) logger.info("[purge] finding state groups referenced by deleted events") # Get all state groups that are referenced by events that are to be # deleted. txn.execute( """ SELECT DISTINCT state_group FROM events_to_purge INNER JOIN event_to_state_groups USING (event_id) """ ) referenced_state_groups = {sg for (sg,) in txn} logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups) ) logger.info("[purge] removing events from event_to_state_groups") txn.execute( "DELETE FROM event_to_state_groups " "WHERE event_id IN (SELECT event_id from events_to_purge)" ) for event_id, _ in event_rows: txn.call_after(self._get_state_group_for_event.invalidate, (event_id,)) # Delete all remote non-state events for table in ( "events", "event_json", "event_auth", "event_edges", "event_forward_extremities", "event_reference_hashes", "event_relations", "event_search", "rejections", ): logger.info("[purge] removing events from %s", table) txn.execute( "DELETE FROM %s WHERE event_id IN (" " SELECT event_id FROM events_to_purge WHERE should_delete" ")" % (table,) ) # event_push_actions lacks an index on event_id, and has one on # (room_id, event_id) instead. for table in ("event_push_actions",): logger.info("[purge] removing events from %s", table) txn.execute( "DELETE FROM %s WHERE room_id = ? AND event_id IN (" " SELECT event_id FROM events_to_purge WHERE should_delete" ")" % (table,), (room_id,), ) # Mark all state and own events as outliers logger.info("[purge] marking remaining events as outliers") txn.execute( "UPDATE events SET outlier = ?" " WHERE event_id IN (" " SELECT event_id FROM events_to_purge " " WHERE NOT should_delete" ")", (True,), ) # synapse tries to take out an exclusive lock on room_depth whenever it # persists events (because upsert), and once we run this update, we # will block that for the rest of our transaction. # # So, let's stick it at the end so that we don't block event # persistence. # # We do this by calculating the minimum depth of the backwards # extremities. However, the events in event_backward_extremities # are ones we don't have yet so we need to look at the events that # point to it via event_edges table. txn.execute( """ SELECT COALESCE(MIN(depth), 0) FROM event_backward_extremities AS eb INNER JOIN event_edges AS eg ON eg.prev_event_id = eb.event_id INNER JOIN events AS e ON e.event_id = eg.event_id WHERE eb.room_id = ? """, (room_id,), ) (min_depth,) = txn.fetchone() logger.info("[purge] updating room_depth to %d", min_depth) txn.execute( "UPDATE room_depth SET min_depth = ? WHERE room_id = ?", (min_depth, room_id), ) # finally, drop the temp table. this will commit the txn in sqlite, # so make sure to keep this actually last. txn.execute("DROP TABLE events_to_purge") logger.info("[purge] done") return referenced_state_groups
def _purge_history_txn(self, txn, room_id, token_str, delete_local_events): token = RoomStreamToken.parse(token_str) # Tables that should be pruned: # event_auth # event_backward_extremities # event_edges # event_forward_extremities # event_json # event_push_actions # event_reference_hashes # event_search # event_to_state_groups # events # rejections # room_depth # state_groups # state_groups_state # we will build a temporary table listing the events so that we don't # have to keep shovelling the list back and forth across the # connection. Annoyingly the python sqlite driver commits the # transaction on CREATE, so let's do this first. # # furthermore, we might already have the table from a previous (failed) # purge attempt, so let's drop the table first. txn.execute("DROP TABLE IF EXISTS events_to_purge") txn.execute( "CREATE TEMPORARY TABLE events_to_purge (" " event_id TEXT NOT NULL," " should_delete BOOLEAN NOT NULL" ")" ) # First ensure that we're not about to delete all the forward extremeties txn.execute( "SELECT e.event_id, e.depth FROM events as e " "INNER JOIN event_forward_extremities as f " "ON e.event_id = f.event_id " "AND e.room_id = f.room_id " "WHERE f.room_id = ?", (room_id,), ) rows = txn.fetchall() max_depth = max(row[1] for row in rows) if max_depth < token.topological: # We need to ensure we don't delete all the events from the database # otherwise we wouldn't be able to send any events (due to not # having any backwards extremeties) raise SynapseError( 400, "topological_ordering is greater than forward extremeties" ) logger.info("[purge] looking for events to delete") should_delete_expr = "state_key IS NULL" should_delete_params = () # type: Tuple[Any, ...] if not delete_local_events: should_delete_expr += " AND event_id NOT LIKE ?" # We include the parameter twice since we use the expression twice should_delete_params += ("%:" + self.hs.hostname, "%:" + self.hs.hostname) should_delete_params += (room_id, token.topological) # Note that we insert events that are outliers and aren't going to be # deleted, as nothing will happen to them. txn.execute( "INSERT INTO events_to_purge" " SELECT event_id, %s" " FROM events AS e LEFT JOIN state_events USING (event_id)" " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?" % (should_delete_expr, should_delete_expr), should_delete_params, ) # We create the indices *after* insertion as that's a lot faster. # create an index on should_delete because later we'll be looking for # the should_delete / shouldn't_delete subsets txn.execute( "CREATE INDEX events_to_purge_should_delete ON events_to_purge(should_delete)" ) # We do joins against events_to_purge for e.g. calculating state # groups to purge, etc., so lets make an index. txn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)") txn.execute("SELECT event_id, should_delete FROM events_to_purge") event_rows = txn.fetchall() logger.info( "[purge] found %i events before cutoff, of which %i can be deleted", len(event_rows), sum(1 for e in event_rows if e[1]), ) logger.info("[purge] Finding new backward extremities") # We calculate the new entries for the backward extremeties by finding # events to be purged that are pointed to by events we're not going to # purge. txn.execute( "SELECT DISTINCT e.event_id FROM events_to_purge AS e" " INNER JOIN event_edges AS ed ON e.event_id = ed.prev_event_id" " LEFT JOIN events_to_purge AS ep2 ON ed.event_id = ep2.event_id" " WHERE ep2.event_id IS NULL" ) new_backwards_extrems = txn.fetchall() logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems) txn.execute("DELETE FROM event_backward_extremities WHERE room_id = ?", (room_id,)) # Update backward extremeties txn.executemany( "INSERT INTO event_backward_extremities (room_id, event_id) VALUES (?, ?)", [(room_id, event_id) for (event_id,) in new_backwards_extrems], ) logger.info("[purge] finding state groups referenced by deleted events") # Get all state groups that are referenced by events that are to be # deleted. txn.execute( """ SELECT DISTINCT state_group FROM events_to_purge INNER JOIN event_to_state_groups USING (event_id) """ ) referenced_state_groups = {sg for (sg,) in txn} logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups) ) logger.info("[purge] removing events from event_to_state_groups") txn.execute( "DELETE FROM event_to_state_groups " "WHERE event_id IN (SELECT event_id from events_to_purge)" ) for event_id, _ in event_rows: txn.call_after(self._get_state_group_for_event.invalidate, (event_id,)) # Delete all remote non-state events for table in ( "events", "event_json", "event_auth", "event_edges", "event_forward_extremities", "event_reference_hashes", "event_search", "rejections", ): logger.info("[purge] removing events from %s", table) txn.execute( "DELETE FROM %s WHERE event_id IN (" " SELECT event_id FROM events_to_purge WHERE should_delete" ")" % (table,) ) # event_push_actions lacks an index on event_id, and has one on # (room_id, event_id) instead. for table in ("event_push_actions",): logger.info("[purge] removing events from %s", table) txn.execute( "DELETE FROM %s WHERE room_id = ? AND event_id IN (" " SELECT event_id FROM events_to_purge WHERE should_delete" ")" % (table,), (room_id,), ) # Mark all state and own events as outliers logger.info("[purge] marking remaining events as outliers") txn.execute( "UPDATE events SET outlier = ?" " WHERE event_id IN (" " SELECT event_id FROM events_to_purge " " WHERE NOT should_delete" ")", (True,), ) # synapse tries to take out an exclusive lock on room_depth whenever it # persists events (because upsert), and once we run this update, we # will block that for the rest of our transaction. # # So, let's stick it at the end so that we don't block event # persistence. # # We do this by calculating the minimum depth of the backwards # extremities. However, the events in event_backward_extremities # are ones we don't have yet so we need to look at the events that # point to it via event_edges table. txn.execute( """ SELECT COALESCE(MIN(depth), 0) FROM event_backward_extremities AS eb INNER JOIN event_edges AS eg ON eg.prev_event_id = eb.event_id INNER JOIN events AS e ON e.event_id = eg.event_id WHERE eb.room_id = ? """, (room_id,), ) (min_depth,) = txn.fetchone() logger.info("[purge] updating room_depth to %d", min_depth) txn.execute( "UPDATE room_depth SET min_depth = ? WHERE room_id = ?", (min_depth, room_id), ) # finally, drop the temp table. this will commit the txn in sqlite, # so make sure to keep this actually last. txn.execute("DROP TABLE events_to_purge") logger.info("[purge] done") return referenced_state_groups
https://github.com/matrix-org/synapse/issues/7693
2020-06-13 15:25:39,124 - synapse.storage.data_stores.main.events - 95 - ERROR - persist_events-9 - IntegrityError, retrying. Traceback (most recent call last): File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py", line 93, in f res = yield func(self, *args, delete_existing=False, **kwargs) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py", line 211, in _persist_events_and_state_updates new_forward_extremeties=new_forward_extremeties, File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 527, in runInteraction **kwargs File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 575, in runWithConnection self._db_pool.runWithConnection(inner_func, *args, **kwargs) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/threadpool.py", line 250, in inContext result = inContext.theWork() File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/threadpool.py", line 266, in <lambda> inContext.theWork = lambda: context.call(ctx, func, *args, **kw) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/context.py", line 122, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/context.py", line 85, in callWithContext return func(*args,**kw) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/enterprise/adbapi.py", line 306, in _runWithConnection compat.reraise(excValue, excTraceback) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/compat.py", line 464, in reraise raise exception.with_traceback(traceback) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/enterprise/adbapi.py", line 297, in _runWithConnection result = func(conn, *args, **kw) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 572, in inner_func return func(conn, *args, **kwargs) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 418, in new_transaction r = func(cursor, *args, **kwargs) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/logging/utils.py", line 73, in wrapped return f(*args, **kwargs) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py", line 451, in _persist_events_txn backfilled=backfilled, File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py", line 1001, in _update_metadata_tables_txn self._handle_event_relations(txn, event) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py", line 1299, in _handle_event_relations "aggregation_key": aggregation_key, File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 653, in simple_insert_txn txn.execute(sql, vals) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 210, in execute self._do_execute(self.txn.execute, sql, *args) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py", line 236, in _do_execute return func(sql, *args) psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "event_relations_id" DETAIL: Key (event_id)=($hnVIuNuExbqJ-9iXkDpaRAGdKdeDue4YI8w9G1qOSHc) already exists. 2020-06-13 15:25:39,130 - synapse.storage.data_stores.main.events - 815 - INFO - persist_events-9 - Deleting existing 2020-06-13 15:25:39,147 - synapse.handlers.federation - 1064 - ERROR - GET-1637 - Failed to backfill from matrix.org because FirstError[#0, [Failure instance: Traceback: <class 'psycopg2.errors.UniqueViolation'>: duplicate key value violates unique constraint "event_relations_id" DETAIL: Key (event_id)=($hnVIuNuExbqJ-9iXkDpaRAGdKdeDue4YI8w9G1qOSHc) already exists. /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:654:_runCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1475:gotResult /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator --- <exception caught here> --- /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/persist_events.py:152:handle_queue_loop /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/persist_events.py:262:persisting_queue /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/persist_events.py:438:_persist_events /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:96:f /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:211:_persist_events_and_state_updates /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:527:runInteraction /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:575:runWithConnection /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/threadpool.py:250:inContext /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/threadpool.py:266:<lambda> /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/context.py:122:callWithContext /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/context.py:85:callWithContext /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/enterprise/adbapi.py:306:_runWithConnection /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/compat.py:464:reraise /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/enterprise/adbapi.py:297:_runWithConnection /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:572:inner_func /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:418:new_transaction /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/logging/utils.py:73:wrapped /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:451:_persist_events_txn /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:1001:_update_metadata_tables_txn /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:1299:_handle_event_relations /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:653:simple_insert_txn /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:210:execute /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:236:_do_execute ]] Traceback (most recent call last): File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/handlers/federation.py", line 1033, in try_backfill dom, room_id, limit=100, extremities=extremities File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/handlers/federation.py", line 909, in backfill await self._handle_new_event(dest, event, backfilled=True) File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/handlers/federation.py", line 1845, in _handle_new_event [(event, context)], backfilled=backfilled File "/opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/handlers/federation.py", line 2870, in persist_events_and_notify event_and_contexts, backfilled=backfilled twisted.internet.defer.FirstError: FirstError[#0, [Failure instance: Traceback: <class 'psycopg2.errors.UniqueViolation'>: duplicate key value violates unique constraint "event_relations_id" DETAIL: Key (event_id)=($hnVIuNuExbqJ-9iXkDpaRAGdKdeDue4YI8w9G1qOSHc) already exists. /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:654:_runCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1475:gotResult /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator --- <exception caught here> --- /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/persist_events.py:152:handle_queue_loop /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/persist_events.py:262:persisting_queue /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/persist_events.py:438:_persist_events /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:96:f /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:211:_persist_events_and_state_updates /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:527:runInteraction /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:575:runWithConnection /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/threadpool.py:250:inContext /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/threadpool.py:266:<lambda> /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/context.py:122:callWithContext /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/context.py:85:callWithContext /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/enterprise/adbapi.py:306:_runWithConnection /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/python/compat.py:464:reraise /opt/venvs/matrix-synapse/lib/python3.7/site-packages/twisted/enterprise/adbapi.py:297:_runWithConnection /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:572:inner_func /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:418:new_transaction /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/logging/utils.py:73:wrapped /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:451:_persist_events_txn /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:1001:_update_metadata_tables_txn /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/data_stores/main/events.py:1299:_handle_event_relations /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:653:simple_insert_txn /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:210:execute /opt/venvs/matrix-synapse/lib/python3.7/site-packages/synapse/storage/database.py:236:_do_execute ]]
twisted.internet.defer.FirstError
async def notify_device_update(self, user_id, device_ids): """Notify that a user's device(s) has changed. Pokes the notifier, and remote servers if the user is local. """ if not device_ids: # No changes to notify about, so this is a no-op. return users_who_share_room = await self.store.get_users_who_share_room_with_user(user_id) hosts = set() if self.hs.is_mine_id(user_id): hosts.update(get_domain_from_id(u) for u in users_who_share_room) hosts.discard(self.server_name) set_tag("target_hosts", hosts) position = await self.store.add_device_change_to_streams( user_id, device_ids, list(hosts) ) if not position: # This should only happen if there are no updates, so we bail. return for device_id in device_ids: logger.debug( "Notifying about update %r/%r, ID: %r", user_id, device_id, position ) room_ids = await self.store.get_rooms_for_user(user_id) # specify the user ID too since the user should always get their own device list # updates, even if they aren't in any rooms. self.notifier.on_new_event( "device_list_key", position, users=[user_id], rooms=room_ids ) if hosts: logger.info("Sending device list update notif for %r to: %r", user_id, hosts) for host in hosts: self.federation_sender.send_device_messages(host) log_kv({"message": "sent device update to host", "host": host})
async def notify_device_update(self, user_id, device_ids): """Notify that a user's device(s) has changed. Pokes the notifier, and remote servers if the user is local. """ users_who_share_room = await self.store.get_users_who_share_room_with_user(user_id) hosts = set() if self.hs.is_mine_id(user_id): hosts.update(get_domain_from_id(u) for u in users_who_share_room) hosts.discard(self.server_name) set_tag("target_hosts", hosts) position = await self.store.add_device_change_to_streams( user_id, device_ids, list(hosts) ) for device_id in device_ids: logger.debug( "Notifying about update %r/%r, ID: %r", user_id, device_id, position ) room_ids = await self.store.get_rooms_for_user(user_id) # specify the user ID too since the user should always get their own device list # updates, even if they aren't in any rooms. self.notifier.on_new_event( "device_list_key", position, users=[user_id], rooms=room_ids ) if hosts: logger.info("Sending device list update notif for %r to: %r", user_id, hosts) for host in hosts: self.federation_sender.send_device_messages(host) log_kv({"message": "sent device update to host", "host": host})
https://github.com/matrix-org/synapse/issues/7774
Traceback (most recent call last): File "/usr/lib/python3/dist-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3/dist-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3/dist-packages/synapse/notifier.py", line 304, in on_new_event user_stream.notify(stream_key, new_token, time_now_ms) File "/usr/lib/python3/dist-packages/synapse/notifier.py", line 104, in notify self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) File "/usr/lib/python3/dist-packages/synapse/types.py", line 401, in copy_and_advance new_id = int(getattr(new_token, key)) TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
TypeError
def _background_insert_retention(self, progress, batch_size): """Retrieves a list of all rooms within a range and inserts an entry for each of them into the room_retention table. NULLs the property's columns if missing from the retention event in the room's state (or NULLs all of them if there's no retention event in the room's state), so that we fall back to the server's retention policy. """ last_room = progress.get("room_id", "") def _background_insert_retention_txn(txn): txn.execute( """ SELECT state.room_id, state.event_id, events.json FROM current_state_events as state LEFT JOIN event_json AS events ON (state.event_id = events.event_id) WHERE state.room_id > ? AND state.type = '%s' ORDER BY state.room_id ASC LIMIT ?; """ % EventTypes.Retention, (last_room, batch_size), ) rows = self.db.cursor_to_dict(txn) if not rows: return True for row in rows: if not row["json"]: retention_policy = {} else: ev = json.loads(row["json"]) retention_policy = ev["content"] self.db.simple_insert_txn( txn=txn, table="room_retention", values={ "room_id": row["room_id"], "event_id": row["event_id"], "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), }, ) logger.info("Inserted %d rows into room_retention", len(rows)) self.db.updates._background_update_progress_txn( txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} ) if batch_size > len(rows): return True else: return False end = yield self.db.runInteraction( "insert_room_retention", _background_insert_retention_txn, ) if end: yield self.db.updates._end_background_update("insert_room_retention") defer.returnValue(batch_size)
def _background_insert_retention(self, progress, batch_size): """Retrieves a list of all rooms within a range and inserts an entry for each of them into the room_retention table. NULLs the property's columns if missing from the retention event in the room's state (or NULLs all of them if there's no retention event in the room's state), so that we fall back to the server's retention policy. """ last_room = progress.get("room_id", "") def _background_insert_retention_txn(txn): txn.execute( """ SELECT state.room_id, state.event_id, events.json FROM current_state_events as state LEFT JOIN event_json AS events ON (state.event_id = events.event_id) WHERE state.room_id > ? AND state.type = '%s' ORDER BY state.room_id ASC LIMIT ?; """ % EventTypes.Retention, (last_room, batch_size), ) rows = self.db.cursor_to_dict(txn) if not rows: return True for row in rows: if not row["json"]: retention_policy = {} else: ev = json.loads(row["json"]) retention_policy = json.dumps(ev["content"]) self.db.simple_insert_txn( txn=txn, table="room_retention", values={ "room_id": row["room_id"], "event_id": row["event_id"], "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), }, ) logger.info("Inserted %d rows into room_retention", len(rows)) self.db.updates._background_update_progress_txn( txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} ) if batch_size > len(rows): return True else: return False end = yield self.db.runInteraction( "insert_room_retention", _background_insert_retention_txn, ) if end: yield self.db.updates._end_background_update("insert_room_retention") defer.returnValue(batch_size)
https://github.com/matrix-org/synapse/issues/7784
2020-07-05 10:59:39,226 - synapse.storage.background_updates - 227 - INFO - background_updates-0 - Starting update batch on background update 'insert_room_retention' 2020-07-05 10:59:39,232 - synapse.storage.background_updates - 114 - ERROR - background_updates-0 - Error doing update Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/storage/background_updates.py", line 111, in run_background_updates self.BACKGROUND_UPDATE_DURATION_MS File "/usr/local/lib/python3.7/site-packages/synapse/storage/background_updates.py", line 222, in do_next_background_update await self._do_background_update(desired_duration_ms) File "/usr/local/lib/python3.7/site-packages/synapse/storage/background_updates.py", line 255, in _do_background_update items_updated = await update_handler(progress, batch_size) File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/usr/local/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/usr/local/lib/python3.7/site-packages/synapse/storage/data_stores/main/room.py", line 938, in _background_insert_retention "insert_room_retention", _background_insert_retention_txn, File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/usr/local/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 527, in runInteraction **kwargs File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/usr/local/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 575, in runWithConnection self._db_pool.runWithConnection(inner_func, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/twisted/python/threadpool.py", line 250, in inContext result = inContext.theWork() File "/usr/local/lib/python3.7/site-packages/twisted/python/threadpool.py", line 266, in <lambda> inContext.theWork = lambda: context.call(ctx, func, *args, **kw) File "/usr/local/lib/python3.7/site-packages/twisted/python/context.py", line 122, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/usr/local/lib/python3.7/site-packages/twisted/python/context.py", line 85, in callWithContext return func(*args,**kw) File "/usr/local/lib/python3.7/site-packages/twisted/enterprise/adbapi.py", line 306, in _runWithConnection compat.reraise(excValue, excTraceback) File "/usr/local/lib/python3.7/site-packages/twisted/python/compat.py", line 464, in reraise raise exception.with_traceback(traceback) File "/usr/local/lib/python3.7/site-packages/twisted/enterprise/adbapi.py", line 297, in _runWithConnection result = func(conn, *args, **kw) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 572, in inner_func return func(conn, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 418, in new_transaction r = func(cursor, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/synapse/storage/data_stores/main/room.py", line 921, in _background_insert_retention_txn "min_lifetime": retention_policy.get("min_lifetime"), AttributeError: 'str' object has no attribute 'get'
AttributeError
def _background_insert_retention_txn(txn): txn.execute( """ SELECT state.room_id, state.event_id, events.json FROM current_state_events as state LEFT JOIN event_json AS events ON (state.event_id = events.event_id) WHERE state.room_id > ? AND state.type = '%s' ORDER BY state.room_id ASC LIMIT ?; """ % EventTypes.Retention, (last_room, batch_size), ) rows = self.db.cursor_to_dict(txn) if not rows: return True for row in rows: if not row["json"]: retention_policy = {} else: ev = json.loads(row["json"]) retention_policy = ev["content"] self.db.simple_insert_txn( txn=txn, table="room_retention", values={ "room_id": row["room_id"], "event_id": row["event_id"], "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), }, ) logger.info("Inserted %d rows into room_retention", len(rows)) self.db.updates._background_update_progress_txn( txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} ) if batch_size > len(rows): return True else: return False
def _background_insert_retention_txn(txn): txn.execute( """ SELECT state.room_id, state.event_id, events.json FROM current_state_events as state LEFT JOIN event_json AS events ON (state.event_id = events.event_id) WHERE state.room_id > ? AND state.type = '%s' ORDER BY state.room_id ASC LIMIT ?; """ % EventTypes.Retention, (last_room, batch_size), ) rows = self.db.cursor_to_dict(txn) if not rows: return True for row in rows: if not row["json"]: retention_policy = {} else: ev = json.loads(row["json"]) retention_policy = json.dumps(ev["content"]) self.db.simple_insert_txn( txn=txn, table="room_retention", values={ "room_id": row["room_id"], "event_id": row["event_id"], "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), }, ) logger.info("Inserted %d rows into room_retention", len(rows)) self.db.updates._background_update_progress_txn( txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} ) if batch_size > len(rows): return True else: return False
https://github.com/matrix-org/synapse/issues/7784
2020-07-05 10:59:39,226 - synapse.storage.background_updates - 227 - INFO - background_updates-0 - Starting update batch on background update 'insert_room_retention' 2020-07-05 10:59:39,232 - synapse.storage.background_updates - 114 - ERROR - background_updates-0 - Error doing update Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/storage/background_updates.py", line 111, in run_background_updates self.BACKGROUND_UPDATE_DURATION_MS File "/usr/local/lib/python3.7/site-packages/synapse/storage/background_updates.py", line 222, in do_next_background_update await self._do_background_update(desired_duration_ms) File "/usr/local/lib/python3.7/site-packages/synapse/storage/background_updates.py", line 255, in _do_background_update items_updated = await update_handler(progress, batch_size) File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/usr/local/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/usr/local/lib/python3.7/site-packages/synapse/storage/data_stores/main/room.py", line 938, in _background_insert_retention "insert_room_retention", _background_insert_retention_txn, File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/usr/local/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 527, in runInteraction **kwargs File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/usr/local/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 575, in runWithConnection self._db_pool.runWithConnection(inner_func, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/twisted/python/threadpool.py", line 250, in inContext result = inContext.theWork() File "/usr/local/lib/python3.7/site-packages/twisted/python/threadpool.py", line 266, in <lambda> inContext.theWork = lambda: context.call(ctx, func, *args, **kw) File "/usr/local/lib/python3.7/site-packages/twisted/python/context.py", line 122, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/usr/local/lib/python3.7/site-packages/twisted/python/context.py", line 85, in callWithContext return func(*args,**kw) File "/usr/local/lib/python3.7/site-packages/twisted/enterprise/adbapi.py", line 306, in _runWithConnection compat.reraise(excValue, excTraceback) File "/usr/local/lib/python3.7/site-packages/twisted/python/compat.py", line 464, in reraise raise exception.with_traceback(traceback) File "/usr/local/lib/python3.7/site-packages/twisted/enterprise/adbapi.py", line 297, in _runWithConnection result = func(conn, *args, **kw) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 572, in inner_func return func(conn, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/synapse/storage/database.py", line 418, in new_transaction r = func(cursor, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/synapse/storage/data_stores/main/room.py", line 921, in _background_insert_retention_txn "min_lifetime": retention_policy.get("min_lifetime"), AttributeError: 'str' object has no attribute 'get'
AttributeError
def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type, user_id): if receipt_type != "m.read": return # Returns either an ObservableDeferred or the raw result res = self.get_users_with_read_receipts_in_room.cache.get( room_id, None, update_metrics=False ) # first handle the ObservableDeferred case if isinstance(res, ObservableDeferred): if res.has_called(): res = res.get_result() else: res = None if res and user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there return self.get_users_with_read_receipts_in_room.invalidate((room_id,))
def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type, user_id): if receipt_type != "m.read": return # Returns either an ObservableDeferred or the raw result res = self.get_users_with_read_receipts_in_room.cache.get( room_id, None, update_metrics=False ) # first handle the Deferred case if isinstance(res, defer.Deferred): if res.called: res = res.result else: res = None if res and user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there return self.get_users_with_read_receipts_in_room.invalidate((room_id,))
https://github.com/matrix-org/synapse/issues/3234
homeserver - 2018-05-18 00:40:04,123 - synapse.federation.federation_server - 643 - ERROR - Failed to handle edu 'm.receipt' Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/federation/federation_server.py", line 639, in on_edu yield handler(origin, content) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/handlers/receipts.py", line 84, in _received_remote_receipt yield self._handle_new_receipts(receipts) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/handlers/receipts.py", line 101, in _handle_new_receipts room_id, receipt_type, user_id, event_ids, data File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/receipts.py", line 456, in insert_receipt stream_id=stream_id, File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks result = g.send(result) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/_base.py", line 323, in runInteraction after_callback(*after_args, **after_kwargs) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/receipts.py", line 308, in _invalidate_get_users_with_receipts_in_room if user_id in res: TypeError: argument of type 'ObservableDeferred' is not iterable
TypeError
def observe(self) -> defer.Deferred: """Observe the underlying deferred. This returns a brand new deferred that is resolved when the underlying deferred is resolved. Interacting with the returned deferred does not effect the underlying deferred. """ if not self._result: d = defer.Deferred() def remove(r): self._observers.discard(d) return r d.addBoth(remove) self._observers.add(d) return d else: success, res = self._result return defer.succeed(res) if success else defer.fail(res)
def observe(self) -> defer.Deferred: """Observe the underlying deferred. This returns a brand new deferred that is resolved when the underlying deferred is resolved. Interacting with the returned deferred does not effect the underdlying deferred. """ if not self._result: d = defer.Deferred() def remove(r): self._observers.discard(d) return r d.addBoth(remove) self._observers.add(d) return d else: success, res = self._result return defer.succeed(res) if success else defer.fail(res)
https://github.com/matrix-org/synapse/issues/3234
homeserver - 2018-05-18 00:40:04,123 - synapse.federation.federation_server - 643 - ERROR - Failed to handle edu 'm.receipt' Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/federation/federation_server.py", line 639, in on_edu yield handler(origin, content) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/handlers/receipts.py", line 84, in _received_remote_receipt yield self._handle_new_receipts(receipts) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/handlers/receipts.py", line 101, in _handle_new_receipts room_id, receipt_type, user_id, event_ids, data File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/receipts.py", line 456, in insert_receipt stream_id=stream_id, File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks result = g.send(result) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/_base.py", line 323, in runInteraction after_callback(*after_args, **after_kwargs) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/receipts.py", line 308, in _invalidate_get_users_with_receipts_in_room if user_id in res: TypeError: argument of type 'ObservableDeferred' is not iterable
TypeError
def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type, user_id): if receipt_type != "m.read": return # Returns either an ObservableDeferred or the raw result res = self.get_users_with_read_receipts_in_room.cache.get( room_id, None, update_metrics=False, ) # first handle the Deferred case if isinstance(res, defer.Deferred): if res.called: res = res.result else: res = None if res and user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there return self.get_users_with_read_receipts_in_room.invalidate((room_id,))
def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type, user_id): if receipt_type != "m.read": return # Returns an ObservableDeferred res = self.get_users_with_read_receipts_in_room.cache.get( room_id, None, update_metrics=False, ) if res: if isinstance(res, defer.Deferred) and res.called: res = res.result if user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there return self.get_users_with_read_receipts_in_room.invalidate((room_id,))
https://github.com/matrix-org/synapse/issues/3234
homeserver - 2018-05-18 00:40:04,123 - synapse.federation.federation_server - 643 - ERROR - Failed to handle edu 'm.receipt' Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/federation/federation_server.py", line 639, in on_edu yield handler(origin, content) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/handlers/receipts.py", line 84, in _received_remote_receipt yield self._handle_new_receipts(receipts) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/handlers/receipts.py", line 101, in _handle_new_receipts room_id, receipt_type, user_id, event_ids, data File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/python/failure.py", line 422, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/receipts.py", line 456, in insert_receipt stream_id=stream_id, File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks result = g.send(result) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/_base.py", line 323, in runInteraction after_callback(*after_args, **after_kwargs) File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/storage/receipts.py", line 308, in _invalidate_get_users_with_receipts_in_room if user_id in res: TypeError: argument of type 'ObservableDeferred' is not iterable
TypeError
def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. Args: localpart (str): The localpart of the new user. displayname (str|None): The displayname of the new user. emails (List[str]): Emails to bind to the new user. Raises: SynapseError if there is an error performing the registration. Check the 'errcode' property for more information on the reason for failure Returns: defer.Deferred[str]: user_id """ return defer.ensureDeferred( self._hs.get_registration_handler().register_user( localpart=localpart, default_display_name=displayname, bind_emails=emails, ) )
def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. Args: localpart (str): The localpart of the new user. displayname (str|None): The displayname of the new user. emails (List[str]): Emails to bind to the new user. Raises: SynapseError if there is an error performing the registration. Check the 'errcode' property for more information on the reason for failure Returns: Deferred[str]: user_id """ return defer.ensureDeferred( self._hs.get_registration_handler().register_user( localpart=localpart, default_display_name=displayname, bind_emails=emails, ) )
https://github.com/matrix-org/synapse/issues/7683
2020-06-11 21:42:52,523 - synapse.http.server - 113 - ERROR - - Failed handle request via 'LoginRestServlet': <XForwardedForRequest at 0x7f89689119e8 method='POST' uri='/_matrix/client/r0/login' clientproto='HTTP/1.1' site=8008> Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: @girish:cloudron.fun During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler await h(self, request) File "/app/code/env/lib/python3.6/site-packages/synapse/http/server.py", line 350, in _async_render callback_return = await callback_return File "/app/code/env/lib/python3.6/site-packages/synapse/rest/client/v1/login.py", line 149, in on_POST result = await self._do_other_login(login_submission) File "/app/code/env/lib/python3.6/site-packages/synapse/rest/client/v1/login.py", line 264, in _do_other_login identifier["user"], login_submission File "/app/code/env/lib/python3.6/site-packages/synapse/handlers/auth.py", line 722, in validate_login is_valid = await provider.check_password(qualified_user_id, password) File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/app/code/env/lib/python3.6/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/app/code/env/lib/python3.6/site-packages/ldap_auth_provider.py", line 180, in check_password user_id = yield self.register_user(localpart, givenName, mail) File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/app/code/env/lib/python3.6/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/app/code/env/lib/python3.6/site-packages/ldap_auth_provider.py", line 291, in register_user localpart=localpart, displayname=name, emails=emails, File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/app/code/env/lib/python3.6/site-packages/synapse/module_api/__init__.py", line 113, in register _, access_token = yield self.register_device(user_id) TypeError: 'coroutine' object is not iterable
TypeError
def register_device(self, user_id, device_id=None, initial_display_name=None): """Register a device for a user and generate an access token. Args: user_id (str): full canonical @user:id device_id (str|None): The device ID to check, or None to generate a new one. initial_display_name (str|None): An optional display name for the device. Returns: defer.Deferred[tuple[str, str]]: Tuple of device ID and access token """ return defer.ensureDeferred( self._hs.get_registration_handler().register_device( user_id=user_id, device_id=device_id, initial_display_name=initial_display_name, ) )
def register_device(self, user_id, device_id=None, initial_display_name=None): """Register a device for a user and generate an access token. Args: user_id (str): full canonical @user:id device_id (str|None): The device ID to check, or None to generate a new one. initial_display_name (str|None): An optional display name for the device. Returns: defer.Deferred[tuple[str, str]]: Tuple of device ID and access token """ return self._hs.get_registration_handler().register_device( user_id=user_id, device_id=device_id, initial_display_name=initial_display_name, )
https://github.com/matrix-org/synapse/issues/7683
2020-06-11 21:42:52,523 - synapse.http.server - 113 - ERROR - - Failed handle request via 'LoginRestServlet': <XForwardedForRequest at 0x7f89689119e8 method='POST' uri='/_matrix/client/r0/login' clientproto='HTTP/1.1' site=8008> Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: @girish:cloudron.fun During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/code/env/lib/python3.6/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler await h(self, request) File "/app/code/env/lib/python3.6/site-packages/synapse/http/server.py", line 350, in _async_render callback_return = await callback_return File "/app/code/env/lib/python3.6/site-packages/synapse/rest/client/v1/login.py", line 149, in on_POST result = await self._do_other_login(login_submission) File "/app/code/env/lib/python3.6/site-packages/synapse/rest/client/v1/login.py", line 264, in _do_other_login identifier["user"], login_submission File "/app/code/env/lib/python3.6/site-packages/synapse/handlers/auth.py", line 722, in validate_login is_valid = await provider.check_password(qualified_user_id, password) File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/app/code/env/lib/python3.6/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/app/code/env/lib/python3.6/site-packages/ldap_auth_provider.py", line 180, in check_password user_id = yield self.register_user(localpart, givenName, mail) File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/app/code/env/lib/python3.6/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/app/code/env/lib/python3.6/site-packages/ldap_auth_provider.py", line 291, in register_user localpart=localpart, displayname=name, emails=emails, File "/app/code/env/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/app/code/env/lib/python3.6/site-packages/synapse/module_api/__init__.py", line 113, in register _, access_token = yield self.register_device(user_id) TypeError: 'coroutine' object is not iterable
TypeError
def _event_match(self, condition: dict, user_id: str) -> bool: pattern = condition.get("pattern", None) if not pattern: pattern_type = condition.get("pattern_type", None) if pattern_type == "user_id": pattern = user_id elif pattern_type == "user_localpart": pattern = UserID.from_string(user_id).localpart if not pattern: logger.warning("event_match condition with no pattern") return False # XXX: optimisation: cache our pattern regexps if condition["key"] == "content.body": body = self._event.content.get("body", None) if not body or not isinstance(body, str): return False return _glob_matches(pattern, body, word_boundary=True) else: haystack = self._get_value(condition["key"]) if haystack is None: return False return _glob_matches(pattern, haystack)
def _event_match(self, condition: dict, user_id: str) -> bool: pattern = condition.get("pattern", None) if not pattern: pattern_type = condition.get("pattern_type", None) if pattern_type == "user_id": pattern = user_id elif pattern_type == "user_localpart": pattern = UserID.from_string(user_id).localpart if not pattern: logger.warning("event_match condition with no pattern") return False # XXX: optimisation: cache our pattern regexps if condition["key"] == "content.body": body = self._event.content.get("body", None) if not body: return False return _glob_matches(pattern, body, word_boundary=True) else: haystack = self._get_value(condition["key"]) if haystack is None: return False return _glob_matches(pattern, haystack)
https://github.com/matrix-org/synapse/issues/7700
2020-06-15 19:00:06,270 - synapse.federation.federation_server - 290 - ERROR - PUT-1359406-$1592244001432863GBGGz:matrix.org- Failed to handle PDU $1592244001432863GBGGz:matrix.org ... Traceback (most recent call last): File "/opt/synapse/synapse/synapse/federation/federation_server.py", line 279, in process_pdus_for_room await self._handle_received_pdu(origin, pdu) File "/opt/synapse/synapse/synapse/federation/federation_server.py", line 658, in _handle_received_pdu await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True) File "/opt/synapse/synapse/synapse/handlers/federation.py", line 412, in on_receive_pdu await self._process_received_pdu(origin, pdu, state=state) File "/opt/synapse/synapse/synapse/handlers/federation.py", line 690, in _process_received_pdu context = await self._handle_new_event(origin, event, state=state) File "/opt/synapse/synapse/synapse/handlers/federation.py", line 1841, in _handle_new_event event, context File "/opt/synapse/synapse/synapse/push/action_generator.py", line 43, in handle_push_actions_for_event yield self.bulk_evaluator.action_for_event_by_user(event, context) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/push/bulk_push_rule_evaluator.py", line 190, in action_for_event_by_user evaluator, rule["conditions"], uid, display_name, condition_cache File "/opt/synapse/synapse/synapse/push/bulk_push_rule_evaluator.py", line 215, in _condition_checker res = evaluator.matches(cond, uid, display_name) File "/opt/synapse/synapse/synapse/push/push_rule_evaluator.py", line 107, in matches return self._contains_display_name(display_name) File "/opt/synapse/synapse/synapse/push/push_rule_evaluator.py", line 161, in _contains_display_name return r.search(body) TypeError: expected string or bytes-like object 2020-06-15 19:00:06,285 - synapse.logging.context - 396 - WARNING - PUT-1359406-$1592244001432863GBGGz:matrix.org- Re-starting finished log context PUT-1359406-$1592244001432863GBGGz:matrix.org
TypeError
def _contains_display_name(self, display_name: str) -> bool: if not display_name: return False body = self._event.content.get("body", None) if not body or not isinstance(body, str): return False # Similar to _glob_matches, but do not treat display_name as a glob. r = regex_cache.get((display_name, False, True), None) if not r: r = re.escape(display_name) r = _re_word_boundary(r) r = re.compile(r, flags=re.IGNORECASE) regex_cache[(display_name, False, True)] = r return r.search(body)
def _contains_display_name(self, display_name: str) -> bool: if not display_name: return False body = self._event.content.get("body", None) if not body: return False # Similar to _glob_matches, but do not treat display_name as a glob. r = regex_cache.get((display_name, False, True), None) if not r: r = re.escape(display_name) r = _re_word_boundary(r) r = re.compile(r, flags=re.IGNORECASE) regex_cache[(display_name, False, True)] = r return r.search(body)
https://github.com/matrix-org/synapse/issues/7700
2020-06-15 19:00:06,270 - synapse.federation.federation_server - 290 - ERROR - PUT-1359406-$1592244001432863GBGGz:matrix.org- Failed to handle PDU $1592244001432863GBGGz:matrix.org ... Traceback (most recent call last): File "/opt/synapse/synapse/synapse/federation/federation_server.py", line 279, in process_pdus_for_room await self._handle_received_pdu(origin, pdu) File "/opt/synapse/synapse/synapse/federation/federation_server.py", line 658, in _handle_received_pdu await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True) File "/opt/synapse/synapse/synapse/handlers/federation.py", line 412, in on_receive_pdu await self._process_received_pdu(origin, pdu, state=state) File "/opt/synapse/synapse/synapse/handlers/federation.py", line 690, in _process_received_pdu context = await self._handle_new_event(origin, event, state=state) File "/opt/synapse/synapse/synapse/handlers/federation.py", line 1841, in _handle_new_event event, context File "/opt/synapse/synapse/synapse/push/action_generator.py", line 43, in handle_push_actions_for_event yield self.bulk_evaluator.action_for_event_by_user(event, context) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/push/bulk_push_rule_evaluator.py", line 190, in action_for_event_by_user evaluator, rule["conditions"], uid, display_name, condition_cache File "/opt/synapse/synapse/synapse/push/bulk_push_rule_evaluator.py", line 215, in _condition_checker res = evaluator.matches(cond, uid, display_name) File "/opt/synapse/synapse/synapse/push/push_rule_evaluator.py", line 107, in matches return self._contains_display_name(display_name) File "/opt/synapse/synapse/synapse/push/push_rule_evaluator.py", line 161, in _contains_display_name return r.search(body) TypeError: expected string or bytes-like object 2020-06-15 19:00:06,285 - synapse.logging.context - 396 - WARNING - PUT-1359406-$1592244001432863GBGGz:matrix.org- Re-starting finished log context PUT-1359406-$1592244001432863GBGGz:matrix.org
TypeError
def add_resizable_cache(cache_name: str, cache_resize_callback: Callable): """Register a cache that's size can dynamically change Args: cache_name: A reference to the cache cache_resize_callback: A callback function that will be ran whenever the cache needs to be resized """ # Some caches have '*' in them which we strip out. cache_name = _canonicalise_cache_name(cache_name) # sometimes caches are initialised from background threads, so we need to make # sure we don't conflict with another thread running a resize operation with _CACHES_LOCK: _CACHES[cache_name] = cache_resize_callback # Ensure all loaded caches are sized appropriately # # This method should only run once the config has been read, # as it uses values read from it if properties.resize_all_caches_func: properties.resize_all_caches_func()
def add_resizable_cache(cache_name: str, cache_resize_callback: Callable): """Register a cache that's size can dynamically change Args: cache_name: A reference to the cache cache_resize_callback: A callback function that will be ran whenever the cache needs to be resized """ # Some caches have '*' in them which we strip out. cache_name = _canonicalise_cache_name(cache_name) _CACHES[cache_name] = cache_resize_callback # Ensure all loaded caches are sized appropriately # # This method should only run once the config has been read, # as it uses values read from it if properties.resize_all_caches_func: properties.resize_all_caches_func()
https://github.com/matrix-org/synapse/issues/7610
2020-06-01 05:34:30,471 - synapse.storage.data_stores.main.event_push_actions - 503 - INFO - None - Found stream ordering 1 month ago: it's 447264 2020-06-01 05:34:30,472 - synapse.storage.data_stores.main.event_push_actions - 506 - INFO - None - Searching for stream ordering 1 day ago 2020-06-01 05:34:30,575 - synapse.storage.data_stores.main.event_push_actions - 510 - INFO - None - Found stream ordering 1 day ago: it's 480728 2020-06-01 05:34:32,166 - synapse.storage.data_stores - 77 - INFO - None - Starting 'state' data store 2020-06-01 05:34:32,171 - synapse.storage.data_stores - 90 - INFO - None - Database 'master' prepared 2020-06-01 05:34:32,172 - synapse.server - 275 - INFO - None - Finished setting up. 2020-06-01 05:34:32,437 - synapse.app.homeserver - 111 - INFO - - Running 2020-06-01 05:34:32,438 - synapse.app.homeserver - 30 - INFO - - Set file limit to: 1048576 2020-06-01 05:34:32,456 - synapse.config.tls - 517 - INFO - - Loading TLS key from /data/temptest.draak.fr.tls.key 2020-06-01 05:34:32,475 - synapse.config.tls - 494 - INFO - - Loading TLS certificate from /data/temptest.draak.fr.tls.crt 2020-06-01 05:34:32,714 - twisted - 192 - ERROR - - Traceback (most recent call last): 2020-06-01 05:34:32,718 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 278, in start 2020-06-01 05:34:32,720 - twisted - 192 - ERROR - - hs.start_listening(listeners) 2020-06-01 05:34:32,722 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 292, in start_listening 2020-06-01 05:34:32,724 - twisted - 192 - ERROR - - self._listening_services.extend(self._listener_http(config, listener)) 2020-06-01 05:34:32,726 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 106, in _listener_http 2020-06-01 05:34:32,728 - twisted - 192 - ERROR - - self._configure_named_resource(name, res.get("compress", False)) 2020-06-01 05:34:32,730 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 182, in _configure_named_resource 2020-06-01 05:34:32,733 - twisted - 192 - ERROR - - client_resource = ClientRestResource(self) 2020-06-01 05:34:32,735 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 73, in __init__ 2020-06-01 05:34:32,737 - twisted - 192 - ERROR - - self.register_servlets(self, hs) 2020-06-01 05:34:32,739 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 80, in register_servlets 2020-06-01 05:34:32,741 - twisted - 192 - ERROR - - initial_sync.register_servlets(hs, client_resource) 2020-06-01 05:34:32,744 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 47, in register_servlets 2020-06-01 05:34:32,746 - twisted - 192 - ERROR - - InitialSyncRestServlet(hs).register(http_server) 2020-06-01 05:34:32,748 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 28, in __init__ 2020-06-01 05:34:32,750 - twisted - 192 - ERROR - - self.initial_sync_handler = hs.get_initial_sync_handler() 2020-06-01 05:34:32,752 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get 2020-06-01 05:34:32,754 - twisted - 192 - ERROR - - dep = builder() 2020-06-01 05:34:32,756 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 422, in build_initial_sync_handler 2020-06-01 05:34:32,757 - twisted - 192 - ERROR - - return InitialSyncHandler(self) 2020-06-01 05:34:32,759 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/handlers/initial_sync.py", line 39, in __init__ 2020-06-01 05:34:32,761 - twisted - 192 - ERROR - - super(InitialSyncHandler, self).__init__(hs) 2020-06-01 05:34:32,763 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/handlers/_base.py", line 43, in __init__ 2020-06-01 05:34:32,766 - twisted - 192 - ERROR - - self.auth = hs.get_auth() 2020-06-01 05:34:32,767 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get 2020-06-01 05:34:32,769 - twisted - 192 - ERROR - - dep = builder() 2020-06-01 05:34:32,771 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 339, in build_auth 2020-06-01 05:34:32,773 - twisted - 192 - ERROR - - return Auth(self) 2020-06-01 05:34:32,775 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/api/auth.py", line 78, in __init__ 2020-06-01 05:34:32,777 - twisted - 192 - ERROR - - register_cache("cache", "token_cache", self.token_cache) 2020-06-01 05:34:32,778 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/util/caches/__init__.py", line 117, in register_cache 2020-06-01 05:34:32,780 - twisted - 192 - ERROR - - add_resizable_cache(cache_name, resize_callback) 2020-06-01 05:34:32,782 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 76, in add_resizable_cache 2020-06-01 05:34:32,784 - twisted - 192 - ERROR - - properties.resize_all_caches_func() 2020-06-01 05:34:32,786 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 196, in resize_all_caches 2020-06-01 05:34:32,788 - twisted - 192 - ERROR - - for cache_name, callback in _CACHES.items(): 2020-06-01 05:34:32,790 - twisted - 192 - ERROR - - RuntimeError: dictionary changed size during iteration 2020-06-01 05:34:32,912 - twisted - 192 - CRITICAL - - Unhandled error in Deferred: 2020-06-01 05:34:32,919 - twisted - 192 - CRITICAL - - Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 278, in start hs.start_listening(listeners) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 292, in start_listening self._listening_services.extend(self._listener_http(config, listener)) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 106, in _listener_http self._configure_named_resource(name, res.get("compress", False)) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 182, in _configure_named_resource client_resource = ClientRestResource(self) File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 73, in __init__ self.register_servlets(self, hs) File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 80, in register_servlets initial_sync.register_servlets(hs, client_resource) File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 47, in register_servlets InitialSyncRestServlet(hs).register(http_server) File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 28, in __init__ self.initial_sync_handler = hs.get_initial_sync_handler() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get dep = builder() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 422, in build_initial_sync_handler return InitialSyncHandler(self) File "/usr/lib/python3.8/site-packages/synapse/handlers/initial_sync.py", line 39, in __init__ super(InitialSyncHandler, self).__init__(hs) File "/usr/lib/python3.8/site-packages/synapse/handlers/_base.py", line 43, in __init__ self.auth = hs.get_auth() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get dep = builder() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 339, in build_auth return Auth(self) File "/usr/lib/python3.8/site-packages/synapse/api/auth.py", line 78, in __init__ register_cache("cache", "token_cache", self.token_cache) File "/usr/lib/python3.8/site-packages/synapse/util/caches/__init__.py", line 117, in register_cache add_resizable_cache(cache_name, resize_callback) File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 76, in add_resizable_cache properties.resize_all_caches_func() File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 196, in resize_all_caches for cache_name, callback in _CACHES.items(): RuntimeError: dictionary changed size during iteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 440, in start _base.start(hs, config.listeners) File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 298, in start sys.exit(1) SystemExit: 1 2020-06-01 05:34:33,018 - twisted - 192 - INFO - - Main loop terminated.
RuntimeError
def reset(): """Resets the caches to their defaults. Used for tests.""" properties.default_factor_size = float( os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE) ) properties.resize_all_caches_func = None with _CACHES_LOCK: _CACHES.clear()
def reset(): """Resets the caches to their defaults. Used for tests.""" properties.default_factor_size = float( os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE) ) properties.resize_all_caches_func = None _CACHES.clear()
https://github.com/matrix-org/synapse/issues/7610
2020-06-01 05:34:30,471 - synapse.storage.data_stores.main.event_push_actions - 503 - INFO - None - Found stream ordering 1 month ago: it's 447264 2020-06-01 05:34:30,472 - synapse.storage.data_stores.main.event_push_actions - 506 - INFO - None - Searching for stream ordering 1 day ago 2020-06-01 05:34:30,575 - synapse.storage.data_stores.main.event_push_actions - 510 - INFO - None - Found stream ordering 1 day ago: it's 480728 2020-06-01 05:34:32,166 - synapse.storage.data_stores - 77 - INFO - None - Starting 'state' data store 2020-06-01 05:34:32,171 - synapse.storage.data_stores - 90 - INFO - None - Database 'master' prepared 2020-06-01 05:34:32,172 - synapse.server - 275 - INFO - None - Finished setting up. 2020-06-01 05:34:32,437 - synapse.app.homeserver - 111 - INFO - - Running 2020-06-01 05:34:32,438 - synapse.app.homeserver - 30 - INFO - - Set file limit to: 1048576 2020-06-01 05:34:32,456 - synapse.config.tls - 517 - INFO - - Loading TLS key from /data/temptest.draak.fr.tls.key 2020-06-01 05:34:32,475 - synapse.config.tls - 494 - INFO - - Loading TLS certificate from /data/temptest.draak.fr.tls.crt 2020-06-01 05:34:32,714 - twisted - 192 - ERROR - - Traceback (most recent call last): 2020-06-01 05:34:32,718 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 278, in start 2020-06-01 05:34:32,720 - twisted - 192 - ERROR - - hs.start_listening(listeners) 2020-06-01 05:34:32,722 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 292, in start_listening 2020-06-01 05:34:32,724 - twisted - 192 - ERROR - - self._listening_services.extend(self._listener_http(config, listener)) 2020-06-01 05:34:32,726 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 106, in _listener_http 2020-06-01 05:34:32,728 - twisted - 192 - ERROR - - self._configure_named_resource(name, res.get("compress", False)) 2020-06-01 05:34:32,730 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 182, in _configure_named_resource 2020-06-01 05:34:32,733 - twisted - 192 - ERROR - - client_resource = ClientRestResource(self) 2020-06-01 05:34:32,735 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 73, in __init__ 2020-06-01 05:34:32,737 - twisted - 192 - ERROR - - self.register_servlets(self, hs) 2020-06-01 05:34:32,739 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 80, in register_servlets 2020-06-01 05:34:32,741 - twisted - 192 - ERROR - - initial_sync.register_servlets(hs, client_resource) 2020-06-01 05:34:32,744 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 47, in register_servlets 2020-06-01 05:34:32,746 - twisted - 192 - ERROR - - InitialSyncRestServlet(hs).register(http_server) 2020-06-01 05:34:32,748 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 28, in __init__ 2020-06-01 05:34:32,750 - twisted - 192 - ERROR - - self.initial_sync_handler = hs.get_initial_sync_handler() 2020-06-01 05:34:32,752 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get 2020-06-01 05:34:32,754 - twisted - 192 - ERROR - - dep = builder() 2020-06-01 05:34:32,756 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 422, in build_initial_sync_handler 2020-06-01 05:34:32,757 - twisted - 192 - ERROR - - return InitialSyncHandler(self) 2020-06-01 05:34:32,759 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/handlers/initial_sync.py", line 39, in __init__ 2020-06-01 05:34:32,761 - twisted - 192 - ERROR - - super(InitialSyncHandler, self).__init__(hs) 2020-06-01 05:34:32,763 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/handlers/_base.py", line 43, in __init__ 2020-06-01 05:34:32,766 - twisted - 192 - ERROR - - self.auth = hs.get_auth() 2020-06-01 05:34:32,767 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get 2020-06-01 05:34:32,769 - twisted - 192 - ERROR - - dep = builder() 2020-06-01 05:34:32,771 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 339, in build_auth 2020-06-01 05:34:32,773 - twisted - 192 - ERROR - - return Auth(self) 2020-06-01 05:34:32,775 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/api/auth.py", line 78, in __init__ 2020-06-01 05:34:32,777 - twisted - 192 - ERROR - - register_cache("cache", "token_cache", self.token_cache) 2020-06-01 05:34:32,778 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/util/caches/__init__.py", line 117, in register_cache 2020-06-01 05:34:32,780 - twisted - 192 - ERROR - - add_resizable_cache(cache_name, resize_callback) 2020-06-01 05:34:32,782 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 76, in add_resizable_cache 2020-06-01 05:34:32,784 - twisted - 192 - ERROR - - properties.resize_all_caches_func() 2020-06-01 05:34:32,786 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 196, in resize_all_caches 2020-06-01 05:34:32,788 - twisted - 192 - ERROR - - for cache_name, callback in _CACHES.items(): 2020-06-01 05:34:32,790 - twisted - 192 - ERROR - - RuntimeError: dictionary changed size during iteration 2020-06-01 05:34:32,912 - twisted - 192 - CRITICAL - - Unhandled error in Deferred: 2020-06-01 05:34:32,919 - twisted - 192 - CRITICAL - - Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 278, in start hs.start_listening(listeners) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 292, in start_listening self._listening_services.extend(self._listener_http(config, listener)) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 106, in _listener_http self._configure_named_resource(name, res.get("compress", False)) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 182, in _configure_named_resource client_resource = ClientRestResource(self) File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 73, in __init__ self.register_servlets(self, hs) File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 80, in register_servlets initial_sync.register_servlets(hs, client_resource) File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 47, in register_servlets InitialSyncRestServlet(hs).register(http_server) File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 28, in __init__ self.initial_sync_handler = hs.get_initial_sync_handler() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get dep = builder() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 422, in build_initial_sync_handler return InitialSyncHandler(self) File "/usr/lib/python3.8/site-packages/synapse/handlers/initial_sync.py", line 39, in __init__ super(InitialSyncHandler, self).__init__(hs) File "/usr/lib/python3.8/site-packages/synapse/handlers/_base.py", line 43, in __init__ self.auth = hs.get_auth() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get dep = builder() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 339, in build_auth return Auth(self) File "/usr/lib/python3.8/site-packages/synapse/api/auth.py", line 78, in __init__ register_cache("cache", "token_cache", self.token_cache) File "/usr/lib/python3.8/site-packages/synapse/util/caches/__init__.py", line 117, in register_cache add_resizable_cache(cache_name, resize_callback) File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 76, in add_resizable_cache properties.resize_all_caches_func() File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 196, in resize_all_caches for cache_name, callback in _CACHES.items(): RuntimeError: dictionary changed size during iteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 440, in start _base.start(hs, config.listeners) File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 298, in start sys.exit(1) SystemExit: 1 2020-06-01 05:34:33,018 - twisted - 192 - INFO - - Main loop terminated.
RuntimeError
def resize_all_caches(self): """Ensure all cache sizes are up to date For each cache, run the mapped callback function with either a specific cache factor or the default, global one. """ # block other threads from modifying _CACHES while we iterate it. with _CACHES_LOCK: for cache_name, callback in _CACHES.items(): new_factor = self.cache_factors.get(cache_name, self.global_factor) callback(new_factor)
def resize_all_caches(self): """Ensure all cache sizes are up to date For each cache, run the mapped callback function with either a specific cache factor or the default, global one. """ for cache_name, callback in _CACHES.items(): new_factor = self.cache_factors.get(cache_name, self.global_factor) callback(new_factor)
https://github.com/matrix-org/synapse/issues/7610
2020-06-01 05:34:30,471 - synapse.storage.data_stores.main.event_push_actions - 503 - INFO - None - Found stream ordering 1 month ago: it's 447264 2020-06-01 05:34:30,472 - synapse.storage.data_stores.main.event_push_actions - 506 - INFO - None - Searching for stream ordering 1 day ago 2020-06-01 05:34:30,575 - synapse.storage.data_stores.main.event_push_actions - 510 - INFO - None - Found stream ordering 1 day ago: it's 480728 2020-06-01 05:34:32,166 - synapse.storage.data_stores - 77 - INFO - None - Starting 'state' data store 2020-06-01 05:34:32,171 - synapse.storage.data_stores - 90 - INFO - None - Database 'master' prepared 2020-06-01 05:34:32,172 - synapse.server - 275 - INFO - None - Finished setting up. 2020-06-01 05:34:32,437 - synapse.app.homeserver - 111 - INFO - - Running 2020-06-01 05:34:32,438 - synapse.app.homeserver - 30 - INFO - - Set file limit to: 1048576 2020-06-01 05:34:32,456 - synapse.config.tls - 517 - INFO - - Loading TLS key from /data/temptest.draak.fr.tls.key 2020-06-01 05:34:32,475 - synapse.config.tls - 494 - INFO - - Loading TLS certificate from /data/temptest.draak.fr.tls.crt 2020-06-01 05:34:32,714 - twisted - 192 - ERROR - - Traceback (most recent call last): 2020-06-01 05:34:32,718 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 278, in start 2020-06-01 05:34:32,720 - twisted - 192 - ERROR - - hs.start_listening(listeners) 2020-06-01 05:34:32,722 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 292, in start_listening 2020-06-01 05:34:32,724 - twisted - 192 - ERROR - - self._listening_services.extend(self._listener_http(config, listener)) 2020-06-01 05:34:32,726 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 106, in _listener_http 2020-06-01 05:34:32,728 - twisted - 192 - ERROR - - self._configure_named_resource(name, res.get("compress", False)) 2020-06-01 05:34:32,730 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 182, in _configure_named_resource 2020-06-01 05:34:32,733 - twisted - 192 - ERROR - - client_resource = ClientRestResource(self) 2020-06-01 05:34:32,735 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 73, in __init__ 2020-06-01 05:34:32,737 - twisted - 192 - ERROR - - self.register_servlets(self, hs) 2020-06-01 05:34:32,739 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 80, in register_servlets 2020-06-01 05:34:32,741 - twisted - 192 - ERROR - - initial_sync.register_servlets(hs, client_resource) 2020-06-01 05:34:32,744 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 47, in register_servlets 2020-06-01 05:34:32,746 - twisted - 192 - ERROR - - InitialSyncRestServlet(hs).register(http_server) 2020-06-01 05:34:32,748 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 28, in __init__ 2020-06-01 05:34:32,750 - twisted - 192 - ERROR - - self.initial_sync_handler = hs.get_initial_sync_handler() 2020-06-01 05:34:32,752 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get 2020-06-01 05:34:32,754 - twisted - 192 - ERROR - - dep = builder() 2020-06-01 05:34:32,756 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 422, in build_initial_sync_handler 2020-06-01 05:34:32,757 - twisted - 192 - ERROR - - return InitialSyncHandler(self) 2020-06-01 05:34:32,759 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/handlers/initial_sync.py", line 39, in __init__ 2020-06-01 05:34:32,761 - twisted - 192 - ERROR - - super(InitialSyncHandler, self).__init__(hs) 2020-06-01 05:34:32,763 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/handlers/_base.py", line 43, in __init__ 2020-06-01 05:34:32,766 - twisted - 192 - ERROR - - self.auth = hs.get_auth() 2020-06-01 05:34:32,767 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get 2020-06-01 05:34:32,769 - twisted - 192 - ERROR - - dep = builder() 2020-06-01 05:34:32,771 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/server.py", line 339, in build_auth 2020-06-01 05:34:32,773 - twisted - 192 - ERROR - - return Auth(self) 2020-06-01 05:34:32,775 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/api/auth.py", line 78, in __init__ 2020-06-01 05:34:32,777 - twisted - 192 - ERROR - - register_cache("cache", "token_cache", self.token_cache) 2020-06-01 05:34:32,778 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/util/caches/__init__.py", line 117, in register_cache 2020-06-01 05:34:32,780 - twisted - 192 - ERROR - - add_resizable_cache(cache_name, resize_callback) 2020-06-01 05:34:32,782 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 76, in add_resizable_cache 2020-06-01 05:34:32,784 - twisted - 192 - ERROR - - properties.resize_all_caches_func() 2020-06-01 05:34:32,786 - twisted - 192 - ERROR - - File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 196, in resize_all_caches 2020-06-01 05:34:32,788 - twisted - 192 - ERROR - - for cache_name, callback in _CACHES.items(): 2020-06-01 05:34:32,790 - twisted - 192 - ERROR - - RuntimeError: dictionary changed size during iteration 2020-06-01 05:34:32,912 - twisted - 192 - CRITICAL - - Unhandled error in Deferred: 2020-06-01 05:34:32,919 - twisted - 192 - CRITICAL - - Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 278, in start hs.start_listening(listeners) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 292, in start_listening self._listening_services.extend(self._listener_http(config, listener)) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 106, in _listener_http self._configure_named_resource(name, res.get("compress", False)) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 182, in _configure_named_resource client_resource = ClientRestResource(self) File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 73, in __init__ self.register_servlets(self, hs) File "/usr/lib/python3.8/site-packages/synapse/rest/__init__.py", line 80, in register_servlets initial_sync.register_servlets(hs, client_resource) File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 47, in register_servlets InitialSyncRestServlet(hs).register(http_server) File "/usr/lib/python3.8/site-packages/synapse/rest/client/v1/initial_sync.py", line 28, in __init__ self.initial_sync_handler = hs.get_initial_sync_handler() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get dep = builder() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 422, in build_initial_sync_handler return InitialSyncHandler(self) File "/usr/lib/python3.8/site-packages/synapse/handlers/initial_sync.py", line 39, in __init__ super(InitialSyncHandler, self).__init__(hs) File "/usr/lib/python3.8/site-packages/synapse/handlers/_base.py", line 43, in __init__ self.auth = hs.get_auth() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 622, in _get dep = builder() File "/usr/lib/python3.8/site-packages/synapse/server.py", line 339, in build_auth return Auth(self) File "/usr/lib/python3.8/site-packages/synapse/api/auth.py", line 78, in __init__ register_cache("cache", "token_cache", self.token_cache) File "/usr/lib/python3.8/site-packages/synapse/util/caches/__init__.py", line 117, in register_cache add_resizable_cache(cache_name, resize_callback) File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 76, in add_resizable_cache properties.resize_all_caches_func() File "/usr/lib/python3.8/site-packages/synapse/config/cache.py", line 196, in resize_all_caches for cache_name, callback in _CACHES.items(): RuntimeError: dictionary changed size during iteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/usr/lib/python3.8/site-packages/synapse/app/homeserver.py", line 440, in start _base.start(hs, config.listeners) File "/usr/lib/python3.8/site-packages/synapse/app/_base.py", line 298, in start sys.exit(1) SystemExit: 1 2020-06-01 05:34:33,018 - twisted - 192 - INFO - - Main loop terminated.
RuntimeError
def __init__(self, hs): super(GenericWorkerReplicationHandler, self).__init__(hs) self.store = hs.get_datastore() self.typing_handler = hs.get_typing_handler() self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence self.notifier = hs.get_notifier() self.notify_pushers = hs.config.start_pushers self.pusher_pool = hs.get_pusherpool() self.send_handler = None # type: Optional[FederationSenderHandler] if hs.config.send_federation: self.send_handler = FederationSenderHandler(hs)
def __init__(self, hs): super(GenericWorkerReplicationHandler, self).__init__(hs) self.store = hs.get_datastore() self.typing_handler = hs.get_typing_handler() self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence self.notifier = hs.get_notifier() self.notify_pushers = hs.config.start_pushers self.pusher_pool = hs.get_pusherpool() if hs.config.send_federation: self.send_handler = FederationSenderHandler(hs, self) else: self.send_handler = None
https://github.com/matrix-org/synapse/issues/7535
synapse.app.generic_worker: [replication-RDATA-federation-3176] Error updating federation stream position Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/synapse/app/generic_worker.py", line 851, in update_token self.replication_client.send_federation_ack( AttributeError: 'GenericWorkerReplicationHandler' object has no attribute 'send_federation_ack'
AttributeError
async def _process_and_notify(self, stream_name, instance_name, token, rows): try: if self.send_handler: await self.send_handler.process_replication_rows(stream_name, token, rows) if stream_name == PushRulesStream.NAME: self.notifier.on_new_event( "push_rules_key", token, users=[row.user_id for row in rows] ) elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): self.notifier.on_new_event( "account_data_key", token, users=[row.user_id for row in rows] ) elif stream_name == ReceiptsStream.NAME: self.notifier.on_new_event( "receipt_key", token, rooms=[row.room_id for row in rows] ) await self.pusher_pool.on_new_receipts( token, token, {row.room_id for row in rows} ) elif stream_name == TypingStream.NAME: self.typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( "typing_key", token, rooms=[row.room_id for row in rows] ) elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if entities: self.notifier.on_new_event("to_device_key", token, users=entities) elif stream_name == DeviceListsStream.NAME: all_room_ids = set() # type: Set[str] for row in rows: if row.entity.startswith("@"): room_ids = await self.store.get_rooms_for_user(row.entity) all_room_ids.update(room_ids) self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) elif stream_name == PresenceStream.NAME: await self.presence_handler.process_replication_rows(token, rows) elif stream_name == GroupServerStream.NAME: self.notifier.on_new_event( "groups_key", token, users=[row.user_id for row in rows] ) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: self.stop_pusher(row.user_id, row.app_id, row.pushkey) else: await self.start_pusher(row.user_id, row.app_id, row.pushkey) except Exception: logger.exception("Error processing replication")
async def _process_and_notify(self, stream_name, instance_name, token, rows): try: if self.send_handler: await self.send_handler.process_replication_rows(stream_name, token, rows) if stream_name == PushRulesStream.NAME: self.notifier.on_new_event( "push_rules_key", token, users=[row.user_id for row in rows] ) elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): self.notifier.on_new_event( "account_data_key", token, users=[row.user_id for row in rows] ) elif stream_name == ReceiptsStream.NAME: self.notifier.on_new_event( "receipt_key", token, rooms=[row.room_id for row in rows] ) await self.pusher_pool.on_new_receipts( token, token, {row.room_id for row in rows} ) elif stream_name == TypingStream.NAME: self.typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( "typing_key", token, rooms=[row.room_id for row in rows] ) elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if entities: self.notifier.on_new_event("to_device_key", token, users=entities) elif stream_name == DeviceListsStream.NAME: all_room_ids = set() for row in rows: if row.entity.startswith("@"): room_ids = await self.store.get_rooms_for_user(row.entity) all_room_ids.update(room_ids) self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) elif stream_name == PresenceStream.NAME: await self.presence_handler.process_replication_rows(token, rows) elif stream_name == GroupServerStream.NAME: self.notifier.on_new_event( "groups_key", token, users=[row.user_id for row in rows] ) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: self.stop_pusher(row.user_id, row.app_id, row.pushkey) else: await self.start_pusher(row.user_id, row.app_id, row.pushkey) except Exception: logger.exception("Error processing replication")
https://github.com/matrix-org/synapse/issues/7535
synapse.app.generic_worker: [replication-RDATA-federation-3176] Error updating federation stream position Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/synapse/app/generic_worker.py", line 851, in update_token self.replication_client.send_federation_ack( AttributeError: 'GenericWorkerReplicationHandler' object has no attribute 'send_federation_ack'
AttributeError
def __init__(self, hs: GenericWorkerServer): self.store = hs.get_datastore() self._is_mine_id = hs.is_mine_id self.federation_sender = hs.get_federation_sender() self._hs = hs # if the worker is restarted, we want to pick up where we left off in # the replication stream, so load the position from the database. # # XXX is this actually worthwhile? Whenever the master is restarted, we'll # drop some rows anyway (which is mostly fine because we're only dropping # typing and presence notifications). If the replication stream is # unreliable, why do we do all this hoop-jumping to store the position in the # database? See also https://github.com/matrix-org/synapse/issues/7535. # self.federation_position = self.store.federation_out_pos_startup self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") self._last_ack = self.federation_position
def __init__(self, hs: GenericWorkerServer, replication_client): self.store = hs.get_datastore() self._is_mine_id = hs.is_mine_id self.federation_sender = hs.get_federation_sender() self.replication_client = replication_client self.federation_position = self.store.federation_out_pos_startup self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") self._last_ack = self.federation_position self._room_serials = {} self._room_typing = {}
https://github.com/matrix-org/synapse/issues/7535
synapse.app.generic_worker: [replication-RDATA-federation-3176] Error updating federation stream position Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/synapse/app/generic_worker.py", line 851, in update_token self.replication_client.send_federation_ack( AttributeError: 'GenericWorkerReplicationHandler' object has no attribute 'send_federation_ack'
AttributeError
async def update_token(self, token): """Update the record of where we have processed to in the federation stream. Called after we have processed a an update received over replication. Sends a FEDERATION_ACK back to the master, and stores the token that we have processed in `federation_stream_position` so that we can restart where we left off. """ try: self.federation_position = token # We linearize here to ensure we don't have races updating the token # # XXX this appears to be redundant, since the ReplicationCommandHandler # has a linearizer which ensures that we only process one line of # replication data at a time. Should we remove it, or is it doing useful # service for robustness? Or could we replace it with an assertion that # we're not being re-entered? with await self._fed_position_linearizer.queue(None): await self.store.update_federation_out_pos( "federation", self.federation_position ) # We ACK this token over replication so that the master can drop # its in memory queues self._hs.get_tcp_replication().send_federation_ack(self.federation_position) self._last_ack = self.federation_position except Exception: logger.exception("Error updating federation stream position")
async def update_token(self, token): try: self.federation_position = token # We linearize here to ensure we don't have races updating the token with await self._fed_position_linearizer.queue(None): if self._last_ack < self.federation_position: await self.store.update_federation_out_pos( "federation", self.federation_position ) # We ACK this token over replication so that the master can drop # its in memory queues self.replication_client.send_federation_ack(self.federation_position) self._last_ack = self.federation_position except Exception: logger.exception("Error updating federation stream position")
https://github.com/matrix-org/synapse/issues/7535
synapse.app.generic_worker: [replication-RDATA-federation-3176] Error updating federation stream position Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/var/lib/synapse/venv/lib/python3.8/site-packages/synapse/app/generic_worker.py", line 851, in update_token self.replication_client.send_federation_ack( AttributeError: 'GenericWorkerReplicationHandler' object has no attribute 'send_federation_ack'
AttributeError
def __init__(self, hs): self.store = hs.get_datastore() self.federation = hs.get_federation_client() self.device_handler = hs.get_device_handler() self.is_mine = hs.is_mine self.clock = hs.get_clock() self._edu_updater = SigningKeyEduUpdater(hs, self) federation_registry = hs.get_federation_registry() self._is_master = hs.config.worker_app is None if not self._is_master: self._user_device_resync_client = ( ReplicationUserDevicesResyncRestServlet.make_client(hs) ) else: # Only register this edu handler on master as it requires writing # device updates to the db # # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec federation_registry.register_edu_handler( "org.matrix.signing_key_update", self._edu_updater.incoming_signing_key_update, ) # doesn't really work as part of the generic query API, because the # query request requires an object POST, but we abuse the # "query handler" interface. federation_registry.register_query_handler( "client_keys", self.on_federation_query_client_keys )
def __init__(self, hs): self.store = hs.get_datastore() self.federation = hs.get_federation_client() self.device_handler = hs.get_device_handler() self.is_mine = hs.is_mine self.clock = hs.get_clock() self._edu_updater = SigningKeyEduUpdater(hs, self) self._is_master = hs.config.worker_app is None if not self._is_master: self._user_device_resync_client = ( ReplicationUserDevicesResyncRestServlet.make_client(hs) ) federation_registry = hs.get_federation_registry() # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec federation_registry.register_edu_handler( "org.matrix.signing_key_update", self._edu_updater.incoming_signing_key_update, ) # doesn't really work as part of the generic query API, because the # query request requires an object POST, but we abuse the # "query handler" interface. federation_registry.register_query_handler( "client_keys", self.on_federation_query_client_keys )
https://github.com/matrix-org/synapse/issues/7252
2020-04-08 09:38:48,601 - synapse.federation.federation_server - 781 - ERROR - PUT-22124562 - Failed to handle edu 'org.matrix.signing_key_update' Capture point (most recent call last): File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/synapse/src/synapse/app/federation_reader.py", line 24, in <module> start(sys.argv[1:]) File "/home/synapse/src/synapse/app/generic_worker.py", line 930, in start _base.start_worker_reactor("synapse-generic-worker", config) File "/home/synapse/src/synapse/app/_base.py", line 77, in start_worker_reactor run_command=run_command, File "/home/synapse/src/synapse/app/_base.py", line 137, in start_reactor daemon.start() File "/home/synapse/env-py37/lib/python3.7/site-packages/daemonize.py", line 248, in start self.action(*privileged_action_result) File "/home/synapse/src/synapse/app/_base.py", line 114, in run run_command() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1283, in run self.mainLoop() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1292, in mainLoop self.runUntilCurrent() File "/home/synapse/src/synapse/metrics/__init__.py", line 436, in f ret = func(*args, **kwargs) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 886, in runUntilCurrent f(*a, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1421, in _inlineCallbacks status.deferred.callback(getattr(e, "value", None)) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1421, in _inlineCallbacks status.deferred.callback(getattr(e, "value", None)) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/src/synapse/http/server.py", line 209, in wrapped_async_request_handler await h(self, request) File "/home/synapse/src/synapse/http/server.py", line 78, in wrapped_request_handler await h(self, request) File "/home/synapse/src/synapse/http/server.py", line 331, in _async_render callback_return = await callback_return File "/home/synapse/src/synapse/federation/transport/server.py", line 344, in new_func origin, content, request.args, *args, **kwargs File "/home/synapse/src/synapse/federation/transport/server.py", line 424, in on_PUT origin, transaction_data File "/home/synapse/src/synapse/federation/federation_server.py", line 130, in on_incoming_transaction origin, transaction, request_time File "/home/synapse/src/synapse/federation/federation_server.py", line 179, in _handle_incoming_transaction run_in_background(self._handle_edus_in_txn, origin, transaction), File "/home/synapse/src/synapse/logging/context.py", line 616, in run_in_background res = defer.ensureDeferred(res) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 911, in ensureDeferred return _cancellableInlineCallbacks(coro) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks _inlineCallbacks(None, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/src/synapse/federation/federation_server.py", line 310, in _handle_edus_in_txn TRANSACTION_CONCURRENCY_LIMIT, File "/home/synapse/src/synapse/util/async_helpers.py", line 165, in concurrently_execute [run_in_background(_concurrently_execute_inner) for _ in range(limit)], File "/home/synapse/src/synapse/util/async_helpers.py", line 165, in <listcomp> [run_in_background(_concurrently_execute_inner) for _ in range(limit)], File "/home/synapse/src/synapse/logging/context.py", line 616, in run_in_background res = defer.ensureDeferred(res) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 911, in ensureDeferred return _cancellableInlineCallbacks(coro) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks _inlineCallbacks(None, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/src/synapse/util/async_helpers.py", line 159, in _concurrently_execute_inner await maybe_awaitable(func(next(it))) File "/home/synapse/src/synapse/federation/federation_server.py", line 305, in _process_edu await self.registry.on_edu(edu.edu_type, origin, edu.content) File "/home/synapse/src/synapse/federation/federation_server.py", line 819, in on_edu edu_type, origin, content Traceback (most recent call last): File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/src/synapse/federation/federation_server.py", line 777, in on_edu await handler(origin, content) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 749, in send raise result.value AttributeError: 'GenericWorkerSlavedStore' object has no attribute 'set_e2e_cross_signing_key'
AttributeError
def _get_public_room_list( self, limit: Optional[int] = None, since_token: Optional[str] = None, search_filter: Optional[Dict] = None, network_tuple: ThirdPartyInstanceID = EMPTY_THIRD_PARTY_ID, from_federation: bool = False, ) -> Dict[str, Any]: """Generate a public room list. Args: limit: Maximum amount of rooms to return. since_token: search_filter: Dictionary to filter rooms by. network_tuple: Which public list to use. This can be (None, None) to indicate the main list, or a particular appservice and network id to use an appservice specific one. Setting to None returns all public rooms across all lists. from_federation: Whether this request originated from a federating server or a client. Used for room filtering. """ # Pagination tokens work by storing the room ID sent in the last batch, # plus the direction (forwards or backwards). Next batch tokens always # go forwards, prev batch tokens always go backwards. if since_token: batch_token = RoomListNextBatch.from_token(since_token) bounds = (batch_token.last_joined_members, batch_token.last_room_id) forwards = batch_token.direction_is_forward else: batch_token = None bounds = None forwards = True # we request one more than wanted to see if there are more pages to come probing_limit = limit + 1 if limit is not None else None results = yield self.store.get_largest_public_rooms( network_tuple, search_filter, probing_limit, bounds=bounds, forwards=forwards, ignore_non_federatable=from_federation, ) def build_room_entry(room): entry = { "room_id": room["room_id"], "name": room["name"], "topic": room["topic"], "canonical_alias": room["canonical_alias"], "num_joined_members": room["joined_members"], "avatar_url": room["avatar"], "world_readable": room["history_visibility"] == "world_readable", "guest_can_join": room["guest_access"] == "can_join", } # Filter out Nones – rather omit the field altogether return {k: v for k, v in entry.items() if v is not None} results = [build_room_entry(r) for r in results] response = {} num_results = len(results) if limit is not None: more_to_come = num_results == probing_limit # Depending on direction we trim either the front or back. if forwards: results = results[:limit] else: results = results[-limit:] else: more_to_come = False if num_results > 0: final_entry = results[-1] initial_entry = results[0] if forwards: if batch_token: # If there was a token given then we assume that there # must be previous results. response["prev_batch"] = RoomListNextBatch( last_joined_members=initial_entry["num_joined_members"], last_room_id=initial_entry["room_id"], direction_is_forward=False, ).to_token() if more_to_come: response["next_batch"] = RoomListNextBatch( last_joined_members=final_entry["num_joined_members"], last_room_id=final_entry["room_id"], direction_is_forward=True, ).to_token() else: if batch_token: response["next_batch"] = RoomListNextBatch( last_joined_members=final_entry["num_joined_members"], last_room_id=final_entry["room_id"], direction_is_forward=True, ).to_token() if more_to_come: response["prev_batch"] = RoomListNextBatch( last_joined_members=initial_entry["num_joined_members"], last_room_id=initial_entry["room_id"], direction_is_forward=False, ).to_token() response["chunk"] = results response["total_room_count_estimate"] = yield self.store.count_public_rooms( network_tuple, ignore_non_federatable=from_federation ) return response
def _get_public_room_list( self, limit=None, since_token=None, search_filter=None, network_tuple=EMPTY_THIRD_PARTY_ID, from_federation=False, ): """Generate a public room list. Args: limit (int|None): Maximum amount of rooms to return. since_token (str|None) search_filter (dict|None): Dictionary to filter rooms by. network_tuple (ThirdPartyInstanceID): Which public list to use. This can be (None, None) to indicate the main list, or a particular appservice and network id to use an appservice specific one. Setting to None returns all public rooms across all lists. from_federation (bool): Whether this request originated from a federating server or a client. Used for room filtering. """ # Pagination tokens work by storing the room ID sent in the last batch, # plus the direction (forwards or backwards). Next batch tokens always # go forwards, prev batch tokens always go backwards. if since_token: batch_token = RoomListNextBatch.from_token(since_token) bounds = (batch_token.last_joined_members, batch_token.last_room_id) forwards = batch_token.direction_is_forward else: batch_token = None bounds = None forwards = True # we request one more than wanted to see if there are more pages to come probing_limit = limit + 1 if limit is not None else None results = yield self.store.get_largest_public_rooms( network_tuple, search_filter, probing_limit, bounds=bounds, forwards=forwards, ignore_non_federatable=from_federation, ) def build_room_entry(room): entry = { "room_id": room["room_id"], "name": room["name"], "topic": room["topic"], "canonical_alias": room["canonical_alias"], "num_joined_members": room["joined_members"], "avatar_url": room["avatar"], "world_readable": room["history_visibility"] == "world_readable", "guest_can_join": room["guest_access"] == "can_join", } # Filter out Nones – rather omit the field altogether return {k: v for k, v in entry.items() if v is not None} results = [build_room_entry(r) for r in results] response = {} num_results = len(results) if limit is not None: more_to_come = num_results == probing_limit # Depending on direction we trim either the front or back. if forwards: results = results[:limit] else: results = results[-limit:] else: more_to_come = False if num_results > 0: final_entry = results[-1] initial_entry = results[0] if forwards: if batch_token: # If there was a token given then we assume that there # must be previous results. response["prev_batch"] = RoomListNextBatch( last_joined_members=initial_entry["num_joined_members"], last_room_id=initial_entry["room_id"], direction_is_forward=False, ).to_token() if more_to_come: response["next_batch"] = RoomListNextBatch( last_joined_members=final_entry["num_joined_members"], last_room_id=final_entry["room_id"], direction_is_forward=True, ).to_token() else: if batch_token: response["next_batch"] = RoomListNextBatch( last_joined_members=final_entry["num_joined_members"], last_room_id=final_entry["room_id"], direction_is_forward=True, ).to_token() if more_to_come: response["prev_batch"] = RoomListNextBatch( last_joined_members=initial_entry["num_joined_members"], last_room_id=initial_entry["room_id"], direction_is_forward=False, ).to_token() response["chunk"] = results response["total_room_count_estimate"] = yield self.store.count_public_rooms( network_tuple, ignore_non_federatable=from_federation ) return response
https://github.com/matrix-org/synapse/issues/6325
2019-11-04 19:52:26,074 - synapse.http.server - 109 - ERROR - POST-705775- Failed handle request via 'PublicRoomListRestServlet': <XForwardedForRequest at 0x80e5ef048 method='POST' uri='/_matrix/client/r0/publicRooms?server=domain.tld&amp;access_token=<redacted>' clientproto='HTTP/1.1' site=8008> Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/synapse/http/server.py", line 77, in wrapped_request_handler await h(self, request) File "/usr/local/lib/python3.6/site-packages/synapse/http/server.py", line 326, in _async_render callback_return = await callback_return File "/usr/local/lib/python3.6/site-packages/synapse/rest/client/v1/room.py", line 417, in on_POST third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/usr/local/lib/python3.6/site-packages/synapse/handlers/room_list.py", line 391, in get_remote_public_room_list third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/synapse/handlers/room_list.py", line 441, in _get_remote_list_cached third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/synapse/util/caches/response_cache.py", line 151, in wrap result = self.set(key, d) File "/usr/local/lib/python3.6/site-packages/synapse/util/caches/response_cache.py", line 95, in set result = ObservableDeferred(deferred, consumeErrors=True) File "/usr/local/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 87, in __init__ deferred.addCallbacks(callback, errback) AttributeError: 'NoneType' object has no attribute 'addCallbacks'
AttributeError
async def on_GET(self, request): server = parse_string(request, "server", default=None) try: await self.auth.get_user_by_req(request, allow_guest=True) except InvalidClientCredentialsError as e: # Option to allow servers to require auth when accessing # /publicRooms via CS API. This is especially helpful in private # federations. if not self.hs.config.allow_public_rooms_without_auth: raise # We allow people to not be authed if they're just looking at our # room list, but require auth when we proxy the request. # In both cases we call the auth function, as that has the side # effect of logging who issued this request if an access token was # provided. if server: raise e else: pass limit = parse_integer(request, "limit", 0) since_token = parse_string(request, "since", None) if limit == 0: # zero is a special value which corresponds to no limit. limit = None handler = self.hs.get_room_list_handler() if server and server != self.hs.config.server_name: try: data = await handler.get_remote_public_room_list( server, limit=limit, since_token=since_token ) except HttpResponseException as e: raise e.to_synapse_error() else: data = await handler.get_local_public_room_list( limit=limit, since_token=since_token ) return 200, data
async def on_GET(self, request): server = parse_string(request, "server", default=None) try: await self.auth.get_user_by_req(request, allow_guest=True) except InvalidClientCredentialsError as e: # Option to allow servers to require auth when accessing # /publicRooms via CS API. This is especially helpful in private # federations. if not self.hs.config.allow_public_rooms_without_auth: raise # We allow people to not be authed if they're just looking at our # room list, but require auth when we proxy the request. # In both cases we call the auth function, as that has the side # effect of logging who issued this request if an access token was # provided. if server: raise e else: pass limit = parse_integer(request, "limit", 0) since_token = parse_string(request, "since", None) if limit == 0: # zero is a special value which corresponds to no limit. limit = None handler = self.hs.get_room_list_handler() if server: data = await handler.get_remote_public_room_list( server, limit=limit, since_token=since_token ) else: data = await handler.get_local_public_room_list( limit=limit, since_token=since_token ) return 200, data
https://github.com/matrix-org/synapse/issues/6325
2019-11-04 19:52:26,074 - synapse.http.server - 109 - ERROR - POST-705775- Failed handle request via 'PublicRoomListRestServlet': <XForwardedForRequest at 0x80e5ef048 method='POST' uri='/_matrix/client/r0/publicRooms?server=domain.tld&amp;access_token=<redacted>' clientproto='HTTP/1.1' site=8008> Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/synapse/http/server.py", line 77, in wrapped_request_handler await h(self, request) File "/usr/local/lib/python3.6/site-packages/synapse/http/server.py", line 326, in _async_render callback_return = await callback_return File "/usr/local/lib/python3.6/site-packages/synapse/rest/client/v1/room.py", line 417, in on_POST third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/usr/local/lib/python3.6/site-packages/synapse/handlers/room_list.py", line 391, in get_remote_public_room_list third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/synapse/handlers/room_list.py", line 441, in _get_remote_list_cached third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/synapse/util/caches/response_cache.py", line 151, in wrap result = self.set(key, d) File "/usr/local/lib/python3.6/site-packages/synapse/util/caches/response_cache.py", line 95, in set result = ObservableDeferred(deferred, consumeErrors=True) File "/usr/local/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 87, in __init__ deferred.addCallbacks(callback, errback) AttributeError: 'NoneType' object has no attribute 'addCallbacks'
AttributeError
async def on_POST(self, request): await self.auth.get_user_by_req(request, allow_guest=True) server = parse_string(request, "server", default=None) content = parse_json_object_from_request(request) limit = int(content.get("limit", 100)) # type: Optional[int] since_token = content.get("since", None) search_filter = content.get("filter", None) include_all_networks = content.get("include_all_networks", False) third_party_instance_id = content.get("third_party_instance_id", None) if include_all_networks: network_tuple = None if third_party_instance_id is not None: raise SynapseError( 400, "Can't use include_all_networks with an explicit network" ) elif third_party_instance_id is None: network_tuple = ThirdPartyInstanceID(None, None) else: network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id) if limit == 0: # zero is a special value which corresponds to no limit. limit = None handler = self.hs.get_room_list_handler() if server and server != self.hs.config.server_name: try: data = await handler.get_remote_public_room_list( server, limit=limit, since_token=since_token, search_filter=search_filter, include_all_networks=include_all_networks, third_party_instance_id=third_party_instance_id, ) except HttpResponseException as e: raise e.to_synapse_error() else: data = await handler.get_local_public_room_list( limit=limit, since_token=since_token, search_filter=search_filter, network_tuple=network_tuple, ) return 200, data
async def on_POST(self, request): await self.auth.get_user_by_req(request, allow_guest=True) server = parse_string(request, "server", default=None) content = parse_json_object_from_request(request) limit = int(content.get("limit", 100)) # type: Optional[int] since_token = content.get("since", None) search_filter = content.get("filter", None) include_all_networks = content.get("include_all_networks", False) third_party_instance_id = content.get("third_party_instance_id", None) if include_all_networks: network_tuple = None if third_party_instance_id is not None: raise SynapseError( 400, "Can't use include_all_networks with an explicit network" ) elif third_party_instance_id is None: network_tuple = ThirdPartyInstanceID(None, None) else: network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id) if limit == 0: # zero is a special value which corresponds to no limit. limit = None handler = self.hs.get_room_list_handler() if server: data = await handler.get_remote_public_room_list( server, limit=limit, since_token=since_token, search_filter=search_filter, include_all_networks=include_all_networks, third_party_instance_id=third_party_instance_id, ) else: data = await handler.get_local_public_room_list( limit=limit, since_token=since_token, search_filter=search_filter, network_tuple=network_tuple, ) return 200, data
https://github.com/matrix-org/synapse/issues/6325
2019-11-04 19:52:26,074 - synapse.http.server - 109 - ERROR - POST-705775- Failed handle request via 'PublicRoomListRestServlet': <XForwardedForRequest at 0x80e5ef048 method='POST' uri='/_matrix/client/r0/publicRooms?server=domain.tld&amp;access_token=<redacted>' clientproto='HTTP/1.1' site=8008> Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/synapse/http/server.py", line 77, in wrapped_request_handler await h(self, request) File "/usr/local/lib/python3.6/site-packages/synapse/http/server.py", line 326, in _async_render callback_return = await callback_return File "/usr/local/lib/python3.6/site-packages/synapse/rest/client/v1/room.py", line 417, in on_POST third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/usr/local/lib/python3.6/site-packages/synapse/handlers/room_list.py", line 391, in get_remote_public_room_list third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/synapse/handlers/room_list.py", line 441, in _get_remote_list_cached third_party_instance_id=third_party_instance_id, File "/usr/local/lib/python3.6/site-packages/synapse/util/caches/response_cache.py", line 151, in wrap result = self.set(key, d) File "/usr/local/lib/python3.6/site-packages/synapse/util/caches/response_cache.py", line 95, in set result = ObservableDeferred(deferred, consumeErrors=True) File "/usr/local/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 87, in __init__ deferred.addCallbacks(callback, errback) AttributeError: 'NoneType' object has no attribute 'addCallbacks'
AttributeError
def upload_room_keys(self, user_id, version, room_keys): """Bulk upload a list of room keys into a given backup version, asserting that the given version is the current backup version. room_keys are merged into the current backup as described in RoomKeysServlet.on_PUT(). Args: user_id(str): the user whose backup we're setting version(str): the version ID of the backup we're updating room_keys(dict): a nested dict describing the room_keys we're setting: { "rooms": { "!abc:matrix.org": { "sessions": { "c0ff33": { "first_message_index": 1, "forwarded_count": 1, "is_verified": false, "session_data": "SSBBTSBBIEZJU0gK" } } } } } Returns: A dict containing the count and etag for the backup version Raises: NotFoundError: if there are no versions defined RoomKeysVersionError: if the uploaded version is not the current version """ # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version try: version_info = yield self.store.get_e2e_room_keys_version_info(user_id) except StoreError as e: if e.code == 404: raise NotFoundError("Version '%s' not found" % (version,)) else: raise if version_info["version"] != version: # Check that the version we're trying to upload actually exists try: version_info = yield self.store.get_e2e_room_keys_version_info( user_id, version ) # if we get this far, the version must exist raise RoomKeysVersionError(current_version=version_info["version"]) except StoreError as e: if e.code == 404: raise NotFoundError("Version '%s' not found" % (version,)) else: raise # Fetch any existing room keys for the sessions that have been # submitted. Then compare them with the submitted keys. If the # key is new, insert it; if the key should be updated, then update # it; otherwise, drop it. existing_keys = yield self.store.get_e2e_room_keys_multi( user_id, version, room_keys["rooms"] ) to_insert = [] # batch the inserts together changed = False # if anything has changed, we need to update the etag for room_id, room in iteritems(room_keys["rooms"]): for session_id, room_key in iteritems(room["sessions"]): if not isinstance(room_key["is_verified"], bool): msg = ( "is_verified must be a boolean in keys for session %s in" "room %s" % (session_id, room_id) ) raise SynapseError(400, msg, Codes.INVALID_PARAM) log_kv( { "message": "Trying to upload room key", "room_id": room_id, "session_id": session_id, "user_id": user_id, } ) current_room_key = existing_keys.get(room_id, {}).get(session_id) if current_room_key: if self._should_replace_room_key(current_room_key, room_key): log_kv({"message": "Replacing room key."}) # updates are done one at a time in the DB, so send # updates right away rather than batching them up, # like we do with the inserts yield self.store.update_e2e_room_key( user_id, version, room_id, session_id, room_key ) changed = True else: log_kv({"message": "Not replacing room_key."}) else: log_kv( { "message": "Room key not found.", "room_id": room_id, "user_id": user_id, } ) log_kv({"message": "Replacing room key."}) to_insert.append((room_id, session_id, room_key)) changed = True if len(to_insert): yield self.store.add_e2e_room_keys(user_id, version, to_insert) version_etag = version_info["etag"] if changed: version_etag = version_etag + 1 yield self.store.update_e2e_room_keys_version( user_id, version, None, version_etag ) count = yield self.store.count_e2e_room_keys(user_id, version) return {"etag": str(version_etag), "count": count}
def upload_room_keys(self, user_id, version, room_keys): """Bulk upload a list of room keys into a given backup version, asserting that the given version is the current backup version. room_keys are merged into the current backup as described in RoomKeysServlet.on_PUT(). Args: user_id(str): the user whose backup we're setting version(str): the version ID of the backup we're updating room_keys(dict): a nested dict describing the room_keys we're setting: { "rooms": { "!abc:matrix.org": { "sessions": { "c0ff33": { "first_message_index": 1, "forwarded_count": 1, "is_verified": false, "session_data": "SSBBTSBBIEZJU0gK" } } } } } Returns: A dict containing the count and etag for the backup version Raises: NotFoundError: if there are no versions defined RoomKeysVersionError: if the uploaded version is not the current version """ # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? with (yield self._upload_linearizer.queue(user_id)): # Check that the version we're trying to upload is the current version try: version_info = yield self.store.get_e2e_room_keys_version_info(user_id) except StoreError as e: if e.code == 404: raise NotFoundError("Version '%s' not found" % (version,)) else: raise if version_info["version"] != version: # Check that the version we're trying to upload actually exists try: version_info = yield self.store.get_e2e_room_keys_version_info( user_id, version ) # if we get this far, the version must exist raise RoomKeysVersionError(current_version=version_info["version"]) except StoreError as e: if e.code == 404: raise NotFoundError("Version '%s' not found" % (version,)) else: raise # Fetch any existing room keys for the sessions that have been # submitted. Then compare them with the submitted keys. If the # key is new, insert it; if the key should be updated, then update # it; otherwise, drop it. existing_keys = yield self.store.get_e2e_room_keys_multi( user_id, version, room_keys["rooms"] ) to_insert = [] # batch the inserts together changed = False # if anything has changed, we need to update the etag for room_id, room in iteritems(room_keys["rooms"]): for session_id, room_key in iteritems(room["sessions"]): log_kv( { "message": "Trying to upload room key", "room_id": room_id, "session_id": session_id, "user_id": user_id, } ) current_room_key = existing_keys.get(room_id, {}).get(session_id) if current_room_key: if self._should_replace_room_key(current_room_key, room_key): log_kv({"message": "Replacing room key."}) # updates are done one at a time in the DB, so send # updates right away rather than batching them up, # like we do with the inserts yield self.store.update_e2e_room_key( user_id, version, room_id, session_id, room_key ) changed = True else: log_kv({"message": "Not replacing room_key."}) else: log_kv( { "message": "Room key not found.", "room_id": room_id, "user_id": user_id, } ) log_kv({"message": "Replacing room key."}) to_insert.append((room_id, session_id, room_key)) changed = True if len(to_insert): yield self.store.add_e2e_room_keys(user_id, version, to_insert) version_etag = version_info["etag"] if changed: version_etag = version_etag + 1 yield self.store.update_e2e_room_keys_version( user_id, version, None, version_etag ) count = yield self.store.count_e2e_room_keys(user_id, version) return {"etag": str(version_etag), "count": count}
https://github.com/matrix-org/synapse/issues/7036
[homeserver_1] 2020-03-04 17:46:42,842 - synapse.http.server - 110 - ERROR - PUT-58- Failed handle request via 'RoomKeysServlet': <XForwardedForRequest at 0x7fd4f193bd68 method='PUT' uri='/_matrix/client/unstable/room_keys/keys?version=2' clientproto='HTTP/1.0' site=8118> Traceback (most recent call last): File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/http/server.py", line 78, in wrapped_request_handler await h(self, request) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/http/server.py", line 331, in _async_render callback_return = await callback_return File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/rest/client/v2_alpha/room_keys.py", line 134, in on_PUT ret = await self.e2e_room_keys_handler.upload_room_keys(user_id, version, body) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/handlers/e2e_room_keys.py", line 226, in upload_room_keys user_id, version, room_id, session_id, room_key File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/data_stores/main/e2e_room_keys.py", line 55, in update_e2e_room_key desc="update_e2e_room_key", File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 495, in runInteraction **kwargs File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 543, in runWithConnection self._db_pool.runWithConnection(inner_func, *args, **kwargs) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/threadpool.py", line 250, in inContext result = inContext.theWork() File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/threadpool.py", line 266, in <lambda> inContext.theWork = lambda: context.call(ctx, func, *args, **kw) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext return func(*args,**kw) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/enterprise/adbapi.py", line 306, in _runWithConnection compat.reraise(excValue, excTraceback) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/python/compat.py", line 464, in reraise raise exception.with_traceback(traceback) File "/home/matrix/synapse/lib/python3.6/site-packages/twisted/enterprise/adbapi.py", line 297, in _runWithConnection result = func(conn, *args, **kw) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 540, in inner_func return func(conn, *args, **kwargs) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 378, in new_transaction r = func(cursor, *args, **kwargs) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 1211, in simple_update_one_txn rowcount = cls.simple_update_txn(txn, table, keyvalues, updatevalues) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 1181, in simple_update_txn txn.execute(update_sql, list(updatevalues.values()) + list(keyvalues.values())) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 175, in execute self._do_execute(self.txn.execute, sql, *args) File "/home/matrix/synapse/lib/python3.6/site-packages/synapse/storage/database.py", line 201, in _do_execute return func(sql, *args) psycopg2.ProgrammingError: can't adapt type 'dict' [homeserver_1] 2020-03-04 17:46:42,941 - synapse.access.http.8118 - 302 - INFO - PUT-58- 174.3.196.16 - 8118 - {@travis:t2l.io} Processed request: 0.144sec/0.000sec (0.093sec, 0.000sec) (0.002sec/0.047sec/3) 67B 500 "PUT /_matrix/client/unstable/room_keys/keys?version=2 HTTP/1.0" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36" [0 dbevts]
psycopg2.ProgrammingError
async def on_PUT(self, request, user_id): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) if not self.hs.is_mine(target_user): raise SynapseError(400, "This endpoint can only be used with local users") user = await self.admin_handler.get_user(target_user) user_id = target_user.to_string() if user: # modify user if "displayname" in body: await self.profile_handler.set_displayname( target_user, requester, body["displayname"], True ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) # remove old threepids from user threepids = await self.store.user_get_threepids(user_id) for threepid in threepids: try: await self.auth_handler.delete_threepid( user_id, threepid["medium"], threepid["address"], None ) except Exception: logger.exception("Failed to remove threepids") raise SynapseError(500, "Failed to remove threepids") # add new threepids to user current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True ) if "admin" in body: set_admin_to = bool(body["admin"]) if set_admin_to != user["admin"]: auth_user = requester.user if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") await self.store.set_server_admin(target_user, set_admin_to) if "password" in body: if ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") else: new_password = body["password"] await self.set_password_handler.set_password( target_user.to_string(), new_password, requester ) if "deactivated" in body: deactivate = body["deactivated"] if not isinstance(deactivate, bool): raise SynapseError( 400, "'deactivated' parameter is not of type boolean" ) if deactivate and not user["deactivated"]: await self.deactivate_account_handler.deactivate_account( target_user.to_string(), False ) user = await self.admin_handler.get_user(target_user) return 200, user else: # create user password = body.get("password") if password is not None and ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") admin = body.get("admin", None) user_type = body.get("user_type", None) displayname = body.get("displayname", None) threepids = body.get("threepids", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") user_id = await self.registration_handler.register_user( localpart=target_user.localpart, password=password, admin=bool(admin), default_display_name=displayname, user_type=user_type, ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( user_id, requester, body["avatar_url"], True ) ret = await self.admin_handler.get_user(target_user) return 201, ret
async def on_PUT(self, request, user_id): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) if not self.hs.is_mine(target_user): raise SynapseError(400, "This endpoint can only be used with local users") user = await self.admin_handler.get_user(target_user) user_id = target_user.to_string() if user: # modify user if "displayname" in body: await self.profile_handler.set_displayname( target_user, requester, body["displayname"], True ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) # remove old threepids from user threepids = await self.store.user_get_threepids(user_id) for threepid in threepids: try: await self.auth_handler.delete_threepid( user_id, threepid["medium"], threepid["address"], None ) except Exception: logger.exception("Failed to remove threepids") raise SynapseError(500, "Failed to remove threepids") # add new threepids to user current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True ) if "admin" in body: set_admin_to = bool(body["admin"]) if set_admin_to != user["admin"]: auth_user = requester.user if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") await self.admin_handler.set_user_server_admin( target_user, set_admin_to ) if "password" in body: if ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") else: new_password = body["password"] await self.set_password_handler.set_password( target_user.to_string(), new_password, requester ) if "deactivated" in body: deactivate = body["deactivated"] if not isinstance(deactivate, bool): raise SynapseError( 400, "'deactivated' parameter is not of type boolean" ) if deactivate and not user["deactivated"]: await self.deactivate_account_handler.deactivate_account( target_user.to_string(), False ) user = await self.admin_handler.get_user(target_user) return 200, user else: # create user password = body.get("password") if password is not None and ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") admin = body.get("admin", None) user_type = body.get("user_type", None) displayname = body.get("displayname", None) threepids = body.get("threepids", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") user_id = await self.registration_handler.register_user( localpart=target_user.localpart, password=password, admin=bool(admin), default_display_name=displayname, user_type=user_type, ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( user_id, requester, body["avatar_url"], True ) ret = await self.admin_handler.get_user(target_user) return 201, ret
https://github.com/matrix-org/synapse/issues/6910
2020-02-13 19:15:53,449 - synapse.http.server - 110 - ERROR - PUT-31 - Failed handle request via 'UserRestServletV2': <SynapseRequest at 0x7f6a6035bfd0 method='PUT' uri='/_synapse/admin/v2/users/@mjolnir-dev:redacted' clientproto='HTTP/1.0' site=8008> Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: ProfileInfo(avatar_url=None, display_name='mjolnir-dev') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 78, in wrapped_request_handler await h(self, request) File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 331, in _async_render callback_return = await callback_return File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/rest/admin/users.py", line 185, in on_PUT await self.admin_handler.set_user_server_admin( AttributeError: 'AdminHandler' object has no attribute 'set_user_server_admin'
AttributeError
async def on_PUT(self, request, user_id): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) auth_user = requester.user target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) assert_params_in_dict(body, ["admin"]) if not self.hs.is_mine(target_user): raise SynapseError(400, "Only local users can be admins of this homeserver") set_admin_to = bool(body["admin"]) if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") await self.store.set_server_admin(target_user, set_admin_to) return 200, {}
async def on_PUT(self, request, user_id): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) auth_user = requester.user target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) assert_params_in_dict(body, ["admin"]) if not self.hs.is_mine(target_user): raise SynapseError(400, "Only local users can be admins of this homeserver") set_admin_to = bool(body["admin"]) if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") await self.store.set_user_server_admin(target_user, set_admin_to) return 200, {}
https://github.com/matrix-org/synapse/issues/6910
2020-02-13 19:15:53,449 - synapse.http.server - 110 - ERROR - PUT-31 - Failed handle request via 'UserRestServletV2': <SynapseRequest at 0x7f6a6035bfd0 method='PUT' uri='/_synapse/admin/v2/users/@mjolnir-dev:redacted' clientproto='HTTP/1.0' site=8008> Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: ProfileInfo(avatar_url=None, display_name='mjolnir-dev') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 78, in wrapped_request_handler await h(self, request) File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 331, in _async_render callback_return = await callback_return File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/rest/admin/users.py", line 185, in on_PUT await self.admin_handler.set_user_server_admin( AttributeError: 'AdminHandler' object has no attribute 'set_user_server_admin'
AttributeError
def set_server_admin(self, user, admin): """Sets whether a user is an admin of this homeserver. Args: user (UserID): user ID of the user to test admin (bool): true iff the user is to be a server admin, false otherwise. """ def set_server_admin_txn(txn): self.db.simple_update_one_txn( txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0} ) self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user.to_string(),)) return self.db.runInteraction("set_server_admin", set_server_admin_txn)
def set_server_admin(self, user, admin): """Sets whether a user is an admin of this homeserver. Args: user (UserID): user ID of the user to test admin (bool): true iff the user is to be a server admin, false otherwise. """ return self.db.simple_update_one( table="users", keyvalues={"name": user.to_string()}, updatevalues={"admin": 1 if admin else 0}, desc="set_server_admin", )
https://github.com/matrix-org/synapse/issues/6910
2020-02-13 19:15:53,449 - synapse.http.server - 110 - ERROR - PUT-31 - Failed handle request via 'UserRestServletV2': <SynapseRequest at 0x7f6a6035bfd0 method='PUT' uri='/_synapse/admin/v2/users/@mjolnir-dev:redacted' clientproto='HTTP/1.0' site=8008> Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: ProfileInfo(avatar_url=None, display_name='mjolnir-dev') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 78, in wrapped_request_handler await h(self, request) File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 331, in _async_render callback_return = await callback_return File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/rest/admin/users.py", line 185, in on_PUT await self.admin_handler.set_user_server_admin( AttributeError: 'AdminHandler' object has no attribute 'set_user_server_admin'
AttributeError
async def on_PUT(self, request, user_id): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) if not self.hs.is_mine(target_user): raise SynapseError(400, "This endpoint can only be used with local users") user = await self.admin_handler.get_user(target_user) user_id = target_user.to_string() if user: # modify user if "displayname" in body: await self.profile_handler.set_displayname( target_user, requester, body["displayname"], True ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) # remove old threepids from user threepids = await self.store.user_get_threepids(user_id) for threepid in threepids: try: await self.auth_handler.delete_threepid( user_id, threepid["medium"], threepid["address"], None ) except Exception: logger.exception("Failed to remove threepids") raise SynapseError(500, "Failed to remove threepids") # add new threepids to user current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True ) if "admin" in body: set_admin_to = bool(body["admin"]) if set_admin_to != user["admin"]: auth_user = requester.user if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") await self.store.set_server_admin(target_user, set_admin_to) if "password" in body: if ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") else: new_password = body["password"] await self.set_password_handler.set_password( target_user.to_string(), new_password, requester ) if "deactivated" in body: deactivate = bool(body["deactivated"]) if deactivate and not user["deactivated"]: result = await self.deactivate_account_handler.deactivate_account( target_user.to_string(), False ) if not result: raise SynapseError(500, "Could not deactivate user") user = await self.admin_handler.get_user(target_user) return 200, user else: # create user password = body.get("password") if password is not None and ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") admin = body.get("admin", None) user_type = body.get("user_type", None) displayname = body.get("displayname", None) threepids = body.get("threepids", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") user_id = await self.registration_handler.register_user( localpart=target_user.localpart, password=password, admin=bool(admin), default_display_name=displayname, user_type=user_type, ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( user_id, requester, body["avatar_url"], True ) ret = await self.admin_handler.get_user(target_user) return 201, ret
async def on_PUT(self, request, user_id): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) if not self.hs.is_mine(target_user): raise SynapseError(400, "This endpoint can only be used with local users") user = await self.admin_handler.get_user(target_user) user_id = target_user.to_string() if user: # modify user if "displayname" in body: await self.profile_handler.set_displayname( target_user, requester, body["displayname"], True ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) # remove old threepids from user threepids = await self.store.user_get_threepids(user_id) for threepid in threepids: try: await self.auth_handler.delete_threepid( user_id, threepid["medium"], threepid["address"], None ) except Exception: logger.exception("Failed to remove threepids") raise SynapseError(500, "Failed to remove threepids") # add new threepids to user current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True ) if "admin" in body: set_admin_to = bool(body["admin"]) if set_admin_to != user["admin"]: auth_user = requester.user if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") await self.admin_handler.set_user_server_admin( target_user, set_admin_to ) if "password" in body: if ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") else: new_password = body["password"] await self.set_password_handler.set_password( target_user.to_string(), new_password, requester ) if "deactivated" in body: deactivate = bool(body["deactivated"]) if deactivate and not user["deactivated"]: result = await self.deactivate_account_handler.deactivate_account( target_user.to_string(), False ) if not result: raise SynapseError(500, "Could not deactivate user") user = await self.admin_handler.get_user(target_user) return 200, user else: # create user password = body.get("password") if password is not None and ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") admin = body.get("admin", None) user_type = body.get("user_type", None) displayname = body.get("displayname", None) threepids = body.get("threepids", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") user_id = await self.registration_handler.register_user( localpart=target_user.localpart, password=password, admin=bool(admin), default_display_name=displayname, user_type=user_type, ) if "threepids" in body: # check for required parameters for each threepid for threepid in body["threepids"]: assert_params_in_dict(threepid, ["medium", "address"]) current_time = self.hs.get_clock().time_msec() for threepid in body["threepids"]: await self.auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], current_time ) if "avatar_url" in body: await self.profile_handler.set_avatar_url( user_id, requester, body["avatar_url"], True ) ret = await self.admin_handler.get_user(target_user) return 201, ret
https://github.com/matrix-org/synapse/issues/6910
2020-02-13 19:15:53,449 - synapse.http.server - 110 - ERROR - PUT-31 - Failed handle request via 'UserRestServletV2': <SynapseRequest at 0x7f6a6035bfd0 method='PUT' uri='/_synapse/admin/v2/users/@mjolnir-dev:redacted' clientproto='HTTP/1.0' site=8008> Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: {'displayname': 'mjolnir-dev', 'avatar_url': None} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) StopIteration: ProfileInfo(avatar_url=None, display_name='mjolnir-dev') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 78, in wrapped_request_handler await h(self, request) File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/http/server.py", line 331, in _async_render callback_return = await callback_return File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/rest/admin/users.py", line 185, in on_PUT await self.admin_handler.set_user_server_admin( AttributeError: 'AdminHandler' object has no attribute 'set_user_server_admin'
AttributeError
def _check_sigs_and_hash_and_fetch( self, origin: str, pdus: List[EventBase], room_version: str, outlier: bool = False, include_none: bool = False, ): """Takes a list of PDUs and checks the signatures and hashs of each one. If a PDU fails its signature check then we check if we have it in the database and if not then request if from the originating server of that PDU. If a PDU fails its content hash check then it is redacted. The given list of PDUs are not modified, instead the function returns a new list. Args: origin pdu room_version outlier: Whether the events are outliers or not include_none: Whether to include None in the returned list for events that have failed their checks Returns: Deferred : A list of PDUs that have valid signatures and hashes. """ deferreds = self._check_sigs_and_hashes(room_version, pdus) @defer.inlineCallbacks def handle_check_result(pdu: EventBase, deferred: Deferred): try: res = yield make_deferred_yieldable(deferred) except SynapseError: res = None if not res: # Check local db. res = yield self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True ) if not res and pdu.origin != origin: try: # This should not exist in the base implementation, until # this is fixed, ignore it for typing. See issue #6997. res = yield defer.ensureDeferred( self.get_pdu( # type: ignore destinations=[pdu.origin], event_id=pdu.event_id, room_version=room_version, outlier=outlier, timeout=10000, ) ) except SynapseError: pass if not res: logger.warning( "Failed to find copy of %s with valid signature", pdu.event_id ) return res handle = preserve_fn(handle_check_result) deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] valid_pdus = yield make_deferred_yieldable( defer.gatherResults(deferreds2, consumeErrors=True) ).addErrback(unwrapFirstError) if include_none: return valid_pdus else: return [p for p in valid_pdus if p]
def _check_sigs_and_hash_and_fetch( self, origin, pdus, room_version, outlier=False, include_none=False ): """Takes a list of PDUs and checks the signatures and hashs of each one. If a PDU fails its signature check then we check if we have it in the database and if not then request if from the originating server of that PDU. If a PDU fails its content hash check then it is redacted. The given list of PDUs are not modified, instead the function returns a new list. Args: origin (str) pdu (list) room_version (str) outlier (bool): Whether the events are outliers or not include_none (str): Whether to include None in the returned list for events that have failed their checks Returns: Deferred : A list of PDUs that have valid signatures and hashes. """ deferreds = self._check_sigs_and_hashes(room_version, pdus) @defer.inlineCallbacks def handle_check_result(pdu, deferred): try: res = yield make_deferred_yieldable(deferred) except SynapseError: res = None if not res: # Check local db. res = yield self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True ) if not res and pdu.origin != origin: try: res = yield defer.ensureDeferred( self.get_pdu( destinations=[pdu.origin], event_id=pdu.event_id, room_version=room_version, outlier=outlier, timeout=10000, ) ) except SynapseError: pass if not res: logger.warning( "Failed to find copy of %s with valid signature", pdu.event_id ) return res handle = preserve_fn(handle_check_result) deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] valid_pdus = yield make_deferred_yieldable( defer.gatherResults(deferreds2, consumeErrors=True) ).addErrback(unwrapFirstError) if include_none: return valid_pdus else: return [p for p in valid_pdus if p]
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def handle_check_result(pdu: EventBase, deferred: Deferred): try: res = yield make_deferred_yieldable(deferred) except SynapseError: res = None if not res: # Check local db. res = yield self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True ) if not res and pdu.origin != origin: try: # This should not exist in the base implementation, until # this is fixed, ignore it for typing. See issue #6997. res = yield defer.ensureDeferred( self.get_pdu( # type: ignore destinations=[pdu.origin], event_id=pdu.event_id, room_version=room_version, outlier=outlier, timeout=10000, ) ) except SynapseError: pass if not res: logger.warning("Failed to find copy of %s with valid signature", pdu.event_id) return res
def handle_check_result(pdu, deferred): try: res = yield make_deferred_yieldable(deferred) except SynapseError: res = None if not res: # Check local db. res = yield self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True ) if not res and pdu.origin != origin: try: res = yield defer.ensureDeferred( self.get_pdu( destinations=[pdu.origin], event_id=pdu.event_id, room_version=room_version, outlier=outlier, timeout=10000, ) ) except SynapseError: pass if not res: logger.warning("Failed to find copy of %s with valid signature", pdu.event_id) return res
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred: return make_deferred_yieldable(self._check_sigs_and_hashes(room_version, [pdu])[0])
def _check_sigs_and_hash(self, room_version, pdu): return make_deferred_yieldable(self._check_sigs_and_hashes(room_version, [pdu])[0])
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def _check_sigs_and_hashes( self, room_version: str, pdus: List[EventBase] ) -> List[Deferred]: """Checks that each of the received events is correctly signed by the sending server. Args: room_version: The room version of the PDUs pdus: the events to be checked Returns: For each input event, a deferred which: * returns the original event if the checks pass * returns a redacted version of the event (if the signature matched but the hash did not) * throws a SynapseError if the signature check failed. The deferreds run their callbacks in the sentinel """ deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus) ctx = LoggingContext.current_context() def callback(_, pdu: EventBase): with PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was # redacted (which are somewhat expected) vs actual ball-tampering # incidents. # # This is just a heuristic, so we just assume that if the keys are # about the same between the redacted and received events, then the # received event was probably a redacted copy (but we then use our # *actual* redacted copy to be on the safe side.) redacted_event = prune_event(pdu) if set(redacted_event.keys()) == set(pdu.keys()) and set( six.iterkeys(redacted_event.content) ) == set(six.iterkeys(pdu.content)): logger.info( "Event %s seems to have been redacted; using our redacted copy", pdu.event_id, ) else: logger.warning( "Event %s content has been tampered, redacting", pdu.event_id, ) return redacted_event if self.spam_checker.check_event_for_spam(pdu): logger.warning( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json(), ) return prune_event(pdu) return pdu def errback(failure: Failure, pdu: EventBase): failure.trap(SynapseError) with PreserveLoggingContext(ctx): logger.warning( "Signature check failed for %s: %s", pdu.event_id, failure.getErrorMessage(), ) return failure for deferred, pdu in zip(deferreds, pdus): deferred.addCallbacks(callback, errback, callbackArgs=[pdu], errbackArgs=[pdu]) return deferreds
def _check_sigs_and_hashes(self, room_version, pdus): """Checks that each of the received events is correctly signed by the sending server. Args: room_version (str): The room version of the PDUs pdus (list[FrozenEvent]): the events to be checked Returns: list[Deferred]: for each input event, a deferred which: * returns the original event if the checks pass * returns a redacted version of the event (if the signature matched but the hash did not) * throws a SynapseError if the signature check failed. The deferreds run their callbacks in the sentinel """ deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus) ctx = LoggingContext.current_context() def callback(_, pdu): with PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was # redacted (which are somewhat expected) vs actual ball-tampering # incidents. # # This is just a heuristic, so we just assume that if the keys are # about the same between the redacted and received events, then the # received event was probably a redacted copy (but we then use our # *actual* redacted copy to be on the safe side.) redacted_event = prune_event(pdu) if set(redacted_event.keys()) == set(pdu.keys()) and set( six.iterkeys(redacted_event.content) ) == set(six.iterkeys(pdu.content)): logger.info( "Event %s seems to have been redacted; using our redacted copy", pdu.event_id, ) else: logger.warning( "Event %s content has been tampered, redacting", pdu.event_id, ) return redacted_event if self.spam_checker.check_event_for_spam(pdu): logger.warning( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json(), ) return prune_event(pdu) return pdu def errback(failure, pdu): failure.trap(SynapseError) with PreserveLoggingContext(ctx): logger.warning( "Signature check failed for %s: %s", pdu.event_id, failure.getErrorMessage(), ) return failure for deferred, pdu in zip(deferreds, pdus): deferred.addCallbacks(callback, errback, callbackArgs=[pdu], errbackArgs=[pdu]) return deferreds
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def callback(_, pdu: EventBase): with PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was # redacted (which are somewhat expected) vs actual ball-tampering # incidents. # # This is just a heuristic, so we just assume that if the keys are # about the same between the redacted and received events, then the # received event was probably a redacted copy (but we then use our # *actual* redacted copy to be on the safe side.) redacted_event = prune_event(pdu) if set(redacted_event.keys()) == set(pdu.keys()) and set( six.iterkeys(redacted_event.content) ) == set(six.iterkeys(pdu.content)): logger.info( "Event %s seems to have been redacted; using our redacted copy", pdu.event_id, ) else: logger.warning( "Event %s content has been tampered, redacting", pdu.event_id, ) return redacted_event if self.spam_checker.check_event_for_spam(pdu): logger.warning( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json(), ) return prune_event(pdu) return pdu
def callback(_, pdu): with PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was # redacted (which are somewhat expected) vs actual ball-tampering # incidents. # # This is just a heuristic, so we just assume that if the keys are # about the same between the redacted and received events, then the # received event was probably a redacted copy (but we then use our # *actual* redacted copy to be on the safe side.) redacted_event = prune_event(pdu) if set(redacted_event.keys()) == set(pdu.keys()) and set( six.iterkeys(redacted_event.content) ) == set(six.iterkeys(pdu.content)): logger.info( "Event %s seems to have been redacted; using our redacted copy", pdu.event_id, ) else: logger.warning( "Event %s content has been tampered, redacting", pdu.event_id, ) return redacted_event if self.spam_checker.check_event_for_spam(pdu): logger.warning( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json(), ) return prune_event(pdu) return pdu
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def errback(failure: Failure, pdu: EventBase): failure.trap(SynapseError) with PreserveLoggingContext(ctx): logger.warning( "Signature check failed for %s: %s", pdu.event_id, failure.getErrorMessage(), ) return failure
def errback(failure, pdu): failure.trap(SynapseError) with PreserveLoggingContext(ctx): logger.warning( "Signature check failed for %s: %s", pdu.event_id, failure.getErrorMessage(), ) return failure
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def _check_sigs_on_pdus( keyring: Keyring, room_version: str, pdus: Iterable[EventBase] ) -> List[Deferred]: """Check that the given events are correctly signed Args: keyring: keyring object to do the checks room_version: the room version of the PDUs pdus: the events to be checked Returns: A Deferred for each event in pdus, which will either succeed if the signatures are valid, or fail (with a SynapseError) if not. """ # we want to check that the event is signed by: # # (a) the sender's server # # - except in the case of invites created from a 3pid invite, which are exempt # from this check, because the sender has to match that of the original 3pid # invite, but the event may come from a different HS, for reasons that I don't # entirely grok (why do the senders have to match? and if they do, why doesn't the # joining server ask the inviting server to do the switcheroo with # exchange_third_party_invite?). # # That's pretty awful, since redacting such an invite will render it invalid # (because it will then look like a regular invite without a valid signature), # and signatures are *supposed* to be valid whether or not an event has been # redacted. But this isn't the worst of the ways that 3pid invites are broken. # # (b) for V1 and V2 rooms, the server which created the event_id # # let's start by getting the domain for each pdu, and flattening the event back # to JSON. pdus_to_check = [ PduToCheckSig( pdu=p, redacted_pdu_json=prune_event(p).get_pdu_json(), sender_domain=get_domain_from_id(p.sender), deferreds=[], ) for p in pdus ] v = KNOWN_ROOM_VERSIONS.get(room_version) if not v: raise RuntimeError("Unrecognized room version %s" % (room_version,)) # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)] more_deferreds = keyring.verify_json_objects_for_server( [ ( p.sender_domain, p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, p.pdu.event_id, ) for p in pdus_to_check_sender ] ) def sender_err(e, pdu_to_check): errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( pdu_to_check.pdu.event_id, pdu_to_check.sender_domain, e.getErrorMessage(), ) raise SynapseError(403, errmsg, Codes.FORBIDDEN) for p, d in zip(pdus_to_check_sender, more_deferreds): d.addErrback(sender_err, p) p.deferreds.append(d) # now let's look for events where the sender's domain is different to the # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). if v.event_format == EventFormatVersions.V1: pdus_to_check_event_id = [ p for p in pdus_to_check if p.sender_domain != get_domain_from_id(p.pdu.event_id) ] more_deferreds = keyring.verify_json_objects_for_server( [ ( get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, p.pdu.event_id, ) for p in pdus_to_check_event_id ] ) def event_err(e, pdu_to_check): errmsg = ( "event id %s: unable to verify signature for event id domain: %s" % (pdu_to_check.pdu.event_id, e.getErrorMessage()) ) raise SynapseError(403, errmsg, Codes.FORBIDDEN) for p, d in zip(pdus_to_check_event_id, more_deferreds): d.addErrback(event_err, p) p.deferreds.append(d) # replace lists of deferreds with single Deferreds return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
def _check_sigs_on_pdus(keyring, room_version, pdus): """Check that the given events are correctly signed Args: keyring (synapse.crypto.Keyring): keyring object to do the checks room_version (str): the room version of the PDUs pdus (Collection[EventBase]): the events to be checked Returns: List[Deferred]: a Deferred for each event in pdus, which will either succeed if the signatures are valid, or fail (with a SynapseError) if not. """ # we want to check that the event is signed by: # # (a) the sender's server # # - except in the case of invites created from a 3pid invite, which are exempt # from this check, because the sender has to match that of the original 3pid # invite, but the event may come from a different HS, for reasons that I don't # entirely grok (why do the senders have to match? and if they do, why doesn't the # joining server ask the inviting server to do the switcheroo with # exchange_third_party_invite?). # # That's pretty awful, since redacting such an invite will render it invalid # (because it will then look like a regular invite without a valid signature), # and signatures are *supposed* to be valid whether or not an event has been # redacted. But this isn't the worst of the ways that 3pid invites are broken. # # (b) for V1 and V2 rooms, the server which created the event_id # # let's start by getting the domain for each pdu, and flattening the event back # to JSON. pdus_to_check = [ PduToCheckSig( pdu=p, redacted_pdu_json=prune_event(p).get_pdu_json(), sender_domain=get_domain_from_id(p.sender), deferreds=[], ) for p in pdus ] v = KNOWN_ROOM_VERSIONS.get(room_version) if not v: raise RuntimeError("Unrecognized room version %s" % (room_version,)) # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)] more_deferreds = keyring.verify_json_objects_for_server( [ ( p.sender_domain, p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, p.pdu.event_id, ) for p in pdus_to_check_sender ] ) def sender_err(e, pdu_to_check): errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( pdu_to_check.pdu.event_id, pdu_to_check.sender_domain, e.getErrorMessage(), ) raise SynapseError(403, errmsg, Codes.FORBIDDEN) for p, d in zip(pdus_to_check_sender, more_deferreds): d.addErrback(sender_err, p) p.deferreds.append(d) # now let's look for events where the sender's domain is different to the # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). if v.event_format == EventFormatVersions.V1: pdus_to_check_event_id = [ p for p in pdus_to_check if p.sender_domain != get_domain_from_id(p.pdu.event_id) ] more_deferreds = keyring.verify_json_objects_for_server( [ ( get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, p.pdu.event_id, ) for p in pdus_to_check_event_id ] ) def event_err(e, pdu_to_check): errmsg = ( "event id %s: unable to verify signature for event id domain: %s" % (pdu_to_check.pdu.event_id, e.getErrorMessage()) ) raise SynapseError(403, errmsg, Codes.FORBIDDEN) for p, d in zip(pdus_to_check_event_id, more_deferreds): d.addErrback(event_err, p) p.deferreds.append(d) # replace lists of deferreds with single Deferreds return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred: """Given a list of deferreds, either return the single deferred, combine into a DeferredList, or return an already resolved deferred. """ if len(deferreds) > 1: return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True) elif len(deferreds) == 1: return deferreds[0] else: return defer.succeed(None)
def _flatten_deferred_list(deferreds): """Given a list of deferreds, either return the single deferred, combine into a DeferredList, or return an already resolved deferred. """ if len(deferreds) > 1: return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True) elif len(deferreds) == 1: return deferreds[0] else: return defer.succeed(None)
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def _is_invite_via_3pid(event: EventBase) -> bool: return ( event.type == EventTypes.Member and event.membership == Membership.INVITE and "third_party_invite" in event.content )
def _is_invite_via_3pid(event): return ( event.type == EventTypes.Member and event.membership == Membership.INVITE and "third_party_invite" in event.content )
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
async def backfill( self, dest: str, room_id: str, limit: int, extremities: Iterable[str] ) -> Optional[List[EventBase]]: """Requests some more historic PDUs for the given room from the given destination server. Args: dest (str): The remote homeserver to ask. room_id (str): The room_id to backfill. limit (int): The maximum number of events to return. extremities (list): our current backwards extremities, to backfill from """ logger.debug("backfill extrem=%s", extremities) # If there are no extremities then we've (probably) reached the start. if not extremities: return None transaction_data = await self.transport_layer.backfill( dest, room_id, extremities, limit ) logger.debug("backfill transaction_data=%r", transaction_data) room_version = await self.store.get_room_version(room_id) pdus = [ event_from_pdu_json(p, room_version, outlier=False) for p in transaction_data["pdus"] ] # FIXME: We should handle signature failures more gracefully. pdus[:] = await make_deferred_yieldable( defer.gatherResults( self._check_sigs_and_hashes(room_version.identifier, pdus), consumeErrors=True, ).addErrback(unwrapFirstError) ) return pdus
async def backfill( self, dest: str, room_id: str, limit: int, extremities: Iterable[str] ) -> List[EventBase]: """Requests some more historic PDUs for the given room from the given destination server. Args: dest (str): The remote homeserver to ask. room_id (str): The room_id to backfill. limit (int): The maximum number of events to return. extremities (list): our current backwards extremities, to backfill from """ logger.debug("backfill extrem=%s", extremities) # If there are no extremeties then we've (probably) reached the start. if not extremities: return transaction_data = await self.transport_layer.backfill( dest, room_id, extremities, limit ) logger.debug("backfill transaction_data=%r", transaction_data) room_version = await self.store.get_room_version(room_id) pdus = [ event_from_pdu_json(p, room_version, outlier=False) for p in transaction_data["pdus"] ] # FIXME: We should handle signature failures more gracefully. pdus[:] = await make_deferred_yieldable( defer.gatherResults( self._check_sigs_and_hashes(room_version.identifier, pdus), consumeErrors=True, ).addErrback(unwrapFirstError) ) return pdus
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
async def get_pdu( self, destinations: Iterable[str], event_id: str, room_version: RoomVersion, outlier: bool = False, timeout: Optional[int] = None, ) -> Optional[EventBase]: """Requests the PDU with given origin and ID from the remote home servers. Will attempt to get the PDU from each destination in the list until one succeeds. Args: destinations: Which homeservers to query event_id: event to fetch room_version: version of the room outlier: Indicates whether the PDU is an `outlier`, i.e. if it's from an arbitary point in the context as opposed to part of the current block of PDUs. Defaults to `False` timeout: How long to try (in ms) each destination for before moving to the next destination. None indicates no timeout. Returns: The requested PDU, or None if we were unable to find it. """ # TODO: Rate limit the number of times we try and get the same event. ev = self._get_pdu_cache.get(event_id) if ev: return ev pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) signed_pdu = None for destination in destinations: now = self._clock.time_msec() last_attempt = pdu_attempts.get(destination, 0) if last_attempt + PDU_RETRY_TIME_MS > now: continue try: transaction_data = await self.transport_layer.get_event( destination, event_id, timeout=timeout ) logger.debug( "retrieved event id %s from %s: %r", event_id, destination, transaction_data, ) pdu_list = [ event_from_pdu_json(p, room_version, outlier=outlier) for p in transaction_data["pdus"] ] # type: List[EventBase] if pdu_list and pdu_list[0]: pdu = pdu_list[0] # Check signatures are correct. signed_pdu = await self._check_sigs_and_hash( room_version.identifier, pdu ) break pdu_attempts[destination] = now except SynapseError as e: logger.info( "Failed to get PDU %s from %s because %s", event_id, destination, e ) continue except NotRetryingDestination as e: logger.info(str(e)) continue except FederationDeniedError as e: logger.info(str(e)) continue except Exception as e: pdu_attempts[destination] = now logger.info( "Failed to get PDU %s from %s because %s", event_id, destination, e ) continue if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu return signed_pdu
async def get_pdu( self, destinations: Iterable[str], event_id: str, room_version: RoomVersion, outlier: bool = False, timeout: Optional[int] = None, ) -> Optional[EventBase]: """Requests the PDU with given origin and ID from the remote home servers. Will attempt to get the PDU from each destination in the list until one succeeds. Args: destinations: Which homeservers to query event_id: event to fetch room_version: version of the room outlier: Indicates whether the PDU is an `outlier`, i.e. if it's from an arbitary point in the context as opposed to part of the current block of PDUs. Defaults to `False` timeout: How long to try (in ms) each destination for before moving to the next destination. None indicates no timeout. Returns: The requested PDU, or None if we were unable to find it. """ # TODO: Rate limit the number of times we try and get the same event. ev = self._get_pdu_cache.get(event_id) if ev: return ev pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) signed_pdu = None for destination in destinations: now = self._clock.time_msec() last_attempt = pdu_attempts.get(destination, 0) if last_attempt + PDU_RETRY_TIME_MS > now: continue try: transaction_data = await self.transport_layer.get_event( destination, event_id, timeout=timeout ) logger.debug( "retrieved event id %s from %s: %r", event_id, destination, transaction_data, ) pdu_list = [ event_from_pdu_json(p, room_version, outlier=outlier) for p in transaction_data["pdus"] ] if pdu_list and pdu_list[0]: pdu = pdu_list[0] # Check signatures are correct. signed_pdu = await self._check_sigs_and_hash( room_version.identifier, pdu ) break pdu_attempts[destination] = now except SynapseError as e: logger.info( "Failed to get PDU %s from %s because %s", event_id, destination, e ) continue except NotRetryingDestination as e: logger.info(str(e)) continue except FederationDeniedError as e: logger.info(str(e)) continue except Exception as e: pdu_attempts[destination] = now logger.info( "Failed to get PDU %s from %s because %s", event_id, destination, e ) continue if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu return signed_pdu
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
async def send_join( self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion ) -> Dict[str, Any]: """Sends a join event to one of a list of homeservers. Doing so will cause the remote server to add the event to the graph, and send the event out to the rest of the federation. Args: destinations: Candidate homeservers which are probably participating in the room. pdu: event to be sent room_version: the version of the room (according to the server that did the make_join) Returns: a dict with members ``origin`` (a string giving the server the event was sent to, ``state`` (?) and ``auth_chain``. Raises: SynapseError: if the chosen remote server returns a 300/400 code. RuntimeError: if no servers were reachable. """ async def send_request(destination) -> Dict[str, Any]: content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) state = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("state", []) ] auth_chain = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("auth_chain", []) ] pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. raise SynapseError(400, "No create event in state") # the room version should be sane. create_room_version = create_event.content.get( "room_version", RoomVersions.V1.identifier ) if create_room_version != room_version.identifier: # either the server that fulfilled the make_join, or the server that is # handling the send_join, is lying. raise InvalidResponseError( "Unexpected room version %s in create event" % (create_room_version,) ) valid_pdus = await self._check_sigs_and_hash_and_fetch( destination, list(pdus.values()), outlier=True, room_version=room_version.identifier, ) valid_pdus_map = {p.event_id: p for p in valid_pdus} # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. signed_state = [ copy.copy(valid_pdus_map[p.event_id]) for p in state if p.event_id in valid_pdus_map ] signed_auth = [ valid_pdus_map[p.event_id] for p in auth_chain if p.event_id in valid_pdus_map ] # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. for s in signed_state: s.internal_metadata = copy.deepcopy(s.internal_metadata) # double-check that the same create event has ended up in the auth chain auth_chain_create_events = [ e.event_id for e in signed_auth if (e.type, e.state_key) == (EventTypes.Create, "") ] if auth_chain_create_events != [create_event.event_id]: raise InvalidResponseError( "Unexpected create event(s) in auth chain: %s" % (auth_chain_create_events,) ) return { "state": signed_state, "auth_chain": signed_auth, "origin": destination, } return await self._try_destination_list("send_join", destinations, send_request)
async def send_join( self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion ) -> Dict[str, Any]: """Sends a join event to one of a list of homeservers. Doing so will cause the remote server to add the event to the graph, and send the event out to the rest of the federation. Args: destinations: Candidate homeservers which are probably participating in the room. pdu: event to be sent room_version: the version of the room (according to the server that did the make_join) Returns: a dict with members ``origin`` (a string giving the server the event was sent to, ``state`` (?) and ``auth_chain``. Raises: SynapseError: if the chosen remote server returns a 300/400 code. RuntimeError: if no servers were reachable. """ async def send_request(destination) -> Dict[str, Any]: content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) state = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("state", []) ] auth_chain = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("auth_chain", []) ] pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. raise SynapseError(400, "No create event in state") # the room version should be sane. create_room_version = create_event.content.get( "room_version", RoomVersions.V1.identifier ) if create_room_version != room_version.identifier: # either the server that fulfilled the make_join, or the server that is # handling the send_join, is lying. raise InvalidResponseError( "Unexpected room version %s in create event" % (create_room_version,) ) valid_pdus = await self._check_sigs_and_hash_and_fetch( destination, list(pdus.values()), outlier=True, room_version=room_version.identifier, ) valid_pdus_map = {p.event_id: p for p in valid_pdus} # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. signed_state = [ copy.copy(valid_pdus_map[p.event_id]) for p in state if p.event_id in valid_pdus_map ] signed_auth = [ valid_pdus_map[p.event_id] for p in auth_chain if p.event_id in valid_pdus_map ] # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. for s in signed_state: s.internal_metadata = copy.deepcopy(s.internal_metadata) # double-check that the same create event has ended up in the auth chain auth_chain_create_events = [ e.event_id for e in signed_auth if (e.type, e.state_key) == (EventTypes.Create, "") ] if auth_chain_create_events != [create_event.event_id]: raise InvalidResponseError( "Unexpected create event(s) in auth chain" % (auth_chain_create_events,) ) return { "state": signed_state, "auth_chain": signed_auth, "origin": destination, } return await self._try_destination_list("send_join", destinations, send_request)
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
async def send_request(destination) -> Dict[str, Any]: content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) state = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("state", []) ] auth_chain = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("auth_chain", []) ] pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. raise SynapseError(400, "No create event in state") # the room version should be sane. create_room_version = create_event.content.get( "room_version", RoomVersions.V1.identifier ) if create_room_version != room_version.identifier: # either the server that fulfilled the make_join, or the server that is # handling the send_join, is lying. raise InvalidResponseError( "Unexpected room version %s in create event" % (create_room_version,) ) valid_pdus = await self._check_sigs_and_hash_and_fetch( destination, list(pdus.values()), outlier=True, room_version=room_version.identifier, ) valid_pdus_map = {p.event_id: p for p in valid_pdus} # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. signed_state = [ copy.copy(valid_pdus_map[p.event_id]) for p in state if p.event_id in valid_pdus_map ] signed_auth = [ valid_pdus_map[p.event_id] for p in auth_chain if p.event_id in valid_pdus_map ] # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. for s in signed_state: s.internal_metadata = copy.deepcopy(s.internal_metadata) # double-check that the same create event has ended up in the auth chain auth_chain_create_events = [ e.event_id for e in signed_auth if (e.type, e.state_key) == (EventTypes.Create, "") ] if auth_chain_create_events != [create_event.event_id]: raise InvalidResponseError( "Unexpected create event(s) in auth chain: %s" % (auth_chain_create_events,) ) return { "state": signed_state, "auth_chain": signed_auth, "origin": destination, }
async def send_request(destination) -> Dict[str, Any]: content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) state = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("state", []) ] auth_chain = [ event_from_pdu_json(p, room_version, outlier=True) for p in content.get("auth_chain", []) ] pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. raise SynapseError(400, "No create event in state") # the room version should be sane. create_room_version = create_event.content.get( "room_version", RoomVersions.V1.identifier ) if create_room_version != room_version.identifier: # either the server that fulfilled the make_join, or the server that is # handling the send_join, is lying. raise InvalidResponseError( "Unexpected room version %s in create event" % (create_room_version,) ) valid_pdus = await self._check_sigs_and_hash_and_fetch( destination, list(pdus.values()), outlier=True, room_version=room_version.identifier, ) valid_pdus_map = {p.event_id: p for p in valid_pdus} # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. signed_state = [ copy.copy(valid_pdus_map[p.event_id]) for p in state if p.event_id in valid_pdus_map ] signed_auth = [ valid_pdus_map[p.event_id] for p in auth_chain if p.event_id in valid_pdus_map ] # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. for s in signed_state: s.internal_metadata = copy.deepcopy(s.internal_metadata) # double-check that the same create event has ended up in the auth chain auth_chain_create_events = [ e.event_id for e in signed_auth if (e.type, e.state_key) == (EventTypes.Create, "") ] if auth_chain_create_events != [create_event.event_id]: raise InvalidResponseError( "Unexpected create event(s) in auth chain" % (auth_chain_create_events,) ) return { "state": signed_state, "auth_chain": signed_auth, "origin": destination, }
https://github.com/matrix-org/synapse/issues/6978
2020-02-24 14:00:10,088 - synapse.federation.federation_client - 421 - WARNING - POST-1957- Failed to send_join via matrix.org ... Traceback (most recent call last): File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 402, in _try_destination_list res = await callback(destination) File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in send_request valid_pdus_map = {p.event_id: p for p in valid_pdus} File "/home/synapse/matrixtest/synapse/federation/federation_client.py", line 589, in <dictcomp> valid_pdus_map = {p.event_id: p for p in valid_pdus} AttributeError: 'coroutine' object has no attribute 'event_id' 2020-02-24 14:00:10,154 - synapse.http.server - 81 - INFO - POST-1957- <SynapseRequest at 0x7ff1bf3efd30 method='POST' uri='/_matrix/client/r0/join/%23synapse%3Amatrix.org' clientproto='HTTP/1.1' site=8447> SynapseError: 502 - Failed to send_join via any server
AttributeError
def read_config(self, config: dict, config_dir_path: str, **kwargs): acme_config = config.get("acme", None) if acme_config is None: acme_config = {} self.acme_enabled = acme_config.get("enabled", False) # hyperlink complains on py2 if this is not a Unicode self.acme_url = six.text_type( acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory") ) self.acme_port = acme_config.get("port", 80) self.acme_bind_addresses = acme_config.get("bind_addresses", ["::", "0.0.0.0"]) self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30) self.acme_domain = acme_config.get("domain", config.get("server_name")) self.acme_account_key_file = self.abspath( acme_config.get("account_key_file", config_dir_path + "/client.key") ) self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) if self.root.server.has_tls_listener(): if not self.tls_certificate_file: raise ConfigError( "tls_certificate_path must be specified if TLS-enabled listeners are " "configured." ) if not self.tls_private_key_file: raise ConfigError( "tls_private_key_path must be specified if TLS-enabled listeners are " "configured." ) self._original_tls_fingerprints = config.get("tls_fingerprints", []) if self._original_tls_fingerprints is None: self._original_tls_fingerprints = [] self.tls_fingerprints = list(self._original_tls_fingerprints) # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( "federation_verify_certificates", True ) # Minimum TLS version to use for outbound federation traffic self.federation_client_minimum_tls_version = str( config.get("federation_client_minimum_tls_version", 1) ) if self.federation_client_minimum_tls_version not in ["1", "1.1", "1.2", "1.3"]: raise ConfigError( "federation_client_minimum_tls_version must be one of: 1, 1.1, 1.2, 1.3" ) # Prevent people shooting themselves in the foot here by setting it to # the biggest number blindly if self.federation_client_minimum_tls_version == "1.3": if getattr(SSL, "OP_NO_TLSv1_3", None) is None: raise ConfigError( ( "federation_client_minimum_tls_version cannot be 1.3, " "your OpenSSL does not support it" ) ) # Whitelist of domains to not verify certificates for fed_whitelist_entries = config.get( "federation_certificate_verification_whitelist", [] ) if fed_whitelist_entries is None: fed_whitelist_entries = [] # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] # type: List[str] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) except UnicodeEncodeError: raise ConfigError( "IDNA domain names are not allowed in the " "federation_certificate_verification_whitelist: %s" % (entry,) ) # Convert globs to regex self.federation_certificate_verification_whitelist.append(entry_regex) # List of custom certificate authorities for federation traffic validation custom_ca_list = config.get("federation_custom_ca_list", None) # Read in and parse custom CA certificates self.federation_ca_trust_root = None if custom_ca_list is not None: if len(custom_ca_list) == 0: # A trustroot cannot be generated without any CA certificates. # Raise an error if this option has been specified without any # corresponding certificates. raise ConfigError( "federation_custom_ca_list specified without any certificate files" ) certs = [] for ca_file in custom_ca_list: logger.debug("Reading custom CA certificate file: %s", ca_file) content = self.read_file(ca_file, "federation_custom_ca_list") # Parse the CA certificates try: cert_base = Certificate.loadPEM(content) certs.append(cert_base) except Exception as e: raise ConfigError( "Error parsing custom CA certificate file %s: %s" % (ca_file, e) ) self.federation_ca_trust_root = trustRootFromCertificates(certs) # This config option applies to non-federation HTTP clients # (e.g. for talking to recaptcha, identity servers, and such) # It should never be used in production, and is intended for # use only when running tests. self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( "use_insecure_ssl_client_just_for_testing_do_not_use" ) self.tls_certificate = None self.tls_private_key = None
def read_config(self, config: dict, config_dir_path: str, **kwargs): acme_config = config.get("acme", None) if acme_config is None: acme_config = {} self.acme_enabled = acme_config.get("enabled", False) # hyperlink complains on py2 if this is not a Unicode self.acme_url = six.text_type( acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory") ) self.acme_port = acme_config.get("port", 80) self.acme_bind_addresses = acme_config.get("bind_addresses", ["::", "0.0.0.0"]) self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30) self.acme_domain = acme_config.get("domain", config.get("server_name")) self.acme_account_key_file = self.abspath( acme_config.get("account_key_file", config_dir_path + "/client.key") ) self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) if self.root.server.has_tls_listener(): if not self.tls_certificate_file: raise ConfigError( "tls_certificate_path must be specified if TLS-enabled listeners are " "configured." ) if not self.tls_private_key_file: raise ConfigError( "tls_private_key_path must be specified if TLS-enabled listeners are " "configured." ) self._original_tls_fingerprints = config.get("tls_fingerprints", []) if self._original_tls_fingerprints is None: self._original_tls_fingerprints = [] self.tls_fingerprints = list(self._original_tls_fingerprints) # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( "federation_verify_certificates", True ) # Minimum TLS version to use for outbound federation traffic self.federation_client_minimum_tls_version = str( config.get("federation_client_minimum_tls_version", 1) ) if self.federation_client_minimum_tls_version not in ["1", "1.1", "1.2", "1.3"]: raise ConfigError( "federation_client_minimum_tls_version must be one of: 1, 1.1, 1.2, 1.3" ) # Prevent people shooting themselves in the foot here by setting it to # the biggest number blindly if self.federation_client_minimum_tls_version == "1.3": if getattr(SSL, "OP_NO_TLSv1_3", None) is None: raise ConfigError( ( "federation_client_minimum_tls_version cannot be 1.3, " "your OpenSSL does not support it" ) ) # Whitelist of domains to not verify certificates for fed_whitelist_entries = config.get( "federation_certificate_verification_whitelist", [] ) # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] # type: List[str] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) except UnicodeEncodeError: raise ConfigError( "IDNA domain names are not allowed in the " "federation_certificate_verification_whitelist: %s" % (entry,) ) # Convert globs to regex self.federation_certificate_verification_whitelist.append(entry_regex) # List of custom certificate authorities for federation traffic validation custom_ca_list = config.get("federation_custom_ca_list", None) # Read in and parse custom CA certificates self.federation_ca_trust_root = None if custom_ca_list is not None: if len(custom_ca_list) == 0: # A trustroot cannot be generated without any CA certificates. # Raise an error if this option has been specified without any # corresponding certificates. raise ConfigError( "federation_custom_ca_list specified without any certificate files" ) certs = [] for ca_file in custom_ca_list: logger.debug("Reading custom CA certificate file: %s", ca_file) content = self.read_file(ca_file, "federation_custom_ca_list") # Parse the CA certificates try: cert_base = Certificate.loadPEM(content) certs.append(cert_base) except Exception as e: raise ConfigError( "Error parsing custom CA certificate file %s: %s" % (ca_file, e) ) self.federation_ca_trust_root = trustRootFromCertificates(certs) # This config option applies to non-federation HTTP clients # (e.g. for talking to recaptcha, identity servers, and such) # It should never be used in production, and is intended for # use only when running tests. self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( "use_insecure_ssl_client_just_for_testing_do_not_use" ) self.tls_certificate = None self.tls_private_key = None
https://github.com/matrix-org/synapse/issues/6817
synapse_1 | Traceback (most recent call last): synapse_1 | File "/usr/local/lib/python3.8/runpy.py", line 192, in _run_module_as_main synapse_1 | return _run_code(code, main_globals, None, synapse_1 | File "/usr/local/lib/python3.8/runpy.py", line 85, in _run_code synapse_1 | exec(code, run_globals) synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/app/homeserver.py", line 639, in <module> synapse_1 | main() synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/app/homeserver.py", line 634, in main synapse_1 | hs = setup(sys.argv[1:]) synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/app/homeserver.py", line 318, in setup synapse_1 | config = HomeServerConfig.load_or_generate_config( synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/config/_base.py", line 616, in load_or_generate_config synapse_1 | obj.parse_config_dict( synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/config/_base.py", line 635, in parse_config_dict synapse_1 | self.invoke_all( synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/config/_base.py", line 254, in invoke_all synapse_1 | res[name] = getattr(config, func_name)(*args, **kwargs) synapse_1 | File "/usr/local/lib/python3.8/site-packages/synapse/config/tls.py", line 115, in read_config synapse_1 | for entry in fed_whitelist_entries: synapse_1 | TypeError: 'NoneType' object is not iterable
TypeError
def __init__(self, database: Database, db_conn, hs): super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) # Do not add more reserved users than the total allowable number # cur = LoggingTransaction( self.db.new_transaction( db_conn, "initialise_mau_threepids", [], [], self._initialise_reserved_users, hs.config.mau_limits_reserved_threepids[: self.hs.config.max_mau_value], )
def __init__(self, database: Database, db_conn, hs): super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) self._clock = hs.get_clock() self.hs = hs # Do not add more reserved users than the total allowable number self.db.new_transaction( db_conn, "initialise_mau_threepids", [], [], self._initialise_reserved_users, hs.config.mau_limits_reserved_threepids[: self.hs.config.max_mau_value], )
https://github.com/matrix-org/synapse/issues/4639
Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: 2019-02-13 17:56:49,005 - synapse.http.server - 112 - ERROR - PUT-362- Failed handle request via <function _async_render at 0x7ff0363d32a8>: <XForwardedForRequest at 0x7ff02cca4b48 method=u'PUT' uri=u'/_matrix/client/r0/rooms/<<ROOM ID>>/send/m.room.message/163970?user_id=<<USER ID>>&amp;access_token=<redacted>&amp;ts=1477692257000' clientproto=u'HTTP/1.1' site=18095>: Traceback (most recent call last): Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: current.result = callback(current.result, *args, **kw) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: _inlineCallbacks(r, g, status) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: --- <exception caught here> --- Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield h(self, request) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 316, in _async_render Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: callback_return = yield callback(request, **kwargs) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/rest/client/v1/room.py", line 217, in on_POST Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id, Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 488, in create_and_send_nonmember_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 279, in create_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield self.auth.check_auth_blocking(requester.user.to_string()) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = g.send(result) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/api/auth.py", line 812, in check_auth_blocking Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: timestamp = yield self.store.user_last_seen_monthly_active(user_id) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: exceptions.AttributeError: 'EventCreatorSlavedStore' object has no attribute 'user_last_seen_monthly_active'
exceptions.AttributeError
def reap_monthly_active_users(self): """Cleans out monthly active user table to ensure that no stale entries exist. Returns: Deferred[] """ def _reap_users(txn, reserved_users): """ Args: reserved_users (tuple): reserved users to preserve """ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) query_args = [thirty_days_ago] base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?" # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. if len(reserved_users) > 0: # questionmarks is a hack to overcome sqlite not supporting # tuples in 'WHERE IN %s' question_marks = ",".join("?" * len(reserved_users)) query_args.extend(reserved_users) sql = base_sql + " AND user_id NOT IN ({})".format(question_marks) else: sql = base_sql txn.execute(sql, query_args) max_mau_value = self.hs.config.max_mau_value if self.hs.config.limit_usage_by_mau: # If MAU user count still exceeds the MAU threshold, then delete on # a least recently active basis. # Note it is not possible to write this query using OFFSET due to # incompatibilities in how sqlite and postgres support the feature. # sqlite requires 'LIMIT -1 OFFSET ?', the LIMIT must be present # While Postgres does not require 'LIMIT', but also does not support # negative LIMIT values. So there is no way to write it that both can # support if len(reserved_users) == 0: sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users ORDER BY timestamp DESC LIMIT ? ) """ txn.execute(sql, (max_mau_value,)) # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. else: # Must be >= 0 for postgres num_of_non_reserved_users_to_remove = max( max_mau_value - len(reserved_users), 0 ) # It is important to filter reserved users twice to guard # against the case where the reserved user is present in the # SELECT, meaning that a legitmate mau is deleted. sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users WHERE user_id NOT IN ({}) ORDER BY timestamp DESC LIMIT ? ) AND user_id NOT IN ({}) """.format(question_marks, question_marks) query_args = [ *reserved_users, num_of_non_reserved_users_to_remove, *reserved_users, ] txn.execute(sql, query_args) # It seems poor to invalidate the whole cache, Postgres supports # 'Returning' which would allow me to invalidate only the # specific users, but sqlite has no way to do this and instead # I would need to SELECT and the DELETE which without locking # is racy. # Have resolved to invalidate the whole cache for now and do # something about it if and when the perf becomes significant self._invalidate_all_cache_and_stream(txn, self.user_last_seen_monthly_active) self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) reserved_users = yield self.get_registered_reserved_users() yield self.db.runInteraction( "reap_monthly_active_users", _reap_users, reserved_users )
def reap_monthly_active_users(self): """Cleans out monthly active user table to ensure that no stale entries exist. Returns: Deferred[] """ def _reap_users(txn, reserved_users): """ Args: reserved_users (tuple): reserved users to preserve """ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) query_args = [thirty_days_ago] base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?" # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. if len(reserved_users) > 0: # questionmarks is a hack to overcome sqlite not supporting # tuples in 'WHERE IN %s' question_marks = ",".join("?" * len(reserved_users)) query_args.extend(reserved_users) sql = base_sql + " AND user_id NOT IN ({})".format(question_marks) else: sql = base_sql txn.execute(sql, query_args) max_mau_value = self.hs.config.max_mau_value if self.hs.config.limit_usage_by_mau: # If MAU user count still exceeds the MAU threshold, then delete on # a least recently active basis. # Note it is not possible to write this query using OFFSET due to # incompatibilities in how sqlite and postgres support the feature. # sqlite requires 'LIMIT -1 OFFSET ?', the LIMIT must be present # While Postgres does not require 'LIMIT', but also does not support # negative LIMIT values. So there is no way to write it that both can # support if len(reserved_users) == 0: sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users ORDER BY timestamp DESC LIMIT ? ) """ txn.execute(sql, (max_mau_value,)) # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. else: # Must be >= 0 for postgres num_of_non_reserved_users_to_remove = max( max_mau_value - len(reserved_users), 0 ) # It is important to filter reserved users twice to guard # against the case where the reserved user is present in the # SELECT, meaning that a legitmate mau is deleted. sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users WHERE user_id NOT IN ({}) ORDER BY timestamp DESC LIMIT ? ) AND user_id NOT IN ({}) """.format(question_marks, question_marks) query_args = [ *reserved_users, num_of_non_reserved_users_to_remove, *reserved_users, ] txn.execute(sql, query_args) reserved_users = yield self.get_registered_reserved_users() yield self.db.runInteraction( "reap_monthly_active_users", _reap_users, reserved_users ) # It seems poor to invalidate the whole cache, Postgres supports # 'Returning' which would allow me to invalidate only the # specific users, but sqlite has no way to do this and instead # I would need to SELECT and the DELETE which without locking # is racy. # Have resolved to invalidate the whole cache for now and do # something about it if and when the perf becomes significant self.user_last_seen_monthly_active.invalidate_all() self.get_monthly_active_count.invalidate_all()
https://github.com/matrix-org/synapse/issues/4639
Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: 2019-02-13 17:56:49,005 - synapse.http.server - 112 - ERROR - PUT-362- Failed handle request via <function _async_render at 0x7ff0363d32a8>: <XForwardedForRequest at 0x7ff02cca4b48 method=u'PUT' uri=u'/_matrix/client/r0/rooms/<<ROOM ID>>/send/m.room.message/163970?user_id=<<USER ID>>&amp;access_token=<redacted>&amp;ts=1477692257000' clientproto=u'HTTP/1.1' site=18095>: Traceback (most recent call last): Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: current.result = callback(current.result, *args, **kw) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: _inlineCallbacks(r, g, status) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: --- <exception caught here> --- Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield h(self, request) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 316, in _async_render Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: callback_return = yield callback(request, **kwargs) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/rest/client/v1/room.py", line 217, in on_POST Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id, Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 488, in create_and_send_nonmember_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 279, in create_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield self.auth.check_auth_blocking(requester.user.to_string()) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = g.send(result) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/api/auth.py", line 812, in check_auth_blocking Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: timestamp = yield self.store.user_last_seen_monthly_active(user_id) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: exceptions.AttributeError: 'EventCreatorSlavedStore' object has no attribute 'user_last_seen_monthly_active'
exceptions.AttributeError
def _reap_users(txn, reserved_users): """ Args: reserved_users (tuple): reserved users to preserve """ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) query_args = [thirty_days_ago] base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?" # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. if len(reserved_users) > 0: # questionmarks is a hack to overcome sqlite not supporting # tuples in 'WHERE IN %s' question_marks = ",".join("?" * len(reserved_users)) query_args.extend(reserved_users) sql = base_sql + " AND user_id NOT IN ({})".format(question_marks) else: sql = base_sql txn.execute(sql, query_args) max_mau_value = self.hs.config.max_mau_value if self.hs.config.limit_usage_by_mau: # If MAU user count still exceeds the MAU threshold, then delete on # a least recently active basis. # Note it is not possible to write this query using OFFSET due to # incompatibilities in how sqlite and postgres support the feature. # sqlite requires 'LIMIT -1 OFFSET ?', the LIMIT must be present # While Postgres does not require 'LIMIT', but also does not support # negative LIMIT values. So there is no way to write it that both can # support if len(reserved_users) == 0: sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users ORDER BY timestamp DESC LIMIT ? ) """ txn.execute(sql, (max_mau_value,)) # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. else: # Must be >= 0 for postgres num_of_non_reserved_users_to_remove = max( max_mau_value - len(reserved_users), 0 ) # It is important to filter reserved users twice to guard # against the case where the reserved user is present in the # SELECT, meaning that a legitmate mau is deleted. sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users WHERE user_id NOT IN ({}) ORDER BY timestamp DESC LIMIT ? ) AND user_id NOT IN ({}) """.format(question_marks, question_marks) query_args = [ *reserved_users, num_of_non_reserved_users_to_remove, *reserved_users, ] txn.execute(sql, query_args) # It seems poor to invalidate the whole cache, Postgres supports # 'Returning' which would allow me to invalidate only the # specific users, but sqlite has no way to do this and instead # I would need to SELECT and the DELETE which without locking # is racy. # Have resolved to invalidate the whole cache for now and do # something about it if and when the perf becomes significant self._invalidate_all_cache_and_stream(txn, self.user_last_seen_monthly_active) self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
def _reap_users(txn, reserved_users): """ Args: reserved_users (tuple): reserved users to preserve """ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) query_args = [thirty_days_ago] base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?" # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. if len(reserved_users) > 0: # questionmarks is a hack to overcome sqlite not supporting # tuples in 'WHERE IN %s' question_marks = ",".join("?" * len(reserved_users)) query_args.extend(reserved_users) sql = base_sql + " AND user_id NOT IN ({})".format(question_marks) else: sql = base_sql txn.execute(sql, query_args) max_mau_value = self.hs.config.max_mau_value if self.hs.config.limit_usage_by_mau: # If MAU user count still exceeds the MAU threshold, then delete on # a least recently active basis. # Note it is not possible to write this query using OFFSET due to # incompatibilities in how sqlite and postgres support the feature. # sqlite requires 'LIMIT -1 OFFSET ?', the LIMIT must be present # While Postgres does not require 'LIMIT', but also does not support # negative LIMIT values. So there is no way to write it that both can # support if len(reserved_users) == 0: sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users ORDER BY timestamp DESC LIMIT ? ) """ txn.execute(sql, (max_mau_value,)) # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. else: # Must be >= 0 for postgres num_of_non_reserved_users_to_remove = max( max_mau_value - len(reserved_users), 0 ) # It is important to filter reserved users twice to guard # against the case where the reserved user is present in the # SELECT, meaning that a legitmate mau is deleted. sql = """ DELETE FROM monthly_active_users WHERE user_id NOT IN ( SELECT user_id FROM monthly_active_users WHERE user_id NOT IN ({}) ORDER BY timestamp DESC LIMIT ? ) AND user_id NOT IN ({}) """.format(question_marks, question_marks) query_args = [ *reserved_users, num_of_non_reserved_users_to_remove, *reserved_users, ] txn.execute(sql, query_args)
https://github.com/matrix-org/synapse/issues/4639
Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: 2019-02-13 17:56:49,005 - synapse.http.server - 112 - ERROR - PUT-362- Failed handle request via <function _async_render at 0x7ff0363d32a8>: <XForwardedForRequest at 0x7ff02cca4b48 method=u'PUT' uri=u'/_matrix/client/r0/rooms/<<ROOM ID>>/send/m.room.message/163970?user_id=<<USER ID>>&amp;access_token=<redacted>&amp;ts=1477692257000' clientproto=u'HTTP/1.1' site=18095>: Traceback (most recent call last): Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: current.result = callback(current.result, *args, **kw) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: _inlineCallbacks(r, g, status) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: --- <exception caught here> --- Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield h(self, request) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 316, in _async_render Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: callback_return = yield callback(request, **kwargs) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/rest/client/v1/room.py", line 217, in on_POST Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id, Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 488, in create_and_send_nonmember_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 279, in create_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield self.auth.check_auth_blocking(requester.user.to_string()) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = g.send(result) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/api/auth.py", line 812, in check_auth_blocking Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: timestamp = yield self.store.user_last_seen_monthly_active(user_id) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: exceptions.AttributeError: 'EventCreatorSlavedStore' object has no attribute 'user_last_seen_monthly_active'
exceptions.AttributeError
def upsert_monthly_active_user(self, user_id): """Updates or inserts the user into the monthly active user table, which is used to track the current MAU usage of the server Args: user_id (str): user to add/update """ # Support user never to be included in MAU stats. Note I can't easily call this # from upsert_monthly_active_user_txn because then I need a _txn form of # is_support_user which is complicated because I want to cache the result. # Therefore I call it here and ignore the case where # upsert_monthly_active_user_txn is called directly from # _initialise_reserved_users reasoning that it would be very strange to # include a support user in this context. is_support = yield self.is_support_user(user_id) if is_support: return yield self.db.runInteraction( "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id )
def upsert_monthly_active_user(self, user_id): """Updates or inserts the user into the monthly active user table, which is used to track the current MAU usage of the server Args: user_id (str): user to add/update """ # Support user never to be included in MAU stats. Note I can't easily call this # from upsert_monthly_active_user_txn because then I need a _txn form of # is_support_user which is complicated because I want to cache the result. # Therefore I call it here and ignore the case where # upsert_monthly_active_user_txn is called directly from # _initialise_reserved_users reasoning that it would be very strange to # include a support user in this context. is_support = yield self.is_support_user(user_id) if is_support: return yield self.db.runInteraction( "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) user_in_mau = self.user_last_seen_monthly_active.cache.get( (user_id,), None, update_metrics=False ) if user_in_mau is None: self.get_monthly_active_count.invalidate(()) self.user_last_seen_monthly_active.invalidate((user_id,))
https://github.com/matrix-org/synapse/issues/4639
Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: 2019-02-13 17:56:49,005 - synapse.http.server - 112 - ERROR - PUT-362- Failed handle request via <function _async_render at 0x7ff0363d32a8>: <XForwardedForRequest at 0x7ff02cca4b48 method=u'PUT' uri=u'/_matrix/client/r0/rooms/<<ROOM ID>>/send/m.room.message/163970?user_id=<<USER ID>>&amp;access_token=<redacted>&amp;ts=1477692257000' clientproto=u'HTTP/1.1' site=18095>: Traceback (most recent call last): Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: current.result = callback(current.result, *args, **kw) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: _inlineCallbacks(r, g, status) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: --- <exception caught here> --- Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield h(self, request) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 316, in _async_render Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: callback_return = yield callback(request, **kwargs) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/rest/client/v1/room.py", line 217, in on_POST Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id, Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 488, in create_and_send_nonmember_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 279, in create_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield self.auth.check_auth_blocking(requester.user.to_string()) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = g.send(result) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/api/auth.py", line 812, in check_auth_blocking Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: timestamp = yield self.store.user_last_seen_monthly_active(user_id) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: exceptions.AttributeError: 'EventCreatorSlavedStore' object has no attribute 'user_last_seen_monthly_active'
exceptions.AttributeError
def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member We consciously do not call is_support_txn from this method because it is not possible to cache the response. is_support_txn will be false in almost all cases, so it seems reasonable to call it only for upsert_monthly_active_user and to call is_support_txn manually for cases where upsert_monthly_active_user_txn is called directly, like _initialise_reserved_users In short, don't call this method with support users. (Support users should not appear in the MAU stats). Args: txn (cursor): user_id (str): user to add/update Returns: bool: True if a new entry was created, False if an existing one was updated. """ # Am consciously deciding to lock the table on the basis that is ought # never be a big table and alternative approaches (batching multiple # upserts into a single txn) introduced a lot of extra complexity. # See https://github.com/matrix-org/synapse/issues/3854 for more is_insert = self.db.simple_upsert_txn( txn, table="monthly_active_users", keyvalues={"user_id": user_id}, values={"timestamp": int(self._clock.time_msec())}, ) self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) self._invalidate_cache_and_stream( txn, self.user_last_seen_monthly_active, (user_id,) ) return is_insert
def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member Note that, after calling this method, it will generally be necessary to invalidate the caches on user_last_seen_monthly_active and get_monthly_active_count. We can't do that here, because we are running in a database thread rather than the main thread, and we can't call txn.call_after because txn may not be a LoggingTransaction. We consciously do not call is_support_txn from this method because it is not possible to cache the response. is_support_txn will be false in almost all cases, so it seems reasonable to call it only for upsert_monthly_active_user and to call is_support_txn manually for cases where upsert_monthly_active_user_txn is called directly, like _initialise_reserved_users In short, don't call this method with support users. (Support users should not appear in the MAU stats). Args: txn (cursor): user_id (str): user to add/update Returns: bool: True if a new entry was created, False if an existing one was updated. """ # Am consciously deciding to lock the table on the basis that is ought # never be a big table and alternative approaches (batching multiple # upserts into a single txn) introduced a lot of extra complexity. # See https://github.com/matrix-org/synapse/issues/3854 for more is_insert = self.db.simple_upsert_txn( txn, table="monthly_active_users", keyvalues={"user_id": user_id}, values={"timestamp": int(self._clock.time_msec())}, ) return is_insert
https://github.com/matrix-org/synapse/issues/4639
Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: 2019-02-13 17:56:49,005 - synapse.http.server - 112 - ERROR - PUT-362- Failed handle request via <function _async_render at 0x7ff0363d32a8>: <XForwardedForRequest at 0x7ff02cca4b48 method=u'PUT' uri=u'/_matrix/client/r0/rooms/<<ROOM ID>>/send/m.room.message/163970?user_id=<<USER ID>>&amp;access_token=<redacted>&amp;ts=1477692257000' clientproto=u'HTTP/1.1' site=18095>: Traceback (most recent call last): Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: current.result = callback(current.result, *args, **kw) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: _inlineCallbacks(r, g, status) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: --- <exception caught here> --- Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield h(self, request) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/http/server.py", line 316, in _async_render Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: callback_return = yield callback(request, **kwargs) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/rest/client/v1/room.py", line 217, in on_POST Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id, Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 488, in create_and_send_nonmember_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: txn_id=txn_id Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = result.throwExceptionIntoGenerator(g) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: return g.throw(self.type, self.value, self.tb) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/handlers/message.py", line 279, in create_event Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: yield self.auth.check_auth_blocking(requester.user.to_string()) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: result = g.send(result) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: File "/opt/synapse/env/local/lib/python2.7/site-packages/synapse/api/auth.py", line 812, in check_auth_blocking Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: timestamp = yield self.store.user_last_seen_monthly_active(user_id) Feb 13 17:56:49 ip-10-1-2-232 matrix_synapse[3223]: exceptions.AttributeError: 'EventCreatorSlavedStore' object has no attribute 'user_last_seen_monthly_active'
exceptions.AttributeError
def read_config(self, config, config_dir_path, **kwargs): acme_config = config.get("acme", None) if acme_config is None: acme_config = {} self.acme_enabled = acme_config.get("enabled", False) # hyperlink complains on py2 if this is not a Unicode self.acme_url = six.text_type( acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory") ) self.acme_port = acme_config.get("port", 80) self.acme_bind_addresses = acme_config.get("bind_addresses", ["::", "0.0.0.0"]) self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30) self.acme_domain = acme_config.get("domain", config.get("server_name")) self.acme_account_key_file = self.abspath( acme_config.get("account_key_file", config_dir_path + "/client.key") ) self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) if self.has_tls_listener(): if not self.tls_certificate_file: raise ConfigError( "tls_certificate_path must be specified if TLS-enabled listeners are " "configured." ) if not self.tls_private_key_file: raise ConfigError( "tls_private_key_path must be specified if TLS-enabled listeners are " "configured." ) self._original_tls_fingerprints = config.get("tls_fingerprints", []) if self._original_tls_fingerprints is None: self._original_tls_fingerprints = [] self.tls_fingerprints = list(self._original_tls_fingerprints) # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( "federation_verify_certificates", True ) # Minimum TLS version to use for outbound federation traffic self.federation_client_minimum_tls_version = str( config.get("federation_client_minimum_tls_version", 1) ) if self.federation_client_minimum_tls_version not in ["1", "1.1", "1.2", "1.3"]: raise ConfigError( "federation_client_minimum_tls_version must be one of: 1, 1.1, 1.2, 1.3" ) # Prevent people shooting themselves in the foot here by setting it to # the biggest number blindly if self.federation_client_minimum_tls_version == "1.3": if getattr(SSL, "OP_NO_TLSv1_3", None) is None: raise ConfigError( ( "federation_client_minimum_tls_version cannot be 1.3, " "your OpenSSL does not support it" ) ) # Whitelist of domains to not verify certificates for fed_whitelist_entries = config.get( "federation_certificate_verification_whitelist", [] ) # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) except UnicodeEncodeError: raise ConfigError( "IDNA domain names are not allowed in the " "federation_certificate_verification_whitelist: %s" % (entry,) ) # Convert globs to regex self.federation_certificate_verification_whitelist.append(entry_regex) # List of custom certificate authorities for federation traffic validation custom_ca_list = config.get("federation_custom_ca_list", None) # Read in and parse custom CA certificates self.federation_ca_trust_root = None if custom_ca_list is not None: if len(custom_ca_list) == 0: # A trustroot cannot be generated without any CA certificates. # Raise an error if this option has been specified without any # corresponding certificates. raise ConfigError( "federation_custom_ca_list specified without any certificate files" ) certs = [] for ca_file in custom_ca_list: logger.debug("Reading custom CA certificate file: %s", ca_file) content = self.read_file(ca_file, "federation_custom_ca_list") # Parse the CA certificates try: cert_base = Certificate.loadPEM(content) certs.append(cert_base) except Exception as e: raise ConfigError( "Error parsing custom CA certificate file %s: %s" % (ca_file, e) ) self.federation_ca_trust_root = trustRootFromCertificates(certs) # This config option applies to non-federation HTTP clients # (e.g. for talking to recaptcha, identity servers, and such) # It should never be used in production, and is intended for # use only when running tests. self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( "use_insecure_ssl_client_just_for_testing_do_not_use" ) self.tls_certificate = None self.tls_private_key = None
def read_config(self, config, config_dir_path, **kwargs): acme_config = config.get("acme", None) if acme_config is None: acme_config = {} self.acme_enabled = acme_config.get("enabled", False) # hyperlink complains on py2 if this is not a Unicode self.acme_url = six.text_type( acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory") ) self.acme_port = acme_config.get("port", 80) self.acme_bind_addresses = acme_config.get("bind_addresses", ["::", "0.0.0.0"]) self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30) self.acme_domain = acme_config.get("domain", config.get("server_name")) self.acme_account_key_file = self.abspath( acme_config.get("account_key_file", config_dir_path + "/client.key") ) self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) if self.has_tls_listener(): if not self.tls_certificate_file: raise ConfigError( "tls_certificate_path must be specified if TLS-enabled listeners are " "configured." ) if not self.tls_private_key_file: raise ConfigError( "tls_private_key_path must be specified if TLS-enabled listeners are " "configured." ) self._original_tls_fingerprints = config.get("tls_fingerprints", []) if self._original_tls_fingerprints is None: self._original_tls_fingerprints = [] self.tls_fingerprints = list(self._original_tls_fingerprints) # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( "federation_verify_certificates", True ) # Minimum TLS version to use for outbound federation traffic self.federation_client_minimum_tls_version = str( config.get("federation_client_minimum_tls_version", 1) ) if self.federation_client_minimum_tls_version not in ["1", "1.1", "1.2", "1.3"]: raise ConfigError( "federation_client_minimum_tls_version must be one of: 1, 1.1, 1.2, 1.3" ) # Prevent people shooting themselves in the foot here by setting it to # the biggest number blindly if self.federation_client_minimum_tls_version == "1.3": if getattr(SSL, "OP_NO_TLSv1_3", None) is None: raise ConfigError( ( "federation_client_minimum_tls_version cannot be 1.3, " "your OpenSSL does not support it" ) ) # Whitelist of domains to not verify certificates for fed_whitelist_entries = config.get( "federation_certificate_verification_whitelist", [] ) # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] for entry in fed_whitelist_entries: # Convert globs to regex entry_regex = glob_to_regex(entry) self.federation_certificate_verification_whitelist.append(entry_regex) # List of custom certificate authorities for federation traffic validation custom_ca_list = config.get("federation_custom_ca_list", None) # Read in and parse custom CA certificates self.federation_ca_trust_root = None if custom_ca_list is not None: if len(custom_ca_list) == 0: # A trustroot cannot be generated without any CA certificates. # Raise an error if this option has been specified without any # corresponding certificates. raise ConfigError( "federation_custom_ca_list specified without any certificate files" ) certs = [] for ca_file in custom_ca_list: logger.debug("Reading custom CA certificate file: %s", ca_file) content = self.read_file(ca_file, "federation_custom_ca_list") # Parse the CA certificates try: cert_base = Certificate.loadPEM(content) certs.append(cert_base) except Exception as e: raise ConfigError( "Error parsing custom CA certificate file %s: %s" % (ca_file, e) ) self.federation_ca_trust_root = trustRootFromCertificates(certs) # This config option applies to non-federation HTTP clients # (e.g. for talking to recaptcha, identity servers, and such) # It should never be used in production, and is intended for # use only when running tests. self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( "use_insecure_ssl_client_just_for_testing_do_not_use" ) self.tls_certificate = None self.tls_private_key = None
https://github.com/matrix-org/synapse/issues/5939
Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/http/federation/well_known_resolver.py", line 114, in _do_get_well_known self._well_known_agent.request(b"GET", uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 2126, in request deferred = self._agent.request(method, uri, headers, bodyProducer) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1732, in request endpoint = self._getEndpoint(parsedURI) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1715, in _getEndpoint return self._endpointFactory.endpointForURI(uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1590, in endpointForURI uri.port) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 155, in creatorForNetloc return self.get_options(hostname) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 124, in get_options if regex.match(host): TypeError: cannot use a string pattern on a bytes-like object
TypeError
def get_options(self, host: bytes): # IPolicyForHTTPS.get_options takes bytes, but we want to compare # against the str whitelist. The hostnames in the whitelist are already # IDNA-encoded like the hosts will be here. ascii_host = host.decode("ascii") # Check if certificate verification has been enabled should_verify = self._config.federation_verify_certificates # Check if we've disabled certificate verification for this host if should_verify: for regex in self._config.federation_certificate_verification_whitelist: if regex.match(ascii_host): should_verify = False break ssl_context = ( self._verify_ssl_context if should_verify else self._no_verify_ssl_context ) return SSLClientConnectionCreator(host, ssl_context, should_verify)
def get_options(self, host): # Check if certificate verification has been enabled should_verify = self._config.federation_verify_certificates # Check if we've disabled certificate verification for this host if should_verify: for regex in self._config.federation_certificate_verification_whitelist: if regex.match(host): should_verify = False break ssl_context = ( self._verify_ssl_context if should_verify else self._no_verify_ssl_context ) return SSLClientConnectionCreator(host, ssl_context, should_verify)
https://github.com/matrix-org/synapse/issues/5939
Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/http/federation/well_known_resolver.py", line 114, in _do_get_well_known self._well_known_agent.request(b"GET", uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 2126, in request deferred = self._agent.request(method, uri, headers, bodyProducer) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1732, in request endpoint = self._getEndpoint(parsedURI) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1715, in _getEndpoint return self._endpointFactory.endpointForURI(uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1590, in endpointForURI uri.port) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 155, in creatorForNetloc return self.get_options(hostname) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 124, in get_options if regex.match(host): TypeError: cannot use a string pattern on a bytes-like object
TypeError
def __init__(self, hostname: bytes, ctx, verify_certs: bool): self._ctx = ctx self._verifier = ConnectionVerifier(hostname, verify_certs)
def __init__(self, hostname, ctx, verify_certs): self._ctx = ctx self._verifier = ConnectionVerifier(hostname, verify_certs)
https://github.com/matrix-org/synapse/issues/5939
Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/http/federation/well_known_resolver.py", line 114, in _do_get_well_known self._well_known_agent.request(b"GET", uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 2126, in request deferred = self._agent.request(method, uri, headers, bodyProducer) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1732, in request endpoint = self._getEndpoint(parsedURI) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1715, in _getEndpoint return self._endpointFactory.endpointForURI(uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1590, in endpointForURI uri.port) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 155, in creatorForNetloc return self.get_options(hostname) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 124, in get_options if regex.match(host): TypeError: cannot use a string pattern on a bytes-like object
TypeError
def __init__(self, hostname: bytes, verify_certs): self._verify_certs = verify_certs _decoded = hostname.decode("ascii") if isIPAddress(_decoded) or isIPv6Address(_decoded): self._is_ip_address = True else: self._is_ip_address = False self._hostnameBytes = hostname self._hostnameASCII = self._hostnameBytes.decode("ascii")
def __init__(self, hostname, verify_certs): self._verify_certs = verify_certs if isIPAddress(hostname) or isIPv6Address(hostname): self._hostnameBytes = hostname.encode("ascii") self._is_ip_address = True else: # twisted's ClientTLSOptions falls back to the stdlib impl here if # idna is not installed, but points out that lacks support for # IDNA2008 (http://bugs.python.org/issue17305). # # We can rely on having idna. self._hostnameBytes = idna.encode(hostname) self._is_ip_address = False self._hostnameASCII = self._hostnameBytes.decode("ascii")
https://github.com/matrix-org/synapse/issues/5939
Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/http/federation/well_known_resolver.py", line 114, in _do_get_well_known self._well_known_agent.request(b"GET", uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 2126, in request deferred = self._agent.request(method, uri, headers, bodyProducer) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1732, in request endpoint = self._getEndpoint(parsedURI) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1715, in _getEndpoint return self._endpointFactory.endpointForURI(uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1590, in endpointForURI uri.port) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 155, in creatorForNetloc return self.get_options(hostname) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 124, in get_options if regex.match(host): TypeError: cannot use a string pattern on a bytes-like object
TypeError
def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri): self._reactor = reactor self._parsed_uri = parsed_uri # set up the TLS connection params # # XXX disabling TLS is really only supported here for the benefit of the # unit tests. We should make the UTs cope with TLS rather than having to make # the code support the unit tests. if tls_client_options_factory is None: self._tls_options = None else: self._tls_options = tls_client_options_factory.get_options( self._parsed_uri.host ) self._srv_resolver = srv_resolver
def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri): self._reactor = reactor self._parsed_uri = parsed_uri # set up the TLS connection params # # XXX disabling TLS is really only supported here for the benefit of the # unit tests. We should make the UTs cope with TLS rather than having to make # the code support the unit tests. if tls_client_options_factory is None: self._tls_options = None else: self._tls_options = tls_client_options_factory.get_options( self._parsed_uri.host.decode("ascii") ) self._srv_resolver = srv_resolver
https://github.com/matrix-org/synapse/issues/5939
Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/synapse/http/federation/well_known_resolver.py", line 114, in _do_get_well_known self._well_known_agent.request(b"GET", uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 2126, in request deferred = self._agent.request(method, uri, headers, bodyProducer) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1732, in request endpoint = self._getEndpoint(parsedURI) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1715, in _getEndpoint return self._endpointFactory.endpointForURI(uri) File "/usr/local/lib/python3.7/site-packages/twisted/web/client.py", line 1590, in endpointForURI uri.port) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 155, in creatorForNetloc return self.get_options(hostname) File "/usr/local/lib/python3.7/site-packages/synapse/crypto/context_factory.py", line 124, in get_options if regex.match(host): TypeError: cannot use a string pattern on a bytes-like object
TypeError
def compute_state_delta( self, room_id, batch, sync_config, since_token, now_token, full_state ): """Works out the difference in state between the start of the timeline and the previous sync. Args: room_id(str): batch(synapse.handlers.sync.TimelineBatch): The timeline batch for the room that will be sent to the user. sync_config(synapse.handlers.sync.SyncConfig): since_token(str|None): Token of the end of the previous batch. May be None. now_token(str): Token of the end of the current batch. full_state(bool): Whether to force returning the full state. Returns: A deferred dict of (type, state_key) -> Event """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( sync_config.filter_collection.include_redundant_members() ) if lazy_load_members: # We only request state for the members needed to display the # timeline: members_to_fetch = set( event.sender # FIXME: we also care about invite targets etc. for event in batch.events ) if full_state: # always make sure we LL ourselves so we know we're in the room # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 # We only need apply this on full state syncs given we disabled # LL for incr syncs in #3840. members_to_fetch.add(sync_config.user.to_string()) state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) else: state_filter = StateFilter.all() timeline_state = { (event.type, event.state_key): event.event_id for event in batch.events if event.is_state() } if full_state: if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = current_state_ids state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_ids, previous={}, current=current_state_ids, lazy_load_members=lazy_load_members, ) elif batch.limited: if batch: state_at_timeline_start = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: # Its not clear how we get here, but empirically we do # (#5407). Logging has been added elsewhere to try and # figure out where this state comes from. state_at_timeline_start = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) # for now, we disable LL for gappy syncs - see # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 # N.B. this slows down incr syncs as we are now processing way # more state in the server than if we were LLing. # # We still have to filter timeline_start to LL entries (above) in order # for _calculate_state's LL logic to work, as we have to include LL # members for timeline senders in case they weren't loaded in the initial # sync. We do this by (counterintuitively) by filtering timeline_start # members to just be ones which were timeline senders, which then ensures # all of the rest get included in the state block (if we need to know # about them). state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, state_filter=state_filter ) if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) else: # Its not clear how we get here, but empirically we do # (#5407). Logging has been added elsewhere to try and # figure out where this state comes from. current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. # But we're lazy-loading, so the client might need some more # member events to understand the events in this timeline. # So we fish out all the member events corresponding to the # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, # we only want members! state_filter=StateFilter.from_types( (EventTypes.Member, member) for member in members_to_fetch ), ) if lazy_load_members and not include_redundant_members: cache_key = (sync_config.user.to_string(), sync_config.device_id) cache = self.get_lazy_loaded_members_cache(cache_key) # if it's a new sync sequence, then assume the client has had # amnesia and doesn't want any recent lazy-loaded members # de-duplicated. if since_token is None: logger.debug("clearing LruCache for %r", cache_key) cache.clear() else: # only send members which aren't in our LruCache (either # because they're new to this client or have been pushed out # of the cache) logger.debug("filtering state from %r...", state_ids) state_ids = { t: event_id for t, event_id in iteritems(state_ids) if cache.get(t[1]) != event_id } logger.debug("...to %r", state_ids) # add any member IDs we are about to send into our LruCache for t, event_id in itertools.chain( state_ids.items(), timeline_state.items() ): if t[0] == EventTypes.Member: cache.set(t[1], event_id) state = {} if state_ids: state = yield self.store.get_events(list(state_ids.values())) return { (e.type, e.state_key): e for e in sync_config.filter_collection.filter_room_state(list(state.values())) }
def compute_state_delta( self, room_id, batch, sync_config, since_token, now_token, full_state ): """Works out the difference in state between the start of the timeline and the previous sync. Args: room_id(str): batch(synapse.handlers.sync.TimelineBatch): The timeline batch for the room that will be sent to the user. sync_config(synapse.handlers.sync.SyncConfig): since_token(str|None): Token of the end of the previous batch. May be None. now_token(str): Token of the end of the current batch. full_state(bool): Whether to force returning the full state. Returns: A deferred dict of (type, state_key) -> Event """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( sync_config.filter_collection.include_redundant_members() ) if lazy_load_members: # We only request state for the members needed to display the # timeline: members_to_fetch = set( event.sender # FIXME: we also care about invite targets etc. for event in batch.events ) if full_state: # always make sure we LL ourselves so we know we're in the room # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 # We only need apply this on full state syncs given we disabled # LL for incr syncs in #3840. members_to_fetch.add(sync_config.user.to_string()) state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) else: state_filter = StateFilter.all() timeline_state = { (event.type, event.state_key): event.event_id for event in batch.events if event.is_state() } if full_state: if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = current_state_ids state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_ids, previous={}, current=current_state_ids, lazy_load_members=lazy_load_members, ) elif batch.limited: state_at_timeline_start = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) # for now, we disable LL for gappy syncs - see # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 # N.B. this slows down incr syncs as we are now processing way # more state in the server than if we were LLing. # # We still have to filter timeline_start to LL entries (above) in order # for _calculate_state's LL logic to work, as we have to include LL # members for timeline senders in case they weren't loaded in the initial # sync. We do this by (counterintuitively) by filtering timeline_start # members to just be ones which were timeline senders, which then ensures # all of the rest get included in the state block (if we need to know # about them). state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, state_filter=state_filter ) current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. # But we're lazy-loading, so the client might need some more # member events to understand the events in this timeline. # So we fish out all the member events corresponding to the # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, # we only want members! state_filter=StateFilter.from_types( (EventTypes.Member, member) for member in members_to_fetch ), ) if lazy_load_members and not include_redundant_members: cache_key = (sync_config.user.to_string(), sync_config.device_id) cache = self.get_lazy_loaded_members_cache(cache_key) # if it's a new sync sequence, then assume the client has had # amnesia and doesn't want any recent lazy-loaded members # de-duplicated. if since_token is None: logger.debug("clearing LruCache for %r", cache_key) cache.clear() else: # only send members which aren't in our LruCache (either # because they're new to this client or have been pushed out # of the cache) logger.debug("filtering state from %r...", state_ids) state_ids = { t: event_id for t, event_id in iteritems(state_ids) if cache.get(t[1]) != event_id } logger.debug("...to %r", state_ids) # add any member IDs we are about to send into our LruCache for t, event_id in itertools.chain( state_ids.items(), timeline_state.items() ): if t[0] == EventTypes.Member: cache.set(t[1], event_id) state = {} if state_ids: state = yield self.store.get_events(list(state_ids.values())) return { (e.type, e.state_key): e for e in sync_config.filter_collection.filter_room_state(list(state.values())) }
https://github.com/matrix-org/synapse/issues/5407
2019-06-09 19:03:46,101 - synapse.handlers.sync - 909 - INFO - GET-835 - Calculating sync response for @uelen:riot.firechicken.net between StreamToken(room_key='s707461', presence_key='26561430', typing_key='2874', receipt_key='634533', account_data_key='24909', push_rules_key='56', to_device_key='95', device_list_key='55442', groups_key='1') and StreamToken(room_key='s709258', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1) 2019-06-09 19:03:46,150 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,437 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,446 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !IFlHsKQHpcFgnpwgFn:matrix.org for user @uelen:riot.firechicken.net with 0 state events 2019-06-09 19:03:46,490 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !MbRaSiMIRhhxDtJENL:maunium.net for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,493 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,500 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !iNmaIQExDMeqdITdHH:matrix.org for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,505 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !tvlbVxTSVUgJbmelND:matrix.org for user @uelen:riot.firechicken.net with 3 state events 2019-06-09 19:03:46,553 - synapse.http.server - 112 - ERROR - GET-835 - Failed handle request via 'SyncRestServlet': <SynapseRequest at 0x7f955618ce10 method='GET' uri='/_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1' clientproto='HTTP/1.0' site=8083> Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 527, in runWithConnection defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 487, in runInteraction defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/stream.py", line 414, in get_room_events_stream_for_room defer.returnValue((ret, key)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: ([<FrozenEvent event_id='$15600945001005796CUSFf:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945001005804zAlqr:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945021005812lLBZU:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600946091006766ZzkSV:matrix.org', type='m.room.member', state_key='@freenode_heatsink:matrix.org'>, <FrozenEvent event_id='$15600946391007033aolro:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947131007663SiNSd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947491007978QDPLH:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947551008018jCiVq:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948051008364bsBFK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008555lylfW:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008562BFdsx:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948321008586DDtRZ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948501008732joyAz:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948591008929UoHxd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948821009108qkrIj:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948911009164JkHeG:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949091009307PONGY:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949261009398foetQ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949401009508lTpzl:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950171010117CdspK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950391010258hrwAd:matrix.org', type='m.room.message', state_key='None'>], 's708750') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 472, in _load_filtered_recents limited=limited or newly_joined_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: TimelineBatch(prev_batch=StreamToken(room_key='s708889', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1), events=[], limited=True) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/rest/client/v2_alpha/sync.py", line 167, in on_GET full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 242, in wait_for_sync_for_user sync_config, since_token, timeout, full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 264, in _wait_for_sync_for_user sync_config, since_token, full_state=full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 935, in generate_sync_result sync_result_builder, account_data_by_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1316, in _generate_sync_entry_for_rooms yield concurrently_execute(handle_room_entries, room_entries, 10) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 149, in _concurrently_execute_inner yield func(next(it)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1752, in _generate_room_entry full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 775, in compute_state_delta batch.events[0].event_id, state_filter=state_filter, IndexError: list index out of range 2019-06-09 19:03:46,556 - synapse.access.http.8083 - 302 - INFO - GET-835 - 127.0.0.1 - 8083 - {@uelen:riot.firechicken.net} Processed request: 0.457sec/0.001sec (0.185sec, 0.004sec) (0.186sec/0.941sec/32) 55B 500 "GET /_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1 HTTP/1.0" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36" [1 dbevts]
IndexError
def _generate_room_entry( self, sync_result_builder, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. room_builder(RoomSyncResultBuilder) ephemeral(list): List of new ephemeral events for room tags(list): List of *all* tags for room, or None if there has been no change. account_data(list): List of new account data for room always_include(bool): Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) events = room_builder.events # We want to shortcut out as early as possible. if not (always_include or account_data or ephemeral or full_state): if events == [] and tags is None: return now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token batch = yield self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, recents=events, newly_joined_room=newly_joined, ) if not batch and batch.limited: # This resulted in #5407, which is weird, so lets log! We do it # here as we have the maximum amount of information. user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Issue #5407: Found limited batch with no events. user %s, room %s," " sync_config %s, newly_joined %s, events %s, batch %s.", user_id, room_id, sync_config, newly_joined, events, batch, ) if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "Timeline events after filtering in newly-joined room %s: %r", room_id, batch, ) # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the # tag was added by synapse e.g. for server notice rooms. if full_state: user_id = sync_result_builder.sync_config.user.to_string() tags = yield self.store.get_tags_for_room(user_id, room_id) # If there aren't any tags, don't send the empty tags list down # sync if not tags: tags = None account_data_events = [] if tags is not None: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) account_data_events = sync_config.filter_collection.filter_room_account_data( account_data_events ) ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) if not (always_include or batch or account_data_events or ephemeral or full_state): return state = yield self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, full_state=full_state ) summary = {} # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( # we recalulate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or ( # XXX: this may include false positives in the form of LL # members which have snuck into state batch.limited and any(t == EventTypes.Member for (t, k) in state) ) or since_token is None ): summary = yield self.compute_summary( room_id, sync_config, batch, state, now_token ) if room_builder.rtype == "joined": unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, ephemeral=ephemeral, account_data=account_data_events, unread_notifications=unread_notifications, summary=summary, ) if room_sync or always_include: notifs = yield self.unread_notifs_for_room_id(room_id, sync_config) if notifs is not None: unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) if room_sync or always_include: sync_result_builder.archived.append(room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype)
def _generate_room_entry( self, sync_result_builder, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. room_builder(RoomSyncResultBuilder) ephemeral(list): List of new ephemeral events for room tags(list): List of *all* tags for room, or None if there has been no change. account_data(list): List of new account data for room always_include(bool): Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) events = room_builder.events # We want to shortcut out as early as possible. if not (always_include or account_data or ephemeral or full_state): if events == [] and tags is None: return now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token batch = yield self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, recents=events, newly_joined_room=newly_joined, ) if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "Timeline events after filtering in newly-joined room %s: %r", room_id, batch, ) # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the # tag was added by synapse e.g. for server notice rooms. if full_state: user_id = sync_result_builder.sync_config.user.to_string() tags = yield self.store.get_tags_for_room(user_id, room_id) # If there aren't any tags, don't send the empty tags list down # sync if not tags: tags = None account_data_events = [] if tags is not None: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) account_data_events = sync_config.filter_collection.filter_room_account_data( account_data_events ) ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) if not (always_include or batch or account_data_events or ephemeral or full_state): return state = yield self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, full_state=full_state ) summary = {} # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( # we recalulate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or ( # XXX: this may include false positives in the form of LL # members which have snuck into state batch.limited and any(t == EventTypes.Member for (t, k) in state) ) or since_token is None ): summary = yield self.compute_summary( room_id, sync_config, batch, state, now_token ) if room_builder.rtype == "joined": unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, ephemeral=ephemeral, account_data=account_data_events, unread_notifications=unread_notifications, summary=summary, ) if room_sync or always_include: notifs = yield self.unread_notifs_for_room_id(room_id, sync_config) if notifs is not None: unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) if room_sync or always_include: sync_result_builder.archived.append(room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype)
https://github.com/matrix-org/synapse/issues/5407
2019-06-09 19:03:46,101 - synapse.handlers.sync - 909 - INFO - GET-835 - Calculating sync response for @uelen:riot.firechicken.net between StreamToken(room_key='s707461', presence_key='26561430', typing_key='2874', receipt_key='634533', account_data_key='24909', push_rules_key='56', to_device_key='95', device_list_key='55442', groups_key='1') and StreamToken(room_key='s709258', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1) 2019-06-09 19:03:46,150 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,437 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,446 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !IFlHsKQHpcFgnpwgFn:matrix.org for user @uelen:riot.firechicken.net with 0 state events 2019-06-09 19:03:46,490 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !MbRaSiMIRhhxDtJENL:maunium.net for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,493 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,500 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !iNmaIQExDMeqdITdHH:matrix.org for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,505 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !tvlbVxTSVUgJbmelND:matrix.org for user @uelen:riot.firechicken.net with 3 state events 2019-06-09 19:03:46,553 - synapse.http.server - 112 - ERROR - GET-835 - Failed handle request via 'SyncRestServlet': <SynapseRequest at 0x7f955618ce10 method='GET' uri='/_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1' clientproto='HTTP/1.0' site=8083> Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 527, in runWithConnection defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 487, in runInteraction defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/stream.py", line 414, in get_room_events_stream_for_room defer.returnValue((ret, key)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: ([<FrozenEvent event_id='$15600945001005796CUSFf:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945001005804zAlqr:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945021005812lLBZU:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600946091006766ZzkSV:matrix.org', type='m.room.member', state_key='@freenode_heatsink:matrix.org'>, <FrozenEvent event_id='$15600946391007033aolro:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947131007663SiNSd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947491007978QDPLH:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947551008018jCiVq:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948051008364bsBFK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008555lylfW:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008562BFdsx:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948321008586DDtRZ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948501008732joyAz:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948591008929UoHxd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948821009108qkrIj:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948911009164JkHeG:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949091009307PONGY:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949261009398foetQ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949401009508lTpzl:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950171010117CdspK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950391010258hrwAd:matrix.org', type='m.room.message', state_key='None'>], 's708750') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 472, in _load_filtered_recents limited=limited or newly_joined_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: TimelineBatch(prev_batch=StreamToken(room_key='s708889', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1), events=[], limited=True) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/rest/client/v2_alpha/sync.py", line 167, in on_GET full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 242, in wait_for_sync_for_user sync_config, since_token, timeout, full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 264, in _wait_for_sync_for_user sync_config, since_token, full_state=full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 935, in generate_sync_result sync_result_builder, account_data_by_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1316, in _generate_sync_entry_for_rooms yield concurrently_execute(handle_room_entries, room_entries, 10) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 149, in _concurrently_execute_inner yield func(next(it)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1752, in _generate_room_entry full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 775, in compute_state_delta batch.events[0].event_id, state_filter=state_filter, IndexError: list index out of range 2019-06-09 19:03:46,556 - synapse.access.http.8083 - 302 - INFO - GET-835 - 127.0.0.1 - 8083 - {@uelen:riot.firechicken.net} Processed request: 0.457sec/0.001sec (0.185sec, 0.004sec) (0.186sec/0.941sec/32) 55B 500 "GET /_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1 HTTP/1.0" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36" [1 dbevts]
IndexError
def compute_state_delta( self, room_id, batch, sync_config, since_token, now_token, full_state ): """Works out the difference in state between the start of the timeline and the previous sync. Args: room_id(str): batch(synapse.handlers.sync.TimelineBatch): The timeline batch for the room that will be sent to the user. sync_config(synapse.handlers.sync.SyncConfig): since_token(str|None): Token of the end of the previous batch. May be None. now_token(str): Token of the end of the current batch. full_state(bool): Whether to force returning the full state. Returns: A deferred dict of (type, state_key) -> Event """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( sync_config.filter_collection.include_redundant_members() ) if lazy_load_members: # We only request state for the members needed to display the # timeline: members_to_fetch = set( event.sender # FIXME: we also care about invite targets etc. for event in batch.events ) if full_state: # always make sure we LL ourselves so we know we're in the room # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 # We only need apply this on full state syncs given we disabled # LL for incr syncs in #3840. members_to_fetch.add(sync_config.user.to_string()) state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) else: state_filter = StateFilter.all() timeline_state = { (event.type, event.state_key): event.event_id for event in batch.events if event.is_state() } if full_state: if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = current_state_ids state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_ids, previous={}, current=current_state_ids, lazy_load_members=lazy_load_members, ) elif batch.limited: if batch: state_at_timeline_start = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: # We can get here if the user has ignored the senders of all # the recent events. state_at_timeline_start = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) # for now, we disable LL for gappy syncs - see # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 # N.B. this slows down incr syncs as we are now processing way # more state in the server than if we were LLing. # # We still have to filter timeline_start to LL entries (above) in order # for _calculate_state's LL logic to work, as we have to include LL # members for timeline senders in case they weren't loaded in the initial # sync. We do this by (counterintuitively) by filtering timeline_start # members to just be ones which were timeline senders, which then ensures # all of the rest get included in the state block (if we need to know # about them). state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, state_filter=state_filter ) if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) else: # Its not clear how we get here, but empirically we do # (#5407). Logging has been added elsewhere to try and # figure out where this state comes from. current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. # But we're lazy-loading, so the client might need some more # member events to understand the events in this timeline. # So we fish out all the member events corresponding to the # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, # we only want members! state_filter=StateFilter.from_types( (EventTypes.Member, member) for member in members_to_fetch ), ) if lazy_load_members and not include_redundant_members: cache_key = (sync_config.user.to_string(), sync_config.device_id) cache = self.get_lazy_loaded_members_cache(cache_key) # if it's a new sync sequence, then assume the client has had # amnesia and doesn't want any recent lazy-loaded members # de-duplicated. if since_token is None: logger.debug("clearing LruCache for %r", cache_key) cache.clear() else: # only send members which aren't in our LruCache (either # because they're new to this client or have been pushed out # of the cache) logger.debug("filtering state from %r...", state_ids) state_ids = { t: event_id for t, event_id in iteritems(state_ids) if cache.get(t[1]) != event_id } logger.debug("...to %r", state_ids) # add any member IDs we are about to send into our LruCache for t, event_id in itertools.chain( state_ids.items(), timeline_state.items() ): if t[0] == EventTypes.Member: cache.set(t[1], event_id) state = {} if state_ids: state = yield self.store.get_events(list(state_ids.values())) return { (e.type, e.state_key): e for e in sync_config.filter_collection.filter_room_state(list(state.values())) }
def compute_state_delta( self, room_id, batch, sync_config, since_token, now_token, full_state ): """Works out the difference in state between the start of the timeline and the previous sync. Args: room_id(str): batch(synapse.handlers.sync.TimelineBatch): The timeline batch for the room that will be sent to the user. sync_config(synapse.handlers.sync.SyncConfig): since_token(str|None): Token of the end of the previous batch. May be None. now_token(str): Token of the end of the current batch. full_state(bool): Whether to force returning the full state. Returns: A deferred dict of (type, state_key) -> Event """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( sync_config.filter_collection.include_redundant_members() ) if lazy_load_members: # We only request state for the members needed to display the # timeline: members_to_fetch = set( event.sender # FIXME: we also care about invite targets etc. for event in batch.events ) if full_state: # always make sure we LL ourselves so we know we're in the room # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 # We only need apply this on full state syncs given we disabled # LL for incr syncs in #3840. members_to_fetch.add(sync_config.user.to_string()) state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) else: state_filter = StateFilter.all() timeline_state = { (event.type, event.state_key): event.event_id for event in batch.events if event.is_state() } if full_state: if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = current_state_ids state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_ids, previous={}, current=current_state_ids, lazy_load_members=lazy_load_members, ) elif batch.limited: if batch: state_at_timeline_start = yield self.store.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) else: # Its not clear how we get here, but empirically we do # (#5407). Logging has been added elsewhere to try and # figure out where this state comes from. state_at_timeline_start = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) # for now, we disable LL for gappy syncs - see # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 # N.B. this slows down incr syncs as we are now processing way # more state in the server than if we were LLing. # # We still have to filter timeline_start to LL entries (above) in order # for _calculate_state's LL logic to work, as we have to include LL # members for timeline senders in case they weren't loaded in the initial # sync. We do this by (counterintuitively) by filtering timeline_start # members to just be ones which were timeline senders, which then ensures # all of the rest get included in the state block (if we need to know # about them). state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, state_filter=state_filter ) if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) else: # Its not clear how we get here, but empirically we do # (#5407). Logging has been added elsewhere to try and # figure out where this state comes from. current_state_ids = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. # But we're lazy-loading, so the client might need some more # member events to understand the events in this timeline. # So we fish out all the member events corresponding to the # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, # we only want members! state_filter=StateFilter.from_types( (EventTypes.Member, member) for member in members_to_fetch ), ) if lazy_load_members and not include_redundant_members: cache_key = (sync_config.user.to_string(), sync_config.device_id) cache = self.get_lazy_loaded_members_cache(cache_key) # if it's a new sync sequence, then assume the client has had # amnesia and doesn't want any recent lazy-loaded members # de-duplicated. if since_token is None: logger.debug("clearing LruCache for %r", cache_key) cache.clear() else: # only send members which aren't in our LruCache (either # because they're new to this client or have been pushed out # of the cache) logger.debug("filtering state from %r...", state_ids) state_ids = { t: event_id for t, event_id in iteritems(state_ids) if cache.get(t[1]) != event_id } logger.debug("...to %r", state_ids) # add any member IDs we are about to send into our LruCache for t, event_id in itertools.chain( state_ids.items(), timeline_state.items() ): if t[0] == EventTypes.Member: cache.set(t[1], event_id) state = {} if state_ids: state = yield self.store.get_events(list(state_ids.values())) return { (e.type, e.state_key): e for e in sync_config.filter_collection.filter_room_state(list(state.values())) }
https://github.com/matrix-org/synapse/issues/5407
2019-06-09 19:03:46,101 - synapse.handlers.sync - 909 - INFO - GET-835 - Calculating sync response for @uelen:riot.firechicken.net between StreamToken(room_key='s707461', presence_key='26561430', typing_key='2874', receipt_key='634533', account_data_key='24909', push_rules_key='56', to_device_key='95', device_list_key='55442', groups_key='1') and StreamToken(room_key='s709258', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1) 2019-06-09 19:03:46,150 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,437 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,446 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !IFlHsKQHpcFgnpwgFn:matrix.org for user @uelen:riot.firechicken.net with 0 state events 2019-06-09 19:03:46,490 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !MbRaSiMIRhhxDtJENL:maunium.net for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,493 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,500 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !iNmaIQExDMeqdITdHH:matrix.org for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,505 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !tvlbVxTSVUgJbmelND:matrix.org for user @uelen:riot.firechicken.net with 3 state events 2019-06-09 19:03:46,553 - synapse.http.server - 112 - ERROR - GET-835 - Failed handle request via 'SyncRestServlet': <SynapseRequest at 0x7f955618ce10 method='GET' uri='/_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1' clientproto='HTTP/1.0' site=8083> Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 527, in runWithConnection defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 487, in runInteraction defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/stream.py", line 414, in get_room_events_stream_for_room defer.returnValue((ret, key)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: ([<FrozenEvent event_id='$15600945001005796CUSFf:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945001005804zAlqr:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945021005812lLBZU:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600946091006766ZzkSV:matrix.org', type='m.room.member', state_key='@freenode_heatsink:matrix.org'>, <FrozenEvent event_id='$15600946391007033aolro:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947131007663SiNSd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947491007978QDPLH:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947551008018jCiVq:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948051008364bsBFK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008555lylfW:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008562BFdsx:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948321008586DDtRZ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948501008732joyAz:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948591008929UoHxd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948821009108qkrIj:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948911009164JkHeG:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949091009307PONGY:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949261009398foetQ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949401009508lTpzl:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950171010117CdspK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950391010258hrwAd:matrix.org', type='m.room.message', state_key='None'>], 's708750') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 472, in _load_filtered_recents limited=limited or newly_joined_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: TimelineBatch(prev_batch=StreamToken(room_key='s708889', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1), events=[], limited=True) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/rest/client/v2_alpha/sync.py", line 167, in on_GET full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 242, in wait_for_sync_for_user sync_config, since_token, timeout, full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 264, in _wait_for_sync_for_user sync_config, since_token, full_state=full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 935, in generate_sync_result sync_result_builder, account_data_by_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1316, in _generate_sync_entry_for_rooms yield concurrently_execute(handle_room_entries, room_entries, 10) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 149, in _concurrently_execute_inner yield func(next(it)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1752, in _generate_room_entry full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 775, in compute_state_delta batch.events[0].event_id, state_filter=state_filter, IndexError: list index out of range 2019-06-09 19:03:46,556 - synapse.access.http.8083 - 302 - INFO - GET-835 - 127.0.0.1 - 8083 - {@uelen:riot.firechicken.net} Processed request: 0.457sec/0.001sec (0.185sec, 0.004sec) (0.186sec/0.941sec/32) 55B 500 "GET /_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1 HTTP/1.0" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36" [1 dbevts]
IndexError
def _generate_room_entry( self, sync_result_builder, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. room_builder(RoomSyncResultBuilder) ephemeral(list): List of new ephemeral events for room tags(list): List of *all* tags for room, or None if there has been no change. account_data(list): List of new account data for room always_include(bool): Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) events = room_builder.events # We want to shortcut out as early as possible. if not (always_include or account_data or ephemeral or full_state): if events == [] and tags is None: return now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token batch = yield self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, recents=events, newly_joined_room=newly_joined, ) # Note: `batch` can be both empty and limited here in the case where # `_load_filtered_recents` can't find any events the user should see # (e.g. due to having ignored the sender of the last 50 events). if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "Timeline events after filtering in newly-joined room %s: %r", room_id, batch, ) # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the # tag was added by synapse e.g. for server notice rooms. if full_state: user_id = sync_result_builder.sync_config.user.to_string() tags = yield self.store.get_tags_for_room(user_id, room_id) # If there aren't any tags, don't send the empty tags list down # sync if not tags: tags = None account_data_events = [] if tags is not None: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) account_data_events = sync_config.filter_collection.filter_room_account_data( account_data_events ) ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) if not (always_include or batch or account_data_events or ephemeral or full_state): return state = yield self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, full_state=full_state ) summary = {} # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( # we recalulate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or ( # XXX: this may include false positives in the form of LL # members which have snuck into state batch.limited and any(t == EventTypes.Member for (t, k) in state) ) or since_token is None ): summary = yield self.compute_summary( room_id, sync_config, batch, state, now_token ) if room_builder.rtype == "joined": unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, ephemeral=ephemeral, account_data=account_data_events, unread_notifications=unread_notifications, summary=summary, ) if room_sync or always_include: notifs = yield self.unread_notifs_for_room_id(room_id, sync_config) if notifs is not None: unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) if room_sync or always_include: sync_result_builder.archived.append(room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype)
def _generate_room_entry( self, sync_result_builder, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. room_builder(RoomSyncResultBuilder) ephemeral(list): List of new ephemeral events for room tags(list): List of *all* tags for room, or None if there has been no change. account_data(list): List of new account data for room always_include(bool): Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) events = room_builder.events # We want to shortcut out as early as possible. if not (always_include or account_data or ephemeral or full_state): if events == [] and tags is None: return now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token batch = yield self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, recents=events, newly_joined_room=newly_joined, ) if not batch and batch.limited: # This resulted in #5407, which is weird, so lets log! We do it # here as we have the maximum amount of information. user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Issue #5407: Found limited batch with no events. user %s, room %s," " sync_config %s, newly_joined %s, events %s, batch %s.", user_id, room_id, sync_config, newly_joined, events, batch, ) if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( "Timeline events after filtering in newly-joined room %s: %r", room_id, batch, ) # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the # tag was added by synapse e.g. for server notice rooms. if full_state: user_id = sync_result_builder.sync_config.user.to_string() tags = yield self.store.get_tags_for_room(user_id, room_id) # If there aren't any tags, don't send the empty tags list down # sync if not tags: tags = None account_data_events = [] if tags is not None: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) account_data_events = sync_config.filter_collection.filter_room_account_data( account_data_events ) ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) if not (always_include or batch or account_data_events or ephemeral or full_state): return state = yield self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, full_state=full_state ) summary = {} # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( # we recalulate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. any(ev.type == EventTypes.Member for ev in batch.events) or ( # XXX: this may include false positives in the form of LL # members which have snuck into state batch.limited and any(t == EventTypes.Member for (t, k) in state) ) or since_token is None ): summary = yield self.compute_summary( room_id, sync_config, batch, state, now_token ) if room_builder.rtype == "joined": unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, ephemeral=ephemeral, account_data=account_data_events, unread_notifications=unread_notifications, summary=summary, ) if room_sync or always_include: notifs = yield self.unread_notifs_for_room_id(room_id, sync_config) if notifs is not None: unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.info( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) if room_sync or always_include: sync_result_builder.archived.append(room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype)
https://github.com/matrix-org/synapse/issues/5407
2019-06-09 19:03:46,101 - synapse.handlers.sync - 909 - INFO - GET-835 - Calculating sync response for @uelen:riot.firechicken.net between StreamToken(room_key='s707461', presence_key='26561430', typing_key='2874', receipt_key='634533', account_data_key='24909', push_rules_key='56', to_device_key='95', device_list_key='55442', groups_key='1') and StreamToken(room_key='s709258', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1) 2019-06-09 19:03:46,150 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,437 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,446 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !IFlHsKQHpcFgnpwgFn:matrix.org for user @uelen:riot.firechicken.net with 0 state events 2019-06-09 19:03:46,490 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !MbRaSiMIRhhxDtJENL:maunium.net for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,493 - synapse.metrics - 372 - INFO - - Collecting gc 0 2019-06-09 19:03:46,500 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !iNmaIQExDMeqdITdHH:matrix.org for user @uelen:riot.firechicken.net with 2 state events 2019-06-09 19:03:46,505 - synapse.handlers.sync - 1810 - INFO - GET-835 - Incremental gappy sync of !tvlbVxTSVUgJbmelND:matrix.org for user @uelen:riot.firechicken.net with 3 state events 2019-06-09 19:03:46,553 - synapse.http.server - 112 - ERROR - GET-835 - Failed handle request via 'SyncRestServlet': <SynapseRequest at 0x7f955618ce10 method='GET' uri='/_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1' clientproto='HTTP/1.0' site=8083> Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 527, in runWithConnection defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/_base.py", line 487, in runInteraction defer.returnValue(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [_EventDictReturn(event_id='$15600950391010258hrwAd:matrix.org', topological_ordering=None, stream_ordering=708773), _EventDictReturn(event_id='$15600950171010117CdspK:matrix.org', topological_ordering=None, stream_ordering=708772), _EventDictReturn(event_id='$15600949401009508lTpzl:matrix.org', topological_ordering=None, stream_ordering=708771), _EventDictReturn(event_id='$15600949261009398foetQ:matrix.org', topological_ordering=None, stream_ordering=708770), _EventDictReturn(event_id='$15600949091009307PONGY:matrix.org', topological_ordering=None, stream_ordering=708769), _EventDictReturn(event_id='$15600948911009164JkHeG:matrix.org', topological_ordering=None, stream_ordering=708768), _EventDictReturn(event_id='$15600948821009108qkrIj:matrix.org', topological_ordering=None, stream_ordering=708767), _EventDictReturn(event_id='$15600948591008929UoHxd:matrix.org', topological_ordering=None, stream_ordering=708766), _EventDictReturn(event_id='$15600948501008732joyAz:matrix.org', topological_ordering=None, stream_ordering=708765), _EventDictReturn(event_id='$15600948321008586DDtRZ:matrix.org', topological_ordering=None, stream_ordering=708764), _EventDictReturn(event_id='$15600948311008562BFdsx:matrix.org', topological_ordering=None, stream_ordering=708763), _EventDictReturn(event_id='$15600948311008555lylfW:matrix.org', topological_ordering=None, stream_ordering=708762), _EventDictReturn(event_id='$15600948051008364bsBFK:matrix.org', topological_ordering=None, stream_ordering=708761), _EventDictReturn(event_id='$15600947551008018jCiVq:matrix.org', topological_ordering=None, stream_ordering=708760), _EventDictReturn(event_id='$15600947491007978QDPLH:matrix.org', topological_ordering=None, stream_ordering=708759), _EventDictReturn(event_id='$15600947131007663SiNSd:matrix.org', topological_ordering=None, stream_ordering=708758), _EventDictReturn(event_id='$15600946391007033aolro:matrix.org', topological_ordering=None, stream_ordering=708755), _EventDictReturn(event_id='$15600946091006766ZzkSV:matrix.org', topological_ordering=None, stream_ordering=708754), _EventDictReturn(event_id='$15600945021005812lLBZU:matrix.org', topological_ordering=None, stream_ordering=708752), _EventDictReturn(event_id='$15600945001005804zAlqr:matrix.org', topological_ordering=None, stream_ordering=708751), _EventDictReturn(event_id='$15600945001005796CUSFf:matrix.org', topological_ordering=None, stream_ordering=708750)] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/storage/stream.py", line 414, in get_room_events_stream_for_room defer.returnValue((ret, key)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: ([<FrozenEvent event_id='$15600945001005796CUSFf:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945001005804zAlqr:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600945021005812lLBZU:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600946091006766ZzkSV:matrix.org', type='m.room.member', state_key='@freenode_heatsink:matrix.org'>, <FrozenEvent event_id='$15600946391007033aolro:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947131007663SiNSd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947491007978QDPLH:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600947551008018jCiVq:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948051008364bsBFK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008555lylfW:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948311008562BFdsx:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948321008586DDtRZ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948501008732joyAz:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948591008929UoHxd:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948821009108qkrIj:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600948911009164JkHeG:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949091009307PONGY:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949261009398foetQ:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600949401009508lTpzl:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950171010117CdspK:matrix.org', type='m.room.message', state_key='None'>, <FrozenEvent event_id='$15600950391010258hrwAd:matrix.org', type='m.room.message', state_key='None'>], 's708750') During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 472, in _load_filtered_recents limited=limited or newly_joined_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: TimelineBatch(prev_batch=StreamToken(room_key='s708889', presence_key=26599051, typing_key=57, receipt_key=636004, account_data_key=24914, push_rules_key=56, to_device_key=95, device_list_key=55442, groups_key=1), events=[], limited=True) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/rest/client/v2_alpha/sync.py", line 167, in on_GET full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 242, in wait_for_sync_for_user sync_config, since_token, timeout, full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 264, in _wait_for_sync_for_user sync_config, since_token, full_state=full_state, File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 935, in generate_sync_result sync_result_builder, account_data_by_room File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1316, in _generate_sync_entry_for_rooms yield concurrently_execute(handle_room_entries, room_entries, 10) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/util/async_helpers.py", line 149, in _concurrently_execute_inner yield func(next(it)) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 1752, in _generate_room_entry full_state=full_state File "/home/synapse/synapse/env3/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/synapse/synapse/env3/lib/python3.6/site-packages/synapse/handlers/sync.py", line 775, in compute_state_delta batch.events[0].event_id, state_filter=state_filter, IndexError: list index out of range 2019-06-09 19:03:46,556 - synapse.access.http.8083 - 302 - INFO - GET-835 - 127.0.0.1 - 8083 - {@uelen:riot.firechicken.net} Processed request: 0.457sec/0.001sec (0.185sec, 0.004sec) (0.186sec/0.941sec/32) 55B 500 "GET /_matrix/client/r0/sync?filter=0&timeout=0&since=s707461_26561430_2874_634533_24909_56_95_55442_1 HTTP/1.0" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36" [1 dbevts]
IndexError
def _populate_stats_process_rooms(self, progress, batch_size): if not self.stats_enabled: yield self._end_background_update("populate_stats_process_rooms") defer.returnValue(1) # If we don't have progress filed, delete everything. if not progress: yield self.delete_all_stats() def _get_next_batch(txn): # Only fetch 250 rooms, so we don't fetch too many at once, even # if those 250 rooms have less than batch_size state events. sql = """ SELECT room_id, events FROM %s_rooms ORDER BY events DESC LIMIT 250 """ % (TEMP_TABLE,) txn.execute(sql) rooms_to_work_on = txn.fetchall() if not rooms_to_work_on: return None # Get how many are left to process, so we can give status on how # far we are in processing txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") progress["remaining"] = txn.fetchone()[0] return rooms_to_work_on rooms_to_work_on = yield self.runInteraction( "populate_stats_temp_read", _get_next_batch ) # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_stats_process_rooms") defer.returnValue(1) logger.info( "Processing the next %d rooms of %d remaining", len(rooms_to_work_on), progress["remaining"], ) # Number of state events we've processed by going through each room processed_event_count = 0 for room_id, event_count in rooms_to_work_on: current_state_ids = yield self.get_current_state_ids(room_id) join_rules = yield self.get_event( current_state_ids.get((EventTypes.JoinRules, "")), allow_none=True ) history_visibility = yield self.get_event( current_state_ids.get((EventTypes.RoomHistoryVisibility, "")), allow_none=True, ) encryption = yield self.get_event( current_state_ids.get((EventTypes.RoomEncryption, "")), allow_none=True ) name = yield self.get_event( current_state_ids.get((EventTypes.Name, "")), allow_none=True ) topic = yield self.get_event( current_state_ids.get((EventTypes.Topic, "")), allow_none=True ) avatar = yield self.get_event( current_state_ids.get((EventTypes.RoomAvatar, "")), allow_none=True ) canonical_alias = yield self.get_event( current_state_ids.get((EventTypes.CanonicalAlias, "")), allow_none=True ) def _or_none(x, arg): if x: return x.content.get(arg) return None yield self.update_room_state( room_id, { "join_rules": _or_none(join_rules, "join_rule"), "history_visibility": _or_none( history_visibility, "history_visibility" ), "encryption": _or_none(encryption, "algorithm"), "name": _or_none(name, "name"), "topic": _or_none(topic, "topic"), "avatar": _or_none(avatar, "url"), "canonical_alias": _or_none(canonical_alias, "alias"), }, ) now = self.hs.get_reactor().seconds() # quantise time to the nearest bucket now = (now // self.stats_bucket_size) * self.stats_bucket_size def _fetch_data(txn): # Get the current token of the room current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) current_state_events = len(current_state_ids) joined_members = self._get_user_count_in_room_txn( txn, room_id, Membership.JOIN ) invited_members = self._get_user_count_in_room_txn( txn, room_id, Membership.INVITE ) left_members = self._get_user_count_in_room_txn( txn, room_id, Membership.LEAVE ) banned_members = self._get_user_count_in_room_txn( txn, room_id, Membership.BAN ) total_state_events = self._get_total_state_event_counts_txn(txn, room_id) self._update_stats_txn( txn, "room", room_id, now, { "bucket_size": self.stats_bucket_size, "current_state_events": current_state_events, "joined_members": joined_members, "invited_members": invited_members, "left_members": left_members, "banned_members": banned_members, "state_events": total_state_events, }, ) self._simple_insert_txn( txn, "room_stats_earliest_token", {"room_id": room_id, "token": current_token}, ) yield self.runInteraction("update_room_stats", _fetch_data) # We've finished a room. Delete it from the table. yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id}) # Update the remaining counter. progress["remaining"] -= 1 yield self.runInteraction( "populate_stats", self._background_update_progress_txn, "populate_stats_process_rooms", progress, ) processed_event_count += event_count if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. defer.returnValue(processed_event_count) defer.returnValue(processed_event_count)
def _populate_stats_process_rooms(self, progress, batch_size): if not self.stats_enabled: yield self._end_background_update("populate_stats_process_rooms") defer.returnValue(1) # If we don't have progress filed, delete everything. if not progress: yield self.delete_all_stats() def _get_next_batch(txn): # Only fetch 250 rooms, so we don't fetch too many at once, even # if those 250 rooms have less than batch_size state events. sql = """ SELECT room_id, events FROM %s_rooms ORDER BY events DESC LIMIT 250 """ % (TEMP_TABLE,) txn.execute(sql) rooms_to_work_on = txn.fetchall() if not rooms_to_work_on: return None # Get how many are left to process, so we can give status on how # far we are in processing txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") progress["remaining"] = txn.fetchone()[0] return rooms_to_work_on rooms_to_work_on = yield self.runInteraction( "populate_stats_temp_read", _get_next_batch ) # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_stats_process_rooms") defer.returnValue(1) logger.info( "Processing the next %d rooms of %d remaining", (len(rooms_to_work_on), progress["remaining"]), ) # Number of state events we've processed by going through each room processed_event_count = 0 for room_id, event_count in rooms_to_work_on: current_state_ids = yield self.get_current_state_ids(room_id) join_rules = yield self.get_event( current_state_ids.get((EventTypes.JoinRules, "")), allow_none=True ) history_visibility = yield self.get_event( current_state_ids.get((EventTypes.RoomHistoryVisibility, "")), allow_none=True, ) encryption = yield self.get_event( current_state_ids.get((EventTypes.RoomEncryption, "")), allow_none=True ) name = yield self.get_event( current_state_ids.get((EventTypes.Name, "")), allow_none=True ) topic = yield self.get_event( current_state_ids.get((EventTypes.Topic, "")), allow_none=True ) avatar = yield self.get_event( current_state_ids.get((EventTypes.RoomAvatar, "")), allow_none=True ) canonical_alias = yield self.get_event( current_state_ids.get((EventTypes.CanonicalAlias, "")), allow_none=True ) def _or_none(x, arg): if x: return x.content.get(arg) return None yield self.update_room_state( room_id, { "join_rules": _or_none(join_rules, "join_rule"), "history_visibility": _or_none( history_visibility, "history_visibility" ), "encryption": _or_none(encryption, "algorithm"), "name": _or_none(name, "name"), "topic": _or_none(topic, "topic"), "avatar": _or_none(avatar, "url"), "canonical_alias": _or_none(canonical_alias, "alias"), }, ) now = self.hs.get_reactor().seconds() # quantise time to the nearest bucket now = (now // self.stats_bucket_size) * self.stats_bucket_size def _fetch_data(txn): # Get the current token of the room current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) current_state_events = len(current_state_ids) joined_members = self._get_user_count_in_room_txn( txn, room_id, Membership.JOIN ) invited_members = self._get_user_count_in_room_txn( txn, room_id, Membership.INVITE ) left_members = self._get_user_count_in_room_txn( txn, room_id, Membership.LEAVE ) banned_members = self._get_user_count_in_room_txn( txn, room_id, Membership.BAN ) total_state_events = self._get_total_state_event_counts_txn(txn, room_id) self._update_stats_txn( txn, "room", room_id, now, { "bucket_size": self.stats_bucket_size, "current_state_events": current_state_events, "joined_members": joined_members, "invited_members": invited_members, "left_members": left_members, "banned_members": banned_members, "state_events": total_state_events, }, ) self._simple_insert_txn( txn, "room_stats_earliest_token", {"room_id": room_id, "token": current_token}, ) yield self.runInteraction("update_room_stats", _fetch_data) # We've finished a room. Delete it from the table. yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id}) # Update the remaining counter. progress["remaining"] -= 1 yield self.runInteraction( "populate_stats", self._background_update_progress_txn, "populate_stats_process_rooms", progress, ) processed_event_count += event_count if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. defer.returnValue(processed_event_count) defer.returnValue(processed_event_count)
https://github.com/matrix-org/synapse/issues/5238
2019-05-22 23:12:51,628 - twisted - 242 - ERROR - background_updates-0 - --- Logging error --- 2019-05-22 23:12:51,629 - twisted - 242 - ERROR - background_updates-0 - Traceback (most recent call last): 2019-05-22 23:12:51,629 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks 2019-05-22 23:12:51,629 - twisted - 242 - ERROR - background_updates-0 - result = g.send(result) 2019-05-22 23:12:51,629 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/storage/_base.py", line 527, in runWithConnection 2019-05-22 23:12:51,630 - twisted - 242 - ERROR - background_updates-0 - defer.returnValue(result) 2019-05-22 23:12:51,630 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1362, in returnValue 2019-05-22 23:12:51,630 - twisted - 242 - ERROR - background_updates-0 - raise _DefGen_Return(val) 2019-05-22 23:12:51,630 - twisted - 242 - ERROR - background_updates-0 - twisted.internet.defer._DefGen_Return: [('!redacted', 7), ... snipped ...] 2019-05-22 23:12:51,633 - twisted - 242 - ERROR - background_updates-0 - 2019-05-22 23:12:51,633 - twisted - 242 - ERROR - background_updates-0 - During handling of the above exception, another exception occurred: 2019-05-22 23:12:51,633 - twisted - 242 - ERROR - background_updates-0 - 2019-05-22 23:12:51,633 - twisted - 242 - ERROR - background_updates-0 - Traceback (most recent call last): 2019-05-22 23:12:51,633 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/logging/handlers.py", line 71, in emit 2019-05-22 23:12:51,633 - twisted - 242 - ERROR - background_updates-0 - if self.shouldRollover(record): 2019-05-22 23:12:51,634 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/logging/handlers.py", line 187, in shouldRollover 2019-05-22 23:12:51,634 - twisted - 242 - ERROR - background_updates-0 - msg = "%s\n" % self.format(record) 2019-05-22 23:12:51,634 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/logging/__init__.py", line 840, in format 2019-05-22 23:12:51,634 - twisted - 242 - ERROR - background_updates-0 - return fmt.format(record) 2019-05-22 23:12:51,634 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/logging/__init__.py", line 577, in format 2019-05-22 23:12:51,634 - twisted - 242 - ERROR - background_updates-0 - record.message = record.getMessage() 2019-05-22 23:12:51,635 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/logging/__init__.py", line 338, in getMessage 2019-05-22 23:12:51,635 - twisted - 242 - ERROR - background_updates-0 - msg = msg % self.args 2019-05-22 23:12:51,635 - twisted - 242 - ERROR - background_updates-0 - TypeError: %d format: a number is required, not tuple 2019-05-22 23:12:51,635 - twisted - 242 - ERROR - background_updates-0 - Call stack: 2019-05-22 23:12:51,635 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main 2019-05-22 23:12:51,636 - twisted - 242 - ERROR - background_updates-0 - "__main__", mod_spec) 2019-05-22 23:12:51,636 - twisted - 242 - ERROR - background_updates-0 - File "/usr/lib/python3.6/runpy.py", line 85, in _run_code 2019-05-22 23:12:51,636 - twisted - 242 - ERROR - background_updates-0 - exec(code, run_globals) 2019-05-22 23:12:51,636 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/app/homeserver.py", line 663, in <module> 2019-05-22 23:12:51,636 - twisted - 242 - ERROR - background_updates-0 - main() 2019-05-22 23:12:51,636 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/app/homeserver.py", line 659, in main 2019-05-22 23:12:51,637 - twisted - 242 - ERROR - background_updates-0 - run(hs) 2019-05-22 23:12:51,637 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/app/homeserver.py", line 650, in run 2019-05-22 23:12:51,637 - twisted - 242 - ERROR - background_updates-0 - logger=logger, 2019-05-22 23:12:51,637 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/app/_base.py", line 144, in start_reactor 2019-05-22 23:12:51,637 - twisted - 242 - ERROR - background_updates-0 - daemon.start() 2019-05-22 23:12:51,637 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/daemonize.py", line 248, in start 2019-05-22 23:12:51,638 - twisted - 242 - ERROR - background_updates-0 - self.action(*privileged_action_result) 2019-05-22 23:12:51,638 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/app/_base.py", line 130, in run 2019-05-22 23:12:51,638 - twisted - 242 - ERROR - background_updates-0 - reactor.run() 2019-05-22 23:12:51,638 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/base.py", line 1267, in run 2019-05-22 23:12:51,638 - twisted - 242 - ERROR - background_updates-0 - self.mainLoop() 2019-05-22 23:12:51,638 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/base.py", line 1276, in mainLoop 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - self.runUntilCurrent() 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/metrics/__init__.py", line 347, in f 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - ret = func(*args, **kwargs) 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/base.py", line 875, in runUntilCurrent 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - f(*a, **kw) 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 460, in callback 2019-05-22 23:12:51,639 - twisted - 242 - ERROR - background_updates-0 - self._startRunCallbacks(result) 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - self._runCallbacks() 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - current.result = callback(current.result, *args, **kw) 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1475, in gotResult 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - _inlineCallbacks(r, g, status) 2019-05-22 23:12:51,640 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks 2019-05-22 23:12:51,641 - twisted - 242 - ERROR - background_updates-0 - status.deferred.callback(e.value) 2019-05-22 23:12:51,641 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 460, in callback 2019-05-22 23:12:51,641 - twisted - 242 - ERROR - background_updates-0 - self._startRunCallbacks(result) 2019-05-22 23:12:51,641 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks 2019-05-22 23:12:51,641 - twisted - 242 - ERROR - background_updates-0 - self._runCallbacks() 2019-05-22 23:12:51,641 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - current.result = callback(current.result, *args, **kw) 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1475, in gotResult 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - _inlineCallbacks(r, g, status) 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - status.deferred.callback(e.value) 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 460, in callback 2019-05-22 23:12:51,642 - twisted - 242 - ERROR - background_updates-0 - self._startRunCallbacks(result) 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - self._runCallbacks() 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - current.result = callback(current.result, *args, **kw) 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1475, in gotResult 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - _inlineCallbacks(r, g, status) 2019-05-22 23:12:51,643 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks 2019-05-22 23:12:51,644 - twisted - 242 - ERROR - background_updates-0 - result = g.send(result) 2019-05-22 23:12:51,644 - twisted - 242 - ERROR - background_updates-0 - File "/home/matrix/.synapse/local/lib/python3.6/site-packages/synapse/storage/stats.py", line 172, in _populate_stats_process_rooms 2019-05-22 23:12:51,644 - twisted - 242 - ERROR - background_updates-0 - (len(rooms_to_work_on), progress["remaining"]), 2019-05-22 23:12:51,644 - twisted - 242 - ERROR - background_updates-0 - Message: 'Processing the next %d rooms of %d remaining' 2019-05-22 23:12:51,644 - twisted - 242 - ERROR - background_updates-0 - Arguments: ((176, 176),)
TypeError
def _check_for_soft_fail(self, event, state, backfilled): """Checks if we should soft fail the event, if so marks the event as such. Args: event (FrozenEvent) state (dict|None): The state at the event if we don't have all the event's prev events backfilled (bool): Whether the event is from backfill Returns: Deferred """ # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier() if do_soft_fail_check: extrem_ids = yield self.store.get_latest_event_ids_in_room( event.room_id, ) extrem_ids = set(extrem_ids) prev_event_ids = set(event.prev_event_ids()) if extrem_ids == prev_event_ids: # If they're the same then the current state is the same as the # state at the event, so no point rechecking auth for soft fail. do_soft_fail_check = False if do_soft_fail_check: room_version = yield self.store.get_room_version(event.room_id) # Calculate the "current state". if state is not None: # If we're explicitly given the state then we won't have all the # prev events, and so we have a gap in the graph. In this case # we want to be a little careful as we might have been down for # a while and have an incorrect view of the current state, # however we still want to do checks as gaps are easy to # maliciously manufacture. # # So we use a "current state" that is actually a state # resolution across the current forward extremities and the # given state at the event. This should correctly handle cases # like bans, especially with state res v2. state_sets = yield self.store.get_state_groups( event.room_id, extrem_ids, ) state_sets = list(state_sets.values()) state_sets.append(state) current_state_ids = yield self.state_handler.resolve_events( room_version, state_sets, event, ) current_state_ids = {k: e.event_id for k, e in iteritems(current_state_ids)} else: current_state_ids = yield self.state_handler.get_current_state_ids( event.room_id, latest_event_ids=extrem_ids, ) logger.debug( "Doing soft-fail check for %s: state %s", event.event_id, current_state_ids, ) # Now check if event pass auth against said current state auth_types = auth_types_for_event(event) current_state_ids = [ e for k, e in iteritems(current_state_ids) if k in auth_types ] current_auth_events = yield self.store.get_events(current_state_ids) current_auth_events = { (e.type, e.state_key): e for e in current_auth_events.values() } try: self.auth.check(room_version, event, auth_events=current_auth_events) except AuthError as e: logger.warn( "Soft-failing %r because %s", event, e, ) event.internal_metadata.soft_failed = True
def _check_for_soft_fail(self, event, state, backfilled): """Checks if we should soft fail the event, if so marks the event as such. Args: event (FrozenEvent) state (dict|None): The state at the event if we don't have all the event's prev events backfilled (bool): Whether the event is from backfill Returns: Deferred """ # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier() if do_soft_fail_check: extrem_ids = yield self.store.get_latest_event_ids_in_room( event.room_id, ) extrem_ids = set(extrem_ids) prev_event_ids = set(event.prev_event_ids()) if extrem_ids == prev_event_ids: # If they're the same then the current state is the same as the # state at the event, so no point rechecking auth for soft fail. do_soft_fail_check = False if do_soft_fail_check: room_version = yield self.store.get_room_version(event.room_id) # Calculate the "current state". if state is not None: # If we're explicitly given the state then we won't have all the # prev events, and so we have a gap in the graph. In this case # we want to be a little careful as we might have been down for # a while and have an incorrect view of the current state, # however we still want to do checks as gaps are easy to # maliciously manufacture. # # So we use a "current state" that is actually a state # resolution across the current forward extremities and the # given state at the event. This should correctly handle cases # like bans, especially with state res v2. state_sets = yield self.store.get_state_groups( event.room_id, extrem_ids, ) state_sets = list(state_sets.values()) state_sets.append(state) current_state_ids = yield self.state_handler.resolve_events( room_version, state_sets, event, ) current_state_ids = {k: e.event_id for k, e in iteritems(current_state_ids)} else: current_state_ids = yield self.state_handler.get_current_state_ids( event.room_id, latest_event_ids=extrem_ids, ) # Now check if event pass auth against said current state auth_types = auth_types_for_event(event) current_state_ids = [ e for k, e in iteritems(current_state_ids) if k in auth_types ] current_auth_events = yield self.store.get_events(current_state_ids) current_auth_events = { (e.type, e.state_key): e for e in current_auth_events.values() } try: self.auth.check(room_version, event, auth_events=current_auth_events) except AuthError as e: logger.warn( "Failed current state auth resolution for %r because %s", event, e, ) event.internal_metadata.soft_failed = True
https://github.com/matrix-org/synapse/issues/5090
2019-04-22 01:18:17,013 - synapse.http.server - 112 - ERROR - POST-16014877 - Failed handle request via 'ReplicationFederationSendEventsRestServlet': <SynapseRequest at 0x7fc87ed2abe0 method='POST' uri='/_synapse/replication/fed_send_events/TloUSGYPDO' clientproto='HTTP/1.1' site=9092> Capture point (most recent call last): File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/synapse/src/synapse/app/homeserver.py", line 661, in <module> main() File "/home/synapse/src/synapse/app/homeserver.py", line 657, in main run(hs) File "/home/synapse/src/synapse/app/homeserver.py", line 648, in run logger=logger, File "/home/synapse/src/synapse/app/_base.py", line 141, in start_reactor daemon.start() File "/home/synapse/env-py37/lib/python3.7/site-packages/daemonize.py", line 248, in start self.action(*privileged_action_result) File "/home/synapse/src/synapse/app/_base.py", line 127, in run reactor.run() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1272, in run self.mainLoop() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop self.runUntilCurrent() File "/home/synapse/src/synapse/metrics/__init__.py", line 347, in f ret = func(*args, **kwargs) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 875, in runUntilCurrent f(*a, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/synapse/src/synapse/storage/events.py", line 166, in handle_queue_loop item.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/src/synapse/util/async_helpers.py", line 75, in errback self._observers.pop().errback(f) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1114, in _cbDeferred self.errback(failure.Failure(FirstError(result, index))) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/src/synapse/util/async_helpers.py", line 75, in errback self._observers.pop().errback(f) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/synapse/src/synapse/http/server.py", line 112, in wrapped_request_handler exc_info=(f.type, f.value, f.getTracebackObject()), File "/usr/local/lib/python3.7/logging/__init__.py", line 1412, in error self._log(ERROR, msg, args, **kwargs) File "/usr/local/lib/python3.7/logging/__init__.py", line 1519, in _log self.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1529, in handle self.callHandlers(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1591, in callHandlers hdlr.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 905, in handle self.emit(record) File "/usr/local/lib/python3.7/logging/handlers.py", line 1234, in emit self.flush() File "/usr/local/lib/python3.7/logging/handlers.py", line 1310, in flush self.target.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 905, in handle self.emit(record) File "/usr/local/lib/python3.7/logging/handlers.py", line 70, in emit logging.FileHandler.emit(self, record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1132, in emit StreamHandler.emit(self, record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1034, in emit msg = self.format(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 880, in format return fmt.format(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 627, in format record.exc_text = self.formatException(record.exc_info) File "/home/synapse/src/synapse/util/logformatter.py", line 45, in formatException traceback.print_stack(tb.tb_frame.f_back, None, sio) Traceback (most recent call last): File "/home/synapse/src/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/src/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/src/synapse/replication/http/federation.py", line 117, in _handle_request event_and_contexts, backfilled, File "/home/synapse/src/synapse/handlers/federation.py", line 2662, in persist_events_and_notify backfilled=backfilled, File "/home/synapse/src/synapse/storage/events.py", line 287, in persist_events defer.gatherResults(deferreds, consumeErrors=True) twisted.internet.defer.FirstError: FirstError[#0, [Failure instance: Traceback: <class 'AssertionError'>: No forward extremities left! /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:654:_runCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1475:gotResult /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator --- <exception caught here> --- /home/synapse/src/synapse/storage/events.py:163:handle_queue_loop /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /home/synapse/src/synapse/storage/events.py:324:persisting_queue /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /home/synapse/src/synapse/storage/events.py:205:f /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1418:_inlineCallbacks /home/synapse/src/synapse/storage/events.py:416:_persist_events ]]
twisted.internet.defer.FirstError
def _get_events_which_are_prevs(self, event_ids): """Filter the supplied list of event_ids to get those which are prev_events of existing (non-outlier/rejected) events. Args: event_ids (Iterable[str]): event ids to filter Returns: Deferred[List[str]]: filtered event ids """ results = [] def _get_events(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges INNER JOIN events USING (event_id) LEFT JOIN rejections USING (event_id) LEFT JOIN event_json USING (event_id) WHERE prev_event_id IN (%s) AND NOT events.outlier AND rejections.event_id IS NULL """ % (",".join("?" for _ in batch),) txn.execute(sql, batch) results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed")) for chunk in batch_iter(event_ids, 100): yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) defer.returnValue(results)
def _get_events_which_are_prevs(self, event_ids): """Filter the supplied list of event_ids to get those which are prev_events of existing (non-outlier/rejected) events. Args: event_ids (Iterable[str]): event ids to filter Returns: Deferred[List[str]]: filtered event ids """ results = [] def _get_events(txn, batch): sql = """ SELECT prev_event_id FROM event_edges INNER JOIN events USING (event_id) LEFT JOIN rejections USING (event_id) WHERE prev_event_id IN (%s) AND NOT events.outlier AND rejections.event_id IS NULL """ % (",".join("?" for _ in batch),) txn.execute(sql, batch) results.extend(r[0] for r in txn) for chunk in batch_iter(event_ids, 100): yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) defer.returnValue(results)
https://github.com/matrix-org/synapse/issues/5090
2019-04-22 01:18:17,013 - synapse.http.server - 112 - ERROR - POST-16014877 - Failed handle request via 'ReplicationFederationSendEventsRestServlet': <SynapseRequest at 0x7fc87ed2abe0 method='POST' uri='/_synapse/replication/fed_send_events/TloUSGYPDO' clientproto='HTTP/1.1' site=9092> Capture point (most recent call last): File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/synapse/src/synapse/app/homeserver.py", line 661, in <module> main() File "/home/synapse/src/synapse/app/homeserver.py", line 657, in main run(hs) File "/home/synapse/src/synapse/app/homeserver.py", line 648, in run logger=logger, File "/home/synapse/src/synapse/app/_base.py", line 141, in start_reactor daemon.start() File "/home/synapse/env-py37/lib/python3.7/site-packages/daemonize.py", line 248, in start self.action(*privileged_action_result) File "/home/synapse/src/synapse/app/_base.py", line 127, in run reactor.run() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1272, in run self.mainLoop() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop self.runUntilCurrent() File "/home/synapse/src/synapse/metrics/__init__.py", line 347, in f ret = func(*args, **kwargs) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 875, in runUntilCurrent f(*a, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/synapse/src/synapse/storage/events.py", line 166, in handle_queue_loop item.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/src/synapse/util/async_helpers.py", line 75, in errback self._observers.pop().errback(f) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1114, in _cbDeferred self.errback(failure.Failure(FirstError(result, index))) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/src/synapse/util/async_helpers.py", line 75, in errback self._observers.pop().errback(f) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/synapse/src/synapse/http/server.py", line 112, in wrapped_request_handler exc_info=(f.type, f.value, f.getTracebackObject()), File "/usr/local/lib/python3.7/logging/__init__.py", line 1412, in error self._log(ERROR, msg, args, **kwargs) File "/usr/local/lib/python3.7/logging/__init__.py", line 1519, in _log self.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1529, in handle self.callHandlers(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1591, in callHandlers hdlr.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 905, in handle self.emit(record) File "/usr/local/lib/python3.7/logging/handlers.py", line 1234, in emit self.flush() File "/usr/local/lib/python3.7/logging/handlers.py", line 1310, in flush self.target.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 905, in handle self.emit(record) File "/usr/local/lib/python3.7/logging/handlers.py", line 70, in emit logging.FileHandler.emit(self, record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1132, in emit StreamHandler.emit(self, record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1034, in emit msg = self.format(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 880, in format return fmt.format(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 627, in format record.exc_text = self.formatException(record.exc_info) File "/home/synapse/src/synapse/util/logformatter.py", line 45, in formatException traceback.print_stack(tb.tb_frame.f_back, None, sio) Traceback (most recent call last): File "/home/synapse/src/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/src/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/src/synapse/replication/http/federation.py", line 117, in _handle_request event_and_contexts, backfilled, File "/home/synapse/src/synapse/handlers/federation.py", line 2662, in persist_events_and_notify backfilled=backfilled, File "/home/synapse/src/synapse/storage/events.py", line 287, in persist_events defer.gatherResults(deferreds, consumeErrors=True) twisted.internet.defer.FirstError: FirstError[#0, [Failure instance: Traceback: <class 'AssertionError'>: No forward extremities left! /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:654:_runCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1475:gotResult /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator --- <exception caught here> --- /home/synapse/src/synapse/storage/events.py:163:handle_queue_loop /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /home/synapse/src/synapse/storage/events.py:324:persisting_queue /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /home/synapse/src/synapse/storage/events.py:205:f /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1418:_inlineCallbacks /home/synapse/src/synapse/storage/events.py:416:_persist_events ]]
twisted.internet.defer.FirstError
def _get_events(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges INNER JOIN events USING (event_id) LEFT JOIN rejections USING (event_id) LEFT JOIN event_json USING (event_id) WHERE prev_event_id IN (%s) AND NOT events.outlier AND rejections.event_id IS NULL """ % (",".join("?" for _ in batch),) txn.execute(sql, batch) results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))
def _get_events(txn, batch): sql = """ SELECT prev_event_id FROM event_edges INNER JOIN events USING (event_id) LEFT JOIN rejections USING (event_id) WHERE prev_event_id IN (%s) AND NOT events.outlier AND rejections.event_id IS NULL """ % (",".join("?" for _ in batch),) txn.execute(sql, batch) results.extend(r[0] for r in txn)
https://github.com/matrix-org/synapse/issues/5090
2019-04-22 01:18:17,013 - synapse.http.server - 112 - ERROR - POST-16014877 - Failed handle request via 'ReplicationFederationSendEventsRestServlet': <SynapseRequest at 0x7fc87ed2abe0 method='POST' uri='/_synapse/replication/fed_send_events/TloUSGYPDO' clientproto='HTTP/1.1' site=9092> Capture point (most recent call last): File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/synapse/src/synapse/app/homeserver.py", line 661, in <module> main() File "/home/synapse/src/synapse/app/homeserver.py", line 657, in main run(hs) File "/home/synapse/src/synapse/app/homeserver.py", line 648, in run logger=logger, File "/home/synapse/src/synapse/app/_base.py", line 141, in start_reactor daemon.start() File "/home/synapse/env-py37/lib/python3.7/site-packages/daemonize.py", line 248, in start self.action(*privileged_action_result) File "/home/synapse/src/synapse/app/_base.py", line 127, in run reactor.run() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1272, in run self.mainLoop() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop self.runUntilCurrent() File "/home/synapse/src/synapse/metrics/__init__.py", line 347, in f ret = func(*args, **kwargs) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/base.py", line 875, in runUntilCurrent f(*a, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/synapse/src/synapse/storage/events.py", line 166, in handle_queue_loop item.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/src/synapse/util/async_helpers.py", line 75, in errback self._observers.pop().errback(f) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1114, in _cbDeferred self.errback(failure.Failure(FirstError(result, index))) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/src/synapse/util/async_helpers.py", line 75, in errback self._observers.pop().errback(f) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py", line 512, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/synapse/src/synapse/http/server.py", line 112, in wrapped_request_handler exc_info=(f.type, f.value, f.getTracebackObject()), File "/usr/local/lib/python3.7/logging/__init__.py", line 1412, in error self._log(ERROR, msg, args, **kwargs) File "/usr/local/lib/python3.7/logging/__init__.py", line 1519, in _log self.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1529, in handle self.callHandlers(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1591, in callHandlers hdlr.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 905, in handle self.emit(record) File "/usr/local/lib/python3.7/logging/handlers.py", line 1234, in emit self.flush() File "/usr/local/lib/python3.7/logging/handlers.py", line 1310, in flush self.target.handle(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 905, in handle self.emit(record) File "/usr/local/lib/python3.7/logging/handlers.py", line 70, in emit logging.FileHandler.emit(self, record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1132, in emit StreamHandler.emit(self, record) File "/usr/local/lib/python3.7/logging/__init__.py", line 1034, in emit msg = self.format(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 880, in format return fmt.format(record) File "/usr/local/lib/python3.7/logging/__init__.py", line 627, in format record.exc_text = self.formatException(record.exc_info) File "/home/synapse/src/synapse/util/logformatter.py", line 45, in formatException traceback.print_stack(tb.tb_frame.f_back, None, sio) Traceback (most recent call last): File "/home/synapse/src/synapse/http/server.py", line 81, in wrapped_request_handler yield h(self, request) File "/home/synapse/src/synapse/http/server.py", line 316, in _async_render callback_return = yield callback(request, **kwargs) File "/home/synapse/src/synapse/replication/http/federation.py", line 117, in _handle_request event_and_contexts, backfilled, File "/home/synapse/src/synapse/handlers/federation.py", line 2662, in persist_events_and_notify backfilled=backfilled, File "/home/synapse/src/synapse/storage/events.py", line 287, in persist_events defer.gatherResults(deferreds, consumeErrors=True) twisted.internet.defer.FirstError: FirstError[#0, [Failure instance: Traceback: <class 'AssertionError'>: No forward extremities left! /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:654:_runCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1475:gotResult /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator --- <exception caught here> --- /home/synapse/src/synapse/storage/events.py:163:handle_queue_loop /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /home/synapse/src/synapse/storage/events.py:324:persisting_queue /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1416:_inlineCallbacks /home/synapse/env-py37/lib/python3.7/site-packages/twisted/python/failure.py:512:throwExceptionIntoGenerator /home/synapse/src/synapse/storage/events.py:205:f /home/synapse/env-py37/lib/python3.7/site-packages/twisted/internet/defer.py:1418:_inlineCallbacks /home/synapse/src/synapse/storage/events.py:416:_persist_events ]]
twisted.internet.defer.FirstError
def _handle_state_delta(self, deltas): """Process current state deltas to find new joins that need to be handled. """ for delta in deltas: typ = delta["type"] state_key = delta["state_key"] room_id = delta["room_id"] event_id = delta["event_id"] prev_event_id = delta["prev_event_id"] logger.debug("Handling: %r %r, %s", typ, state_key, event_id) if typ != EventTypes.Member: continue if event_id is None: # state has been deleted, so this is not a join. We only care about # joins. continue event = yield self.store.get_event(event_id) if event.content.get("membership") != Membership.JOIN: # We only care about joins continue if prev_event_id: prev_event = yield self.store.get_event(prev_event_id) if prev_event.content.get("membership") == Membership.JOIN: # Ignore changes to join events. continue yield self._on_user_joined_room(room_id, state_key)
def _handle_state_delta(self, deltas): """Process current state deltas to find new joins that need to be handled. """ for delta in deltas: typ = delta["type"] state_key = delta["state_key"] room_id = delta["room_id"] event_id = delta["event_id"] prev_event_id = delta["prev_event_id"] logger.debug("Handling: %r %r, %s", typ, state_key, event_id) if typ != EventTypes.Member: continue event = yield self.store.get_event(event_id) if event.content.get("membership") != Membership.JOIN: # We only care about joins continue if prev_event_id: prev_event = yield self.store.get_event(prev_event_id) if prev_event.content.get("membership") == Membership.JOIN: # Ignore changes to join events. continue yield self._on_user_joined_room(room_id, state_key)
https://github.com/matrix-org/synapse/issues/5102
2019-04-26 06:18:26,267 - synapse.metrics.background_process_metrics - 203 - ERROR - presence.notify_new_event-13001- Background process 'presence.notify_new_event' threw an exception Capture point (most recent call last): File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/opt/synapse/synapse/synapse/app/homeserver.py", line 661, in <module> main() File "/opt/synapse/synapse/synapse/app/homeserver.py", line 657, in main run(hs) File "/opt/synapse/synapse/synapse/app/homeserver.py", line 648, in run logger=logger, File "/opt/synapse/synapse/synapse/app/_base.py", line 143, in start_reactor run() File "/opt/synapse/synapse/synapse/app/_base.py", line 127, in run reactor.run() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 1267, in run self.mainLoop() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 1276, in mainLoop self.runUntilCurrent() File "/opt/synapse/synapse/synapse/metrics/__init__.py", line 347, in f ret = func(*args, **kwargs) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 875, in runUntilCurrent f(*a, **kw) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 349, in fire d.callback([res[i] for i in ids if i in res]) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) return g.throw(self.type, self.value, self.tb) Traceback (most recent call last): File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 419, in _enqueue_events defer.returnValue({e.event.event_id: e for e in res if e}) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: {} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 263, in _get_events defer.returnValue(events) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/synapse/synapse/synapse/metrics/background_process_metrics.py", line 201, in run yield func(*args, **kwargs) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/synapse/synapse/synapse/handlers/presence.py", line 790, in _process_presence yield self._unsafe_process() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/synapse/synapse/synapse/handlers/presence.py", line 805, in _unsafe_process yield self._handle_state_delta(deltas) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/synapse/synapse/synapse/handlers/presence.py", line 831, in _handle_state_delta event = yield self.store.get_event(event_id) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 120, in get_event raise NotFoundError("Could not find event %s" % (event_id,)) synapse.api.errors.NotFoundError: 404: Could not find event None
synapse.api.errors.NotFoundError
def get_current_state_deltas(self, prev_stream_id): """Fetch a list of room state changes since the given stream id Each entry in the result contains the following fields: - stream_id (int) - room_id (str) - type (str): event type - state_key (str): - event_id (str|None): new event_id for this state key. None if the state has been deleted. - prev_event_id (str|None): previous event_id for this state key. None if it's new state. Args: prev_stream_id (int): point to get changes since (exclusive) Returns: Deferred[list[dict]]: results """ prev_stream_id = int(prev_stream_id) if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id): return [] def get_current_state_deltas_txn(txn): # First we calculate the max stream id that will give us less than # N results. # We arbitarily limit to 100 stream_id entries to ensure we don't # select toooo many. sql = """ SELECT stream_id, count(*) FROM current_state_delta_stream WHERE stream_id > ? GROUP BY stream_id ORDER BY stream_id ASC LIMIT 100 """ txn.execute(sql, (prev_stream_id,)) total = 0 max_stream_id = prev_stream_id for max_stream_id, count in txn: total += count if total > 100: # We arbitarily limit to 100 entries to ensure we don't # select toooo many. break # Now actually get the deltas sql = """ SELECT stream_id, room_id, type, state_key, event_id, prev_event_id FROM current_state_delta_stream WHERE ? < stream_id AND stream_id <= ? ORDER BY stream_id ASC """ txn.execute(sql, (prev_stream_id, max_stream_id)) return self.cursor_to_dict(txn) return self.runInteraction("get_current_state_deltas", get_current_state_deltas_txn)
def get_current_state_deltas(self, prev_stream_id): prev_stream_id = int(prev_stream_id) if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id): return [] def get_current_state_deltas_txn(txn): # First we calculate the max stream id that will give us less than # N results. # We arbitarily limit to 100 stream_id entries to ensure we don't # select toooo many. sql = """ SELECT stream_id, count(*) FROM current_state_delta_stream WHERE stream_id > ? GROUP BY stream_id ORDER BY stream_id ASC LIMIT 100 """ txn.execute(sql, (prev_stream_id,)) total = 0 max_stream_id = prev_stream_id for max_stream_id, count in txn: total += count if total > 100: # We arbitarily limit to 100 entries to ensure we don't # select toooo many. break # Now actually get the deltas sql = """ SELECT stream_id, room_id, type, state_key, event_id, prev_event_id FROM current_state_delta_stream WHERE ? < stream_id AND stream_id <= ? ORDER BY stream_id ASC """ txn.execute(sql, (prev_stream_id, max_stream_id)) return self.cursor_to_dict(txn) return self.runInteraction("get_current_state_deltas", get_current_state_deltas_txn)
https://github.com/matrix-org/synapse/issues/5102
2019-04-26 06:18:26,267 - synapse.metrics.background_process_metrics - 203 - ERROR - presence.notify_new_event-13001- Background process 'presence.notify_new_event' threw an exception Capture point (most recent call last): File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/opt/synapse/synapse/synapse/app/homeserver.py", line 661, in <module> main() File "/opt/synapse/synapse/synapse/app/homeserver.py", line 657, in main run(hs) File "/opt/synapse/synapse/synapse/app/homeserver.py", line 648, in run logger=logger, File "/opt/synapse/synapse/synapse/app/_base.py", line 143, in start_reactor run() File "/opt/synapse/synapse/synapse/app/_base.py", line 127, in run reactor.run() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 1267, in run self.mainLoop() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 1276, in mainLoop self.runUntilCurrent() File "/opt/synapse/synapse/synapse/metrics/__init__.py", line 347, in f ret = func(*args, **kwargs) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 875, in runUntilCurrent f(*a, **kw) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 349, in fire d.callback([res[i] for i in ids if i in res]) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1461, in _inlineCallbacks status.deferred.callback(e.value) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 460, in callback self._startRunCallbacks(result) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1464, in _inlineCallbacks status.deferred.errback() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 501, in errback self._startRunCallbacks(fail) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks self._runCallbacks() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1475, in gotResult _inlineCallbacks(r, g, status) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) return g.throw(self.type, self.value, self.tb) Traceback (most recent call last): File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 419, in _enqueue_events defer.returnValue({e.event.event_id: e for e in res if e}) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: {} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 263, in _get_events defer.returnValue(events) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: [] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/synapse/synapse/synapse/metrics/background_process_metrics.py", line 201, in run yield func(*args, **kwargs) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/synapse/synapse/synapse/handlers/presence.py", line 790, in _process_presence yield self._unsafe_process() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/synapse/synapse/synapse/handlers/presence.py", line 805, in _unsafe_process yield self._handle_state_delta(deltas) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1416, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/failure.py", line 491, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/opt/synapse/synapse/synapse/handlers/presence.py", line 831, in _handle_state_delta event = yield self.store.get_event(event_id) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/opt/synapse/synapse/synapse/storage/events_worker.py", line 120, in get_event raise NotFoundError("Could not find event %s" % (event_id,)) synapse.api.errors.NotFoundError: 404: Could not find event None
synapse.api.errors.NotFoundError
def subscribe_to_stream(self, stream_name, token): """Subscribe the remote to a stream. This invloves checking if they've missed anything and sending those updates down if they have. During that time new updates for the stream are queued and sent once we've sent down any missed updates. """ self.replication_streams.discard(stream_name) self.connecting_streams.add(stream_name) try: # Get missing updates updates, current_token = yield self.streamer.get_stream_updates( stream_name, token, ) # Send all the missing updates for update in updates: token, row = update[0], update[1] self.send_command(RdataCommand(stream_name, token, row)) # We send a POSITION command to ensure that they have an up to # date token (especially useful if we didn't send any updates # above) self.send_command(PositionCommand(stream_name, current_token)) # Now we can send any updates that came in while we were subscribing pending_rdata = self.pending_rdata.pop(stream_name, []) updates = [] for token, update in pending_rdata: # If the token is null, it is part of a batch update. Batches # are multiple updates that share a single token. To denote # this, the token is set to None for all tokens in the batch # except for the last. If we find a None token, we keep looking # through tokens until we find one that is not None and then # process all previous updates in the batch as if they had the # final token. if token is None: # Store this update as part of a batch updates.append(update) continue if token <= current_token: # This update or batch of updates is older than # current_token, dismiss it updates = [] continue updates.append(update) # Send all updates that are part of this batch with the # found token for update in updates: self.send_command(RdataCommand(stream_name, token, update)) # Clear stored updates updates = [] # They're now fully subscribed self.replication_streams.add(stream_name) except Exception as e: logger.exception("[%s] Failed to handle REPLICATE command", self.id()) self.send_error("failed to handle replicate: %r", e) finally: self.connecting_streams.discard(stream_name)
def subscribe_to_stream(self, stream_name, token): """Subscribe the remote to a streams. This invloves checking if they've missed anything and sending those updates down if they have. During that time new updates for the stream are queued and sent once we've sent down any missed updates. """ self.replication_streams.discard(stream_name) self.connecting_streams.add(stream_name) try: # Get missing updates updates, current_token = yield self.streamer.get_stream_updates( stream_name, token, ) # Send all the missing updates for update in updates: token, row = update[0], update[1] self.send_command(RdataCommand(stream_name, token, row)) # We send a POSITION command to ensure that they have an up to # date token (especially useful if we didn't send any updates # above) self.send_command(PositionCommand(stream_name, current_token)) # Now we can send any updates that came in while we were subscribing pending_rdata = self.pending_rdata.pop(stream_name, []) for token, update in pending_rdata: # Only send updates newer than the current token if token > current_token: self.send_command(RdataCommand(stream_name, token, update)) # They're now fully subscribed self.replication_streams.add(stream_name) except Exception as e: logger.exception("[%s] Failed to handle REPLICATE command", self.id()) self.send_error("failed to handle replicate: %r", e) finally: self.connecting_streams.discard(stream_name)
https://github.com/matrix-org/synapse/issues/4705
Traceback (most recent call last): File "/home/matrix/synapse/synapse/replication/tcp/protocol.py", line 468, in subscribe_to_stream if token > current_token: TypeError: '>' not supported between instances of 'NoneType' and 'int'
TypeError
def read_config(self, config): consent_config = config.get("user_consent") if consent_config is None: return self.user_consent_version = str(consent_config["version"]) self.user_consent_template_dir = self.abspath(consent_config["template_dir"]) if not path.isdir(self.user_consent_template_dir): raise ConfigError( "Could not find template directory '%s'" % (self.user_consent_template_dir,), ) self.user_consent_server_notice_content = consent_config.get( "server_notice_content", ) self.block_events_without_consent_error = consent_config.get( "block_events_error", ) self.user_consent_server_notice_to_guests = bool( consent_config.get( "send_server_notice_to_guests", False, ) ) self.user_consent_at_registration = bool( consent_config.get( "require_at_registration", False, ) ) self.user_consent_policy_name = consent_config.get( "policy_name", "Privacy Policy", )
def read_config(self, config): consent_config = config.get("user_consent") if consent_config is None: return self.user_consent_version = str(consent_config["version"]) self.user_consent_template_dir = consent_config["template_dir"] self.user_consent_server_notice_content = consent_config.get( "server_notice_content", ) self.block_events_without_consent_error = consent_config.get( "block_events_error", ) self.user_consent_server_notice_to_guests = bool( consent_config.get( "send_server_notice_to_guests", False, ) ) self.user_consent_at_registration = bool( consent_config.get( "require_at_registration", False, ) ) self.user_consent_policy_name = consent_config.get( "policy_name", "Privacy Policy", )
https://github.com/matrix-org/synapse/issues/4500
2019-01-28 21:17:00,100 - twisted - 242 - ERROR - - Traceback (most recent call last): 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 394, in start 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - hs.start_listening() 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 245, in start_listening 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - self._listener_http(config, listener) 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 101, in _listener_http 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - name, res.get("compress", False), 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 183, in _configure_named_resource 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - consent_resource = ConsentResource(self) 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/rest/consent/consent_resource.py", line 111, in __init__ 2019-01-28 21:17:00,103 - twisted - 242 - ERROR - - consent_template_directory, 2019-01-28 21:17:00,103 - twisted - 242 - ERROR - - ConfigError: Could not find template directory '/res/templates/privacy' 2019-01-28 21:17:00,104 - synapse.handlers.deactivate_account - 144 - INFO - user_parter_loop-0 - User parter finished: stopping 2019-01-28 21:17:00,111 - synapse.metrics - 374 - INFO - - Collecting gc 0 2019-01-28 21:17:00,144 - twisted - 242 - CRITICAL - - Unhandled error in Deferred: 2019-01-28 21:17:00,145 - twisted - 242 - CRITICAL - - Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 407, in start sys.exit(1) SystemExit: 1
ConfigError
def __init__(self, hs): """ Args: hs (synapse.server.HomeServer): homeserver """ Resource.__init__(self) self.hs = hs self.store = hs.get_datastore() self.registration_handler = hs.get_handlers().registration_handler # this is required by the request_handler wrapper self.clock = hs.get_clock() self._default_consent_version = hs.config.user_consent_version if self._default_consent_version is None: raise ConfigError( "Consent resource is enabled but user_consent section is " "missing in config file.", ) consent_template_directory = hs.config.user_consent_template_dir loader = jinja2.FileSystemLoader(consent_template_directory) self._jinja_env = jinja2.Environment( loader=loader, autoescape=jinja2.select_autoescape(["html", "htm", "xml"]), ) if hs.config.form_secret is None: raise ConfigError( "Consent resource is enabled but form_secret is not set in " "config file. It should be set to an arbitrary secret string.", ) self._hmac_secret = hs.config.form_secret.encode("utf-8")
def __init__(self, hs): """ Args: hs (synapse.server.HomeServer): homeserver """ Resource.__init__(self) self.hs = hs self.store = hs.get_datastore() self.registration_handler = hs.get_handlers().registration_handler # this is required by the request_handler wrapper self.clock = hs.get_clock() self._default_consent_version = hs.config.user_consent_version if self._default_consent_version is None: raise ConfigError( "Consent resource is enabled but user_consent section is " "missing in config file.", ) # daemonize changes the cwd to /, so make the path absolute now. consent_template_directory = path.abspath( hs.config.user_consent_template_dir, ) if not path.isdir(consent_template_directory): raise ConfigError( "Could not find template directory '%s'" % (consent_template_directory,), ) loader = jinja2.FileSystemLoader(consent_template_directory) self._jinja_env = jinja2.Environment( loader=loader, autoescape=jinja2.select_autoescape(["html", "htm", "xml"]), ) if hs.config.form_secret is None: raise ConfigError( "Consent resource is enabled but form_secret is not set in " "config file. It should be set to an arbitrary secret string.", ) self._hmac_secret = hs.config.form_secret.encode("utf-8")
https://github.com/matrix-org/synapse/issues/4500
2019-01-28 21:17:00,100 - twisted - 242 - ERROR - - Traceback (most recent call last): 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 394, in start 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - hs.start_listening() 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 245, in start_listening 2019-01-28 21:17:00,101 - twisted - 242 - ERROR - - self._listener_http(config, listener) 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 101, in _listener_http 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - name, res.get("compress", False), 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 183, in _configure_named_resource 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - consent_resource = ConsentResource(self) 2019-01-28 21:17:00,102 - twisted - 242 - ERROR - - File "/home/matrix/.synapse/local/lib/python2.7/site-packages/synapse/rest/consent/consent_resource.py", line 111, in __init__ 2019-01-28 21:17:00,103 - twisted - 242 - ERROR - - consent_template_directory, 2019-01-28 21:17:00,103 - twisted - 242 - ERROR - - ConfigError: Could not find template directory '/res/templates/privacy' 2019-01-28 21:17:00,104 - synapse.handlers.deactivate_account - 144 - INFO - user_parter_loop-0 - User parter finished: stopping 2019-01-28 21:17:00,111 - synapse.metrics - 374 - INFO - - Collecting gc 0 2019-01-28 21:17:00,144 - twisted - 242 - CRITICAL - - Unhandled error in Deferred: 2019-01-28 21:17:00,145 - twisted - 242 - CRITICAL - - Traceback (most recent call last): File "/home/matrix/.synapse/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/home/matrix/.synapse/lib/python2.7/site-packages/synapse/app/homeserver.py", line 407, in start sys.exit(1) SystemExit: 1
ConfigError
def _new_transaction( self, conn, desc, after_callbacks, exception_callbacks, func, *args, **kwargs ): start = time.time() txn_id = self._TXN_ID # We don't really need these to be unique, so lets stop it from # growing really large. self._TXN_ID = (self._TXN_ID + 1) % (MAX_TXN_ID) name = "%s-%x" % ( desc, txn_id, ) transaction_logger.debug("[TXN START] {%s}", name) try: i = 0 N = 5 while True: try: txn = conn.cursor() txn = LoggingTransaction( txn, name, self.database_engine, after_callbacks, exception_callbacks, ) r = func(txn, *args, **kwargs) conn.commit() return r except self.database_engine.module.OperationalError as e: # This can happen if the database disappears mid # transaction. logger.warning( "[TXN OPERROR] {%s} %s %d/%d", name, exception_to_unicode(e), i, N ) if i < N: i += 1 try: conn.rollback() except self.database_engine.module.Error as e1: logger.warning( "[TXN EROLL] {%s} %s", name, exception_to_unicode(e1), ) continue raise except self.database_engine.module.DatabaseError as e: if self.database_engine.is_deadlock(e): logger.warning("[TXN DEADLOCK] {%s} %d/%d", name, i, N) if i < N: i += 1 try: conn.rollback() except self.database_engine.module.Error as e1: logger.warning( "[TXN EROLL] {%s} %s", name, exception_to_unicode(e1), ) continue raise except Exception as e: logger.debug("[TXN FAIL] {%s} %s", name, e) raise finally: end = time.time() duration = end - start LoggingContext.current_context().add_database_transaction(duration) transaction_logger.debug("[TXN END] {%s} %f sec", name, duration) self._current_txn_total_time += duration self._txn_perf_counters.update(desc, start, end) sql_txn_timer.labels(desc).observe(duration)
def _new_transaction( self, conn, desc, after_callbacks, exception_callbacks, func, *args, **kwargs ): start = time.time() txn_id = self._TXN_ID # We don't really need these to be unique, so lets stop it from # growing really large. self._TXN_ID = (self._TXN_ID + 1) % (MAX_TXN_ID) name = "%s-%x" % ( desc, txn_id, ) transaction_logger.debug("[TXN START] {%s}", name) try: i = 0 N = 5 while True: try: txn = conn.cursor() txn = LoggingTransaction( txn, name, self.database_engine, after_callbacks, exception_callbacks, ) r = func(txn, *args, **kwargs) conn.commit() return r except self.database_engine.module.OperationalError as e: # This can happen if the database disappears mid # transaction. logger.warn("[TXN OPERROR] {%s} %s %d/%d", name, e, i, N) if i < N: i += 1 try: conn.rollback() except self.database_engine.module.Error as e1: logger.warn( "[TXN EROLL] {%s} %s", name, e1, ) continue raise except self.database_engine.module.DatabaseError as e: if self.database_engine.is_deadlock(e): logger.warn("[TXN DEADLOCK] {%s} %d/%d", name, i, N) if i < N: i += 1 try: conn.rollback() except self.database_engine.module.Error as e1: logger.warn( "[TXN EROLL] {%s} %s", name, e1, ) continue raise except Exception as e: logger.debug("[TXN FAIL] {%s} %s", name, e) raise finally: end = time.time() duration = end - start LoggingContext.current_context().add_database_transaction(duration) transaction_logger.debug("[TXN END] {%s} %f sec", name, duration) self._current_txn_total_time += duration self._txn_perf_counters.update(desc, start, end) sql_txn_timer.labels(desc).observe(duration)
https://github.com/matrix-org/synapse/issues/4252
2018-10-25 15:58:33,973 - twisted - 243 - ERROR - POST-299240- Traceback (most recent call last): 2018-10-25 15:58:33,973 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/handlers.py", line 76, in emit 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- if self.shouldRollover(record): 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/handlers.py", line 156, in shouldRollover 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- msg = "%s\n" % self.format(record) 2018-10-25 15:58:33,974 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 734, in format 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- return fmt.format(record) 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 476, in format 2018-10-25 15:58:33,975 - twisted - 243 - ERROR - POST-299240- raise e 2018-10-25 15:58:33,977 - twisted - 243 - ERROR - POST-299240- UnicodeDecodeError: 'ascii' codec can't decode byte 0xd0 in position 46: ordinal not in range(128) 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- Logged from file _base.py, line 254 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- Traceback (most recent call last): 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 861, in emit 2018-10-25 15:58:33,978 - twisted - 243 - ERROR - POST-299240- msg = self.format(record) 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 734, in format 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- return fmt.format(record) 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- File "/usr/lib/python2.7/logging/__init__.py", line 476, in format 2018-10-25 15:58:33,979 - twisted - 243 - ERROR - POST-299240- raise e 2018-10-25 15:58:33,980 - twisted - 243 - ERROR - POST-299240- UnicodeDecodeError: 'ascii' codec can't decode byte 0xd0 in position 46: ordinal not in range(128) 2018-10-25 15:58:33,980 - twisted - 243 - ERROR - POST-299240- Logged from file _base.py, line 254
UnicodeDecodeError
def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time): cache_entry = cache.get(service_name, None) if cache_entry: if all(s.expires > int(clock.time()) for s in cache_entry): servers = list(cache_entry) defer.returnValue(servers) servers = [] try: try: answers, _, _ = yield dns_client.lookupService(service_name) except DNSNameError: defer.returnValue([]) if ( len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload and answers[0].payload.target == dns.Name(".") ): raise ConnectError("Service %s unavailable" % service_name) for answer in answers: if answer.type != dns.SRV or not answer.payload: continue payload = answer.payload servers.append( _Server( host=str(payload.target), port=int(payload.port), priority=int(payload.priority), weight=int(payload.weight), expires=int(clock.time()) + answer.ttl, ) ) servers.sort() cache[service_name] = list(servers) except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else rereaise cache_entry = cache.get(service_name, None) if cache_entry: logger.warn( "Failed to resolve %r, falling back to cache. %r", service_name, e ) servers = list(cache_entry) else: raise e defer.returnValue(servers)
def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time): cache_entry = cache.get(service_name, None) if cache_entry: if all(s.expires > int(clock.time()) for s in cache_entry): servers = list(cache_entry) defer.returnValue(servers) servers = [] try: try: answers, _, _ = yield dns_client.lookupService(service_name) except DNSNameError: defer.returnValue([]) if ( len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload and answers[0].payload.target == dns.Name(".") ): raise ConnectError("Service %s unavailable" % service_name) for answer in answers: if answer.type != dns.SRV or not answer.payload: continue payload = answer.payload hosts = yield _get_hosts_for_srv_record(dns_client, str(payload.target)) for ip, ttl in hosts: host_ttl = min(answer.ttl, ttl) servers.append( _Server( host=ip, port=int(payload.port), priority=int(payload.priority), weight=int(payload.weight), expires=int(clock.time()) + host_ttl, ) ) servers.sort() cache[service_name] = list(servers) except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else rereaise cache_entry = cache.get(service_name, None) if cache_entry: logger.warn( "Failed to resolve %r, falling back to cache. %r", service_name, e ) servers = list(cache_entry) else: raise e defer.returnValue(servers)
https://github.com/matrix-org/synapse/issues/2850
2018-02-05 19:14:51,585 - synapse.access.http.8008 - 59 - INFO - GET-131021- - - 8008 - Received request: GET /_matrix/client/r0/directory/room/%23test%3Amatrix.org?access_token=<redacted> 2018-02-05 19:14:51,589 - synapse.http.outbound - 154 - INFO - GET-131021- {GET-O-1317} [matrix.org] Sending request: GET matrix://matrix.org/_matrix/federation/v1/query/directory?room_alias=%23test%3Amatrix.org 2018-02-05 19:14:51,590 - synapse.http.endpoint - 264 - INFO - - Connecting to 2a04:3541:1000:500:6866:a4ff:fe4c:21ac:8448 2018-02-05 19:14:51,592 - twisted - 131 - INFO - - Starting factory _HTTP11ClientFactory(<function quiescentCallback at 0x7f9d7826ad70>, <synapse.http.endpoint._WrappingEndpointFac object at 0x7f9d9c993190>) 2018-02-05 19:14:51,593 - twisted - 131 - INFO - - Stopping factory _HTTP11ClientFactory(<function quiescentCallback at 0x7f9d7826ad70>, <synapse.http.endpoint._WrappingEndpointFac object at 0x7f9d9c993190>) 2018-02-05 19:14:51,892 - synapse.http.matrixfederationclient - 212 - WARNING - GET-131021- {GET-O-1317} Sending request failed to matrix.org: GET matrix://matrix.org/_matrix/federation/v1/query/directory?room_alias=%23test%3Amatrix.org: NoRouteError('Network is unreachable',) 2018-02-05 19:14:51,892 - synapse.http.outbound - 236 - INFO - GET-131021- {GET-O-1317} [matrix.org] Result: NoRouteError('Network is unreachable',) 2018-02-05 19:14:51,893 - synapse.http.server - 183 - ERROR - GET-131021- Failed handle request synapse.http.server._async_render on <synapse.rest.ClientRestResource object at 0x7f9dae55b590>: <XForwardedForRequest at 0x7f9d78220bd8 method=GET uri=/_matrix/client/r0/directory/room/%23test%3Amatrix.org?access_token=<redacted> clientproto=HTTP/1.1 site=8008>: Traceback (most recent call last): Failure: twisted.internet.error.NoRouteError: No route to host: 101: Network is unreachable. 2018-02-05 19:14:51,895 - synapse.access.http.8008 - 93 - INFO - GET-131021- - - 8008 - {None} Processed request: 309ms (4ms, 0ms) (1ms/1ms/1) 69B 500 "GET /_matrix/client/r0/directory/room/%23test%3Amatrix.org?access_token=<redacted> HTTP/1.1" "curl/7.47.0"
NoRouteError
def update_manifest(ref=None): """ Given a git reference in the Noto repo, such as a git commit hash or tag, extract information about the fonts available for use and save that information to the manifest file. The Noto repo currently contains both an older style and the newer "Phase 3" fonts. Phase 3 fonts have more consistent internal metrics which makes them amenable to being merged together, which we make use of. The older fonts are still usable, but cannot be merged together. Noto also contains both standard and "UI" variants of many fonts. When a font has a UI variant, it means that some of the glyphs in the standard variant are very tall and might overflow a typical line of text; the UI variant has the glypsh redrawn to fit. When searching for fonts to include, we take all language fonts that have both a regular and a bold variant, with preference given to Phase 3 and UI variants. """ # grab the head of master if not ref: logging.info("Using head of master") ref = _request("git/refs/heads/master")["object"]["sha"] logging.info("Generating new manifest for reference '{}'".format(ref)) git_tree = _request("git/trees/{}?recursive=1".format(ref)) # backups font_info = _font_info(git_tree, ref, OLD_STYLE_PATH, _old_download_url) # prefer phase 3, replacing old-styles when possible font_info.update(_font_info(git_tree, ref, PHASE_3_PATH, _p3_download_url)) new_manifest = {KEY_REF: ref, KEY_FONTS: font_info} utils.json_dump_formatted(new_manifest, FONTS_SOURCE, FONT_MANIFEST_NAME)
def update_manifest(ref=None): """ Given a git reference in the Noto repo, such as a git commit hash or tag, extract information about the fonts available for use and save that information to the manifest file. The Noto repo currently contains both an older style and the newer "Phase 3" fonts. Phase 3 fonts have more consistent internal metrics which makes them amenable to being merged together, which we make use of. The older fonts are still usable, but cannot be merged together. Noto also contains both standard and "UI" variants of many fonts. When a font has a UI variant, it means that some of the glyphs in the standard variant are very tall and might overflow a typical line of text; the UI variant has the glypsh redrawn to fit. When searching for fonts to include, we take all language fonts that have both a regular and a bold variant, with preference given to Phase 3 and UI variants. """ # grab the head of master if not ref: logging.info("Using head of master") ref = _request("git/refs/heads/master")["object"]["sha"] logging.info("Generating new manifest for reference '{}'".format(ref)) git_tree = _request("git/trees/{}?recursive=1".format(ref)) # backups font_info = _font_info(git_tree, ref, OLD_STYLE_PATH, _old_download_url) # prefer phase 3, replacing old-styles when possible font_info.update(_font_info(git_tree, ref, PHASE_3_PATH, _p3_download_url)) new_manifest = {KEY_REF: ref, KEY_FONTS: font_info} utils.json_dump_formatted(new_manifest, FONT_MANIFEST_PATH)
https://github.com/learningequality/kolibri/issues/6796
Traceback (most recent call last): File "build_tools/i18n/fonts.py", line 641, in <module> main() File "build_tools/i18n/fonts.py", line 627, in main command_update_font_manifest(args.ref) File "build_tools/i18n/fonts.py", line 560, in command_update_font_manifest noto_source.update_manifest(ref) File "/Users/jon/Github/kolibri/build_tools/i18n/noto_source.py", line 191, in update_manifest utils.json_dump_formatted(new_manifest, FONT_MANIFEST_PATH) TypeError: json_dump_formatted() missing 1 required positional argument: 'file_name'
TypeError
def add_arguments(self, parser): parser.add_argument( "--interval", action="store", dest="interval", help="Number of minutes to wait after a successful ping before the next ping.", ) parser.add_argument( "--checkrate", action="store", dest="checkrate", help="Number of minutes to wait between failed ping attempts.", ) parser.add_argument( "--server", action="store", dest="server", help="Base URL of the server to connect to.", ) parser.add_argument( "--once", action="store_true", dest="once", help="Only try to ping once, then exit", )
def add_arguments(self, parser): parser.add_argument( "--interval", action="store", dest="interval", help="Number of minutes to wait after a successful ping before the next ping.", ) parser.add_argument( "--checkrate", action="store", dest="checkrate", help="Number of minutes to wait between failed ping attempts.", ) parser.add_argument( "--server", action="store", dest="server", help="Base URL of the server to connect to.", )
https://github.com/learningequality/kolibri/issues/4414
INFO:kolibri.core.analytics.management.commands.ping:Ping succeeded! (response: {'id': 87135}) Exception in thread Thread-1: Traceback (most recent call last): File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/utils/server.py", line 109, in run call_command("ping") File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 131, in call_command return command.execute(*args, **defaults) File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 58, in handle self.perform_statistics(server, data["id"]) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 123, in perform_statistics jsondata = dump_zipped_json(data) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/utils.py", line 37, in dump_zipped_json jsondata = json.dumps(data) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'bytes' is not JSON serializable
TypeError
def handle(self, *args, **options): interval = float(options.get("interval") or DEFAULT_PING_INTERVAL) checkrate = float(options.get("checkrate") or DEFAULT_PING_CHECKRATE) server = options.get("server") or DEFAULT_SERVER_URL once = options.get("once") or False self.started = datetime.now() while True: try: logger.info("Attempting a ping.") with vacuum_db_lock: data = self.perform_ping(server) logger.info("Ping succeeded! (response: {})".format(data)) if "id" in data: self.perform_statistics(server, data["id"]) if once: break logger.info("Sleeping for {} minutes.".format(interval)) time.sleep(interval * 60) continue except ConnectionError: logger.warn( "Ping failed (could not connect). Trying again in {} minutes.".format( checkrate ) ) except Timeout: logger.warn( "Ping failed (connection timed out). Trying again in {} minutes.".format( checkrate ) ) except RequestException as e: logger.warn( "Ping failed ({})! Trying again in {} minutes.".format(e, checkrate) ) if once: break time.sleep(checkrate * 60)
def handle(self, *args, **options): interval = float(options.get("interval") or DEFAULT_PING_INTERVAL) checkrate = float(options.get("checkrate") or DEFAULT_PING_CHECKRATE) server = options.get("server") or DEFAULT_SERVER_URL self.started = datetime.now() while True: try: logger.info("Attempting a ping.") with vacuum_db_lock: data = self.perform_ping(server) logger.info("Ping succeeded! (response: {})".format(data)) if "id" in data: self.perform_statistics(server, data["id"]) logger.info("Sleeping for {} minutes.".format(interval)) time.sleep(interval * 60) continue except ConnectionError: logger.warn( "Ping failed (could not connect). Trying again in {} minutes.".format( checkrate ) ) except Timeout: logger.warn( "Ping failed (connection timed out). Trying again in {} minutes.".format( checkrate ) ) except RequestException as e: logger.warn( "Ping failed ({})! Trying again in {} minutes.".format(e, checkrate) ) time.sleep(checkrate * 60)
https://github.com/learningequality/kolibri/issues/4414
INFO:kolibri.core.analytics.management.commands.ping:Ping succeeded! (response: {'id': 87135}) Exception in thread Thread-1: Traceback (most recent call last): File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/utils/server.py", line 109, in run call_command("ping") File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 131, in call_command return command.execute(*args, **defaults) File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 58, in handle self.perform_statistics(server, data["id"]) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 123, in perform_statistics jsondata = dump_zipped_json(data) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/utils.py", line 37, in dump_zipped_json jsondata = json.dumps(data) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'bytes' is not JSON serializable
TypeError
def perform_ping(self, server): url = urljoin(server, "/api/v1/pingback") instance, _ = InstanceIDModel.get_or_create_current_instance() devicesettings = DeviceSettings.objects.first() language = devicesettings.language_id if devicesettings else "" try: timezone = get_current_timezone().zone except Exception: timezone = "" data = { "instance_id": instance.id, "version": kolibri.__version__, "mode": os.environ.get("KOLIBRI_RUN_MODE", ""), "platform": instance.platform, "sysversion": instance.sysversion, "database_id": instance.database.id, "system_id": instance.system_id, "node_id": instance.node_id, "language": language, "timezone": timezone, "uptime": int((datetime.now() - self.started).total_seconds() / 60), } logger.debug("Pingback data: {}".format(data)) jsondata = dump_zipped_json(data) response = requests.post(url, data=jsondata, timeout=60) response.raise_for_status() return json.loads(response.content.decode() or "{}")
def perform_ping(self, server): url = urljoin(server, "/api/v1/pingback") instance, _ = InstanceIDModel.get_or_create_current_instance() devicesettings = DeviceSettings.objects.first() language = devicesettings.language_id if devicesettings else "" try: timezone = get_current_timezone().zone except Exception: timezone = "" data = { "instance_id": instance.id, "version": kolibri.__version__, "mode": os.environ.get("KOLIBRI_RUN_MODE", ""), "platform": instance.platform, "sysversion": instance.sysversion, "database_id": instance.database.id, "system_id": instance.system_id, "node_id": instance.node_id, "language": language, "timezone": timezone, "uptime": int((datetime.now() - self.started).total_seconds() / 60), } logger.debug("Pingback data: {}".format(data)) jsondata = dump_zipped_json(data) response = requests.post(url, data=jsondata, timeout=60) response.raise_for_status() return json.loads(response.content or "{}")
https://github.com/learningequality/kolibri/issues/4414
INFO:kolibri.core.analytics.management.commands.ping:Ping succeeded! (response: {'id': 87135}) Exception in thread Thread-1: Traceback (most recent call last): File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/utils/server.py", line 109, in run call_command("ping") File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 131, in call_command return command.execute(*args, **defaults) File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 58, in handle self.perform_statistics(server, data["id"]) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 123, in perform_statistics jsondata = dump_zipped_json(data) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/utils.py", line 37, in dump_zipped_json jsondata = json.dumps(data) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'bytes' is not JSON serializable
TypeError
def perform_statistics(self, server, pingback_id): url = urljoin(server, "/api/v1/statistics") channels = [extract_channel_statistics(c) for c in ChannelMetadata.objects.all()] facilities = [extract_facility_statistics(f) for f in Facility.objects.all()] data = { "pi": pingback_id, "c": channels, "f": facilities, } logger.debug("Statistics data: {}".format(data)) jsondata = dump_zipped_json(data) response = requests.post(url, data=jsondata, timeout=60) response.raise_for_status() return json.loads(response.content.decode() or "{}")
def perform_statistics(self, server, pingback_id): url = urljoin(server, "/api/v1/statistics") channels = [extract_channel_statistics(c) for c in ChannelMetadata.objects.all()] facilities = [extract_facility_statistics(f) for f in Facility.objects.all()] data = { "pi": pingback_id, "c": channels, "f": facilities, } logger.debug("Statistics data: {}".format(data)) jsondata = dump_zipped_json(data) response = requests.post(url, data=jsondata, timeout=60) response.raise_for_status() return json.loads(response.content or "{}")
https://github.com/learningequality/kolibri/issues/4414
INFO:kolibri.core.analytics.management.commands.ping:Ping succeeded! (response: {'id': 87135}) Exception in thread Thread-1: Traceback (most recent call last): File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/utils/server.py", line 109, in run call_command("ping") File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 131, in call_command return command.execute(*args, **defaults) File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 58, in handle self.perform_statistics(server, data["id"]) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 123, in perform_statistics jsondata = dump_zipped_json(data) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/utils.py", line 37, in dump_zipped_json jsondata = json.dumps(data) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'bytes' is not JSON serializable
TypeError
def extract_facility_statistics(facility): dataset_id = facility.dataset_id settings = { name: getattr(facility.dataset, name) for name in facility_settings if hasattr(facility.dataset, name) } learners = FacilityUser.objects.filter(dataset_id=dataset_id).exclude( roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) coaches = FacilityUser.objects.filter( dataset_id=dataset_id, roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) usersessions = UserSessionLog.objects.filter(dataset_id=dataset_id) contsessions = ContentSessionLog.objects.filter( dataset_id=dataset_id, time_spent__lt=3600 * 2 ) # the aggregates below are used to calculate the first and most recent times this device was used usersess_agg = usersessions.filter( start_timestamp__gt=datetime.datetime(2016, 1, 1) ).aggregate(first=Min("start_timestamp"), last=Max("last_interaction_timestamp")) contsess_agg = contsessions.filter( start_timestamp__gt=datetime.datetime(2016, 1, 1) ).aggregate(first=Min("start_timestamp"), last=Max("end_timestamp")) # since newly provisioned devices won't have logs, we don't know whether we have an available datetime object first_interaction_timestamp = getattr( min(usersess_agg["first"], contsess_agg["first"]), "strftime", None ) last_interaction_timestamp = getattr( max(usersess_agg["last"], contsess_agg["last"]), "strftime", None ) sesslogs_by_kind = ( contsessions.order_by("kind").values("kind").annotate(count=Count("kind")) ) sesslogs_by_kind = {log["kind"]: log["count"] for log in sesslogs_by_kind} summarylogs = ContentSummaryLog.objects.filter(dataset_id=dataset_id) contsessions_user = contsessions.exclude(user=None) contsessions_anon = contsessions.filter(user=None) return { # facility_id "fi": base64.encodestring(hashlib.md5(facility.id.encode()).digest())[ :10 ].decode(), # settings "s": settings, # learners_count "lc": learners.count(), # learner_login_count "llc": usersessions.exclude( user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) .distinct() .count(), # coaches_count "cc": coaches.count(), # coach_login_count "clc": usersessions.filter( user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) .distinct() .count(), # first "f": first_interaction_timestamp("%Y-%m-%d") if first_interaction_timestamp else None, # last "l": last_interaction_timestamp("%Y-%m-%d") if last_interaction_timestamp else None, # summ_started "ss": summarylogs.count(), # summ_complete "sc": summarylogs.exclude(completion_timestamp=None).count(), # sess_kinds "sk": sesslogs_by_kind, # lesson_count "lec": Lesson.objects.filter(dataset_id=dataset_id).count(), # exam_count "ec": Exam.objects.filter(dataset_id=dataset_id).count(), # exam_log_count "elc": ExamLog.objects.filter(dataset_id=dataset_id).count(), # att_log_count "alc": AttemptLog.objects.filter(dataset_id=dataset_id).count(), # exam_att_log_count "ealc": ExamAttemptLog.objects.filter(dataset_id=dataset_id).count(), # sess_user_count "suc": contsessions_user.count(), # sess_anon_count "sac": contsessions_anon.count(), # sess_user_time "sut": int( ( contsessions_user.aggregate(total_time=Sum("time_spent"))["total_time"] or 0 ) / 60 ), # sess_anon_time "sat": int( ( contsessions_anon.aggregate(total_time=Sum("time_spent"))["total_time"] or 0 ) / 60 ), }
def extract_facility_statistics(facility): dataset_id = facility.dataset_id settings = { name: getattr(facility.dataset, name) for name in facility_settings if hasattr(facility.dataset, name) } learners = FacilityUser.objects.filter(dataset_id=dataset_id).exclude( roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) coaches = FacilityUser.objects.filter( dataset_id=dataset_id, roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) usersessions = UserSessionLog.objects.filter(dataset_id=dataset_id) contsessions = ContentSessionLog.objects.filter( dataset_id=dataset_id, time_spent__lt=3600 * 2 ) # the aggregates below are used to calculate the first and most recent times this device was used usersess_agg = usersessions.filter( start_timestamp__gt=datetime.datetime(2016, 1, 1) ).aggregate(first=Min("start_timestamp"), last=Max("last_interaction_timestamp")) contsess_agg = contsessions.filter( start_timestamp__gt=datetime.datetime(2016, 1, 1) ).aggregate(first=Min("start_timestamp"), last=Max("end_timestamp")) # since newly provisioned devices won't have logs, we don't know whether we have an available datetime object first_interaction_timestamp = getattr( min(usersess_agg["first"], contsess_agg["first"]), "strftime", None ) last_interaction_timestamp = getattr( max(usersess_agg["last"], contsess_agg["last"]), "strftime", None ) sesslogs_by_kind = ( contsessions.order_by("kind").values("kind").annotate(count=Count("kind")) ) sesslogs_by_kind = {log["kind"]: log["count"] for log in sesslogs_by_kind} summarylogs = ContentSummaryLog.objects.filter(dataset_id=dataset_id) contsessions_user = contsessions.exclude(user=None) contsessions_anon = contsessions.filter(user=None) return { # facility_id "fi": base64.encodestring(hashlib.md5(facility.id.encode()).digest())[:10], # settings "s": settings, # learners_count "lc": learners.count(), # learner_login_count "llc": usersessions.exclude( user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) .distinct() .count(), # coaches_count "cc": coaches.count(), # coach_login_count "clc": usersessions.filter( user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH] ) .distinct() .count(), # first "f": first_interaction_timestamp("%Y-%m-%d") if first_interaction_timestamp else None, # last "l": last_interaction_timestamp("%Y-%m-%d") if last_interaction_timestamp else None, # summ_started "ss": summarylogs.count(), # summ_complete "sc": summarylogs.exclude(completion_timestamp=None).count(), # sess_kinds "sk": sesslogs_by_kind, # lesson_count "lec": Lesson.objects.filter(dataset_id=dataset_id).count(), # exam_count "ec": Exam.objects.filter(dataset_id=dataset_id).count(), # exam_log_count "elc": ExamLog.objects.filter(dataset_id=dataset_id).count(), # att_log_count "alc": AttemptLog.objects.filter(dataset_id=dataset_id).count(), # exam_att_log_count "ealc": ExamAttemptLog.objects.filter(dataset_id=dataset_id).count(), # sess_user_count "suc": contsessions_user.count(), # sess_anon_count "sac": contsessions_anon.count(), # sess_user_time "sut": int( ( contsessions_user.aggregate(total_time=Sum("time_spent"))["total_time"] or 0 ) / 60 ), # sess_anon_time "sat": int( ( contsessions_anon.aggregate(total_time=Sum("time_spent"))["total_time"] or 0 ) / 60 ), }
https://github.com/learningequality/kolibri/issues/4414
INFO:kolibri.core.analytics.management.commands.ping:Ping succeeded! (response: {'id': 87135}) Exception in thread Thread-1: Traceback (most recent call last): File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/utils/server.py", line 109, in run call_command("ping") File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 131, in call_command return command.execute(*args, **defaults) File "/Users/d/Projects/learning_equality/repos/k011x/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 58, in handle self.perform_statistics(server, data["id"]) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/management/commands/ping.py", line 123, in perform_statistics jsondata = dump_zipped_json(data) File "/Users/d/Projects/learning_equality/repos/k011x/kolibri/core/analytics/utils.py", line 37, in dump_zipped_json jsondata = json.dumps(data) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'bytes' is not JSON serializable
TypeError
def get_files_to_transfer(channel_id, node_ids, exclude_node_ids, available): files_to_transfer = LocalFile.objects.filter( files__contentnode__channel_id=channel_id, available=available ) if node_ids: leaf_node_ids = _get_leaf_node_ids(node_ids) files_to_transfer = files_to_transfer.filter( files__contentnode__in=leaf_node_ids ) if exclude_node_ids: exclude_leaf_node_ids = _get_leaf_node_ids(exclude_node_ids) files_to_transfer = files_to_transfer.exclude( files__contentnode__in=exclude_leaf_node_ids ) # Make sure the files are unique, to avoid duplicating downloads files_to_transfer = files_to_transfer.distinct() total_bytes_to_transfer = ( files_to_transfer.aggregate(Sum("file_size"))["file_size__sum"] or 0 ) return files_to_transfer, total_bytes_to_transfer
def get_files_to_transfer(channel_id, node_ids, exclude_node_ids, available): files_to_transfer = LocalFile.objects.filter( files__contentnode__channel_id=channel_id, available=available ) if node_ids: leaf_node_ids = _get_leaves_ids(node_ids) files_to_transfer = files_to_transfer.filter( files__contentnode__in=leaf_node_ids ) if exclude_node_ids: exclude_leaf_node_ids = _get_leaves_ids(exclude_node_ids) files_to_transfer = files_to_transfer.exclude( files__contentnode__in=exclude_leaf_node_ids ) # Make sure the files are unique, to avoid duplicating downloads files_to_transfer = files_to_transfer.distinct() total_bytes_to_transfer = ( files_to_transfer.aggregate(Sum("file_size"))["file_size__sum"] or 0 ) return files_to_transfer, total_bytes_to_transfer
https://github.com/learningequality/kolibri/issues/3110
�[33mWARNING Job d416c174faeb4648b22bd1420a75f2a0 raised an exception: Traceback (most recent call last): File "c:\python27\lib\site-packages\kolibri\dist\iceqube\worker\backends\inmem.py", line 75, in handle_finished_future result = future.result() File "c:\python27\lib\site-packages\kolibri\dist\concurrent\futures\_base.py", line 455, in result return self.__get_result() File "c:\python27\lib\site-packages\kolibri\dist\concurrent\futures\thread.py", line 63, in run result = self.fn(*self.args, **self.kwargs) File "c:\python27\lib\site-packages\kolibri\dist\iceqube\worker\backends\inmem.py", line 149, in wrap raise e OperationalError: too many SQL variables �[0m WARNING:iceqube.worker.backends.base:Job d416c174faeb4648b22bd1420a75f2a0 raised an exception: Traceback (most recent call last): File "c:\python27\lib\site-packages\kolibri\dist\iceqube\worker\backends\inmem.py", line 75, in handle_finished_future result = future.result() File "c:\python27\lib\site-packages\kolibri\dist\concurrent\futures\_base.py", line 455, in result return self.__get_result() File "c:\python27\lib\site-packages\kolibri\dist\concurrent\futures\thread.py", line 63, in run result = self.fn(*self.args, **self.kwargs) File "c:\python27\lib\site-packages\kolibri\dist\iceqube\worker\backends\inmem.py", line 149, in wrap raise e OperationalError: too many SQL variables
OperationalError
def _job_to_response(job): if not job: return { "type": None, "started_by": None, "status": State.SCHEDULED, "percentage": 0, "progress": [], "id": None, "cancellable": False, } else: return { "type": getattr(job, "extra_metadata", {}).get("type"), "started_by": getattr(job, "extra_metadata", {}).get("started_by"), "status": job.state, "exception": str(job.exception), "traceback": str(job.traceback), "percentage": job.percentage_progress, "id": job.job_id, "cancellable": job.cancellable, }
def _job_to_response(job): if not job: return { "type": None, "started_by": None, "status": State.SCHEDULED, "percentage": 0, "progress": [], "id": None, "cancellable": False, } else: return { "type": job.extra_metadata.get("type"), "started_by": job.extra_metadata.get("started_by"), "status": job.state, "exception": str(job.exception), "traceback": str(job.traceback), "percentage": job.percentage_progress, "id": job.job_id, "cancellable": job.cancellable, }
https://github.com/learningequality/kolibri/issues/2729
ERROR Internal Server Error: /api/tasks/ Traceback (most recent call last): File "/Users/d/PythonEnvs/le2/lib/python2.7/site-packages/django/core/handlers/base.py", line 149, in get_response response = self.process_exception_by_middleware(e, request) File "/Users/d/PythonEnvs/le2/lib/python2.7/site-packages/django/core/handlers/base.py", line 147, in get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/Users/d/PythonEnvs/le2/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view return view_func(*args, **kwargs) File "/Users/d/PythonEnvs/le2/lib/python2.7/site-packages/rest_framework/viewsets.py", line 87, in view return self.dispatch(request, *args, **kwargs) File "/Users/d/PythonEnvs/le2/lib/python2.7/site-packages/rest_framework/views.py", line 466, in dispatch response = self.handle_exception(exc) File "/Users/d/PythonEnvs/le2/lib/python2.7/site-packages/rest_framework/views.py", line 463, in dispatch response = handler(request, *args, **kwargs) File "kolibri/tasks/api.py", line 42, in list jobs_response = [_job_to_response(j) for j in get_client().all_jobs()] File "kolibri/tasks/api.py", line 427, in _job_to_response "type": job.extra_metadata.get("type"), AttributeError: 'Job' object has no attribute 'extra_metadata'
AttributeError
def handle(self, *args, **options): engine = create_engine(get_default_db_string(), convert_unicode=True) metadata = MetaData() app_config = apps.get_app_config("content") # Exclude channelmetadatacache in case we are reflecting an older version of Kolibri table_names = [ model._meta.db_table for name, model in app_config.models.items() if name != "channelmetadatacache" ] metadata.reflect(bind=engine, only=table_names) Base = automap_base(metadata=metadata) # TODO map relationship backreferences using the django names Base.prepare() session = sessionmaker(bind=engine, autoflush=False)() # Load fixture data into the test database with Django call_command("loaddata", "content_import_test.json", interactive=False) def get_dict(item): value = { key: value for key, value in item.__dict__.items() if key != "_sa_instance_state" } return value data = {} for table_name, record in Base.classes.items(): data[table_name] = [get_dict(r) for r in session.query(record).all()] with open(SCHEMA_PATH_TEMPLATE.format(name=options["version"]), "wb") as f: pickle.dump(metadata, f, protocol=2) with open(DATA_PATH_TEMPLATE.format(name=options["version"]), "w") as f: json.dump(data, f)
def handle(self, *args, **options): engine = create_engine(get_default_db_string(), convert_unicode=True) metadata = MetaData() app_config = apps.get_app_config("content") # Exclude channelmetadatacache in case we are reflecting an older version of Kolibri table_names = [ model._meta.db_table for name, model in app_config.models.items() if name != "channelmetadatacache" ] metadata.reflect(bind=engine, only=table_names) Base = automap_base(metadata=metadata) # TODO map relationship backreferences using the django names Base.prepare() session = sessionmaker(bind=engine, autoflush=False)() # Load fixture data into the test database with Django call_command("loaddata", "content_import_test.json", interactive=False) def get_dict(item): value = { key: value for key, value in item.__dict__.items() if key != "_sa_instance_state" } return value data = {} for table_name, record in Base.classes.items(): data[table_name] = [get_dict(r) for r in session.query(record).all()] with open(SCHEMA_PATH_TEMPLATE.format(name=options["version"]), "wb") as f: pickle.dump(metadata, f, protocol=2) with open(DATA_PATH_TEMPLATE.format(name=options["version"]), "wb") as f: json.dump(data, f)
https://github.com/learningequality/kolibri/issues/2381
didi@didi-VirtualBox:~/Desktop$ ./kolibri-0.6.dev020171007224512-git.pex manage importchannel -- network fb51dae6df7545af8455aa3a0c32048d INFO Version was 0.6.dev020171009005906-export, new version: 0.6.dev020171009010641-export INFO Running update routines for new version... 0 static files copied to '/home/didi/.kolibri/static', 405 unmodified. Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, device, logger Running migrations: No migrations to apply. Your models have changes that are not yet reflected in a migration, and so won't be applied. Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them. INFO Downloading data for channel id fb51dae6df7545af8455aa3a0c32048d 100%|███████████████████████████████████████████████████████████████████████████████| 421888/421888 [00:00<00:00, 902015.67it/s]INFO Importing ContentTag data INFO:kolibri.content.utils.channel_import:Importing ContentTag data INFO Importing ContentNode_has_prerequisite data INFO:kolibri.content.utils.channel_import:Importing ContentNode_has_prerequisite data INFO Importing ContentNode_related data INFO:kolibri.content.utils.channel_import:Importing ContentNode_related data INFO Importing ContentNode_tags data INFO:kolibri.content.utils.channel_import:Importing ContentNode_tags data INFO Importing ContentNode data INFO:kolibri.content.utils.channel_import:Importing ContentNode data Traceback (most recent call last): File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 361, in execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 289, in _wrap_coverage File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 321, in _wrap_profiling File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 404, in _execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 462, in execute_entry File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 467, in execute_module File "/usr/lib/python2.7/runpy.py", line 192, in run_module fname, loader, pkg_name) File "/usr/lib/python2.7/runpy.py", line 72, in _run_code exec code in run_globals File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/__main__.py", line 8, in <module> main(args=sys.argv[1:]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 616, in main manage(command, args=django_args) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 447, in manage execute_from_command_line(argv=argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 353, in execute_from_command_line utility.execute() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 345, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 348, in run_from_argv self.execute(*args, **cmd_options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 399, in execute output = self.handle(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/tasks/management/commands/base.py", line 99, in handle return self.handle_async(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 89, in handle_async self.download_channel(options["channel_id"]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 42, in download_channel self._transfer(DOWNLOAD_METHOD, channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 79, in _transfer channel_import.import_channel_from_local_db(channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 326, in import_channel_from_local_db import_manager.import_channel_data() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 197, in import_channel_data unflushed_rows = self.table_import(model, row_mapper, table_mapper, unflushed_rows) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in table_import str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in <dictcomp> str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 123, in mapper return getattr(self, col_map)(record) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 297, in get_license_description license = self.get_license(SourceRecord) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 288, in get_license license = self.source.session.query(LicenseRecord).get(license_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 864, in get return self._get_impl(ident, loading.load_on_ident) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 875, in _get_impl if len(ident) != len(mapper.primary_key): TypeError: object of type 'NoneType' has no len() didi@didi-VirtualBox:~/Desktop$
TypeError
def get_license(self, SourceRecord): license_id = SourceRecord.license_id if not license_id: return None if license_id not in self.licenses: LicenseRecord = self.source.get_class(License) license = self.source.session.query(LicenseRecord).get(license_id) self.licenses[license_id] = license return self.licenses[license_id]
def get_license(self, SourceRecord): license_id = SourceRecord.license_id if license_id not in self.licenses: LicenseRecord = self.source.get_class(License) license = self.source.session.query(LicenseRecord).get(license_id) self.licenses[license_id] = license return self.licenses[license_id]
https://github.com/learningequality/kolibri/issues/2381
didi@didi-VirtualBox:~/Desktop$ ./kolibri-0.6.dev020171007224512-git.pex manage importchannel -- network fb51dae6df7545af8455aa3a0c32048d INFO Version was 0.6.dev020171009005906-export, new version: 0.6.dev020171009010641-export INFO Running update routines for new version... 0 static files copied to '/home/didi/.kolibri/static', 405 unmodified. Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, device, logger Running migrations: No migrations to apply. Your models have changes that are not yet reflected in a migration, and so won't be applied. Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them. INFO Downloading data for channel id fb51dae6df7545af8455aa3a0c32048d 100%|███████████████████████████████████████████████████████████████████████████████| 421888/421888 [00:00<00:00, 902015.67it/s]INFO Importing ContentTag data INFO:kolibri.content.utils.channel_import:Importing ContentTag data INFO Importing ContentNode_has_prerequisite data INFO:kolibri.content.utils.channel_import:Importing ContentNode_has_prerequisite data INFO Importing ContentNode_related data INFO:kolibri.content.utils.channel_import:Importing ContentNode_related data INFO Importing ContentNode_tags data INFO:kolibri.content.utils.channel_import:Importing ContentNode_tags data INFO Importing ContentNode data INFO:kolibri.content.utils.channel_import:Importing ContentNode data Traceback (most recent call last): File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 361, in execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 289, in _wrap_coverage File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 321, in _wrap_profiling File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 404, in _execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 462, in execute_entry File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 467, in execute_module File "/usr/lib/python2.7/runpy.py", line 192, in run_module fname, loader, pkg_name) File "/usr/lib/python2.7/runpy.py", line 72, in _run_code exec code in run_globals File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/__main__.py", line 8, in <module> main(args=sys.argv[1:]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 616, in main manage(command, args=django_args) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 447, in manage execute_from_command_line(argv=argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 353, in execute_from_command_line utility.execute() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 345, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 348, in run_from_argv self.execute(*args, **cmd_options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 399, in execute output = self.handle(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/tasks/management/commands/base.py", line 99, in handle return self.handle_async(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 89, in handle_async self.download_channel(options["channel_id"]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 42, in download_channel self._transfer(DOWNLOAD_METHOD, channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 79, in _transfer channel_import.import_channel_from_local_db(channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 326, in import_channel_from_local_db import_manager.import_channel_data() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 197, in import_channel_data unflushed_rows = self.table_import(model, row_mapper, table_mapper, unflushed_rows) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in table_import str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in <dictcomp> str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 123, in mapper return getattr(self, col_map)(record) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 297, in get_license_description license = self.get_license(SourceRecord) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 288, in get_license license = self.source.session.query(LicenseRecord).get(license_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 864, in get return self._get_impl(ident, loading.load_on_ident) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 875, in _get_impl if len(ident) != len(mapper.primary_key): TypeError: object of type 'NoneType' has no len() didi@didi-VirtualBox:~/Desktop$
TypeError
def get_license_name(self, SourceRecord): license = self.get_license(SourceRecord) if not license: return None return license.license_name
def get_license_name(self, SourceRecord): license = self.get_license(SourceRecord) return license.license_name
https://github.com/learningequality/kolibri/issues/2381
didi@didi-VirtualBox:~/Desktop$ ./kolibri-0.6.dev020171007224512-git.pex manage importchannel -- network fb51dae6df7545af8455aa3a0c32048d INFO Version was 0.6.dev020171009005906-export, new version: 0.6.dev020171009010641-export INFO Running update routines for new version... 0 static files copied to '/home/didi/.kolibri/static', 405 unmodified. Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, device, logger Running migrations: No migrations to apply. Your models have changes that are not yet reflected in a migration, and so won't be applied. Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them. INFO Downloading data for channel id fb51dae6df7545af8455aa3a0c32048d 100%|███████████████████████████████████████████████████████████████████████████████| 421888/421888 [00:00<00:00, 902015.67it/s]INFO Importing ContentTag data INFO:kolibri.content.utils.channel_import:Importing ContentTag data INFO Importing ContentNode_has_prerequisite data INFO:kolibri.content.utils.channel_import:Importing ContentNode_has_prerequisite data INFO Importing ContentNode_related data INFO:kolibri.content.utils.channel_import:Importing ContentNode_related data INFO Importing ContentNode_tags data INFO:kolibri.content.utils.channel_import:Importing ContentNode_tags data INFO Importing ContentNode data INFO:kolibri.content.utils.channel_import:Importing ContentNode data Traceback (most recent call last): File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 361, in execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 289, in _wrap_coverage File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 321, in _wrap_profiling File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 404, in _execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 462, in execute_entry File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 467, in execute_module File "/usr/lib/python2.7/runpy.py", line 192, in run_module fname, loader, pkg_name) File "/usr/lib/python2.7/runpy.py", line 72, in _run_code exec code in run_globals File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/__main__.py", line 8, in <module> main(args=sys.argv[1:]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 616, in main manage(command, args=django_args) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 447, in manage execute_from_command_line(argv=argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 353, in execute_from_command_line utility.execute() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 345, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 348, in run_from_argv self.execute(*args, **cmd_options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 399, in execute output = self.handle(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/tasks/management/commands/base.py", line 99, in handle return self.handle_async(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 89, in handle_async self.download_channel(options["channel_id"]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 42, in download_channel self._transfer(DOWNLOAD_METHOD, channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 79, in _transfer channel_import.import_channel_from_local_db(channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 326, in import_channel_from_local_db import_manager.import_channel_data() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 197, in import_channel_data unflushed_rows = self.table_import(model, row_mapper, table_mapper, unflushed_rows) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in table_import str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in <dictcomp> str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 123, in mapper return getattr(self, col_map)(record) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 297, in get_license_description license = self.get_license(SourceRecord) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 288, in get_license license = self.source.session.query(LicenseRecord).get(license_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 864, in get return self._get_impl(ident, loading.load_on_ident) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 875, in _get_impl if len(ident) != len(mapper.primary_key): TypeError: object of type 'NoneType' has no len() didi@didi-VirtualBox:~/Desktop$
TypeError
def get_license_description(self, SourceRecord): license = self.get_license(SourceRecord) if not license: return None return license.license_description
def get_license_description(self, SourceRecord): license = self.get_license(SourceRecord) return license.license_description
https://github.com/learningequality/kolibri/issues/2381
didi@didi-VirtualBox:~/Desktop$ ./kolibri-0.6.dev020171007224512-git.pex manage importchannel -- network fb51dae6df7545af8455aa3a0c32048d INFO Version was 0.6.dev020171009005906-export, new version: 0.6.dev020171009010641-export INFO Running update routines for new version... 0 static files copied to '/home/didi/.kolibri/static', 405 unmodified. Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, device, logger Running migrations: No migrations to apply. Your models have changes that are not yet reflected in a migration, and so won't be applied. Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them. INFO Downloading data for channel id fb51dae6df7545af8455aa3a0c32048d 100%|███████████████████████████████████████████████████████████████████████████████| 421888/421888 [00:00<00:00, 902015.67it/s]INFO Importing ContentTag data INFO:kolibri.content.utils.channel_import:Importing ContentTag data INFO Importing ContentNode_has_prerequisite data INFO:kolibri.content.utils.channel_import:Importing ContentNode_has_prerequisite data INFO Importing ContentNode_related data INFO:kolibri.content.utils.channel_import:Importing ContentNode_related data INFO Importing ContentNode_tags data INFO:kolibri.content.utils.channel_import:Importing ContentNode_tags data INFO Importing ContentNode data INFO:kolibri.content.utils.channel_import:Importing ContentNode data Traceback (most recent call last): File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 361, in execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 289, in _wrap_coverage File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 321, in _wrap_profiling File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 404, in _execute File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 462, in execute_entry File "/home/didi/Desktop/kolibri-0.6.dev020171007224512-git.pex/.bootstrap/_pex/pex.py", line 467, in execute_module File "/usr/lib/python2.7/runpy.py", line 192, in run_module fname, loader, pkg_name) File "/usr/lib/python2.7/runpy.py", line 72, in _run_code exec code in run_globals File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/__main__.py", line 8, in <module> main(args=sys.argv[1:]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 616, in main manage(command, args=django_args) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/utils/cli.py", line 447, in manage execute_from_command_line(argv=argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 353, in execute_from_command_line utility.execute() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/__init__.py", line 345, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 348, in run_from_argv self.execute(*args, **cmd_options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/django/core/management/base.py", line 399, in execute output = self.handle(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/tasks/management/commands/base.py", line 99, in handle return self.handle_async(*args, **options) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 89, in handle_async self.download_channel(options["channel_id"]) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 42, in download_channel self._transfer(DOWNLOAD_METHOD, channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/management/commands/importchannel.py", line 79, in _transfer channel_import.import_channel_from_local_db(channel_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 326, in import_channel_from_local_db import_manager.import_channel_data() File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 197, in import_channel_data unflushed_rows = self.table_import(model, row_mapper, table_mapper, unflushed_rows) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in table_import str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 167, in <dictcomp> str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 123, in mapper return getattr(self, col_map)(record) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 297, in get_license_description license = self.get_license(SourceRecord) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/content/utils/channel_import.py", line 288, in get_license license = self.source.session.query(LicenseRecord).get(license_id) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 864, in get return self._get_impl(ident, loading.load_on_ident) File "/home/didi/.pex/install/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl.937a9da87197a4a2349e3f6bdc8ebe4334b02233/kolibri-0.6.dev020171007224512_git-py2.py3-none-any.whl/kolibri/dist/sqlalchemy/orm/query.py", line 875, in _get_impl if len(ident) != len(mapper.primary_key): TypeError: object of type 'NoneType' has no len() didi@didi-VirtualBox:~/Desktop$
TypeError
def ready(self): global client client = SimpleClient(app="kolibri", storage_path=settings.QUEUE_JOB_STORAGE_PATH) client.clear(force=True)
def ready(self): from kolibri.tasks.api import client client.clear(force=True)
https://github.com/learningequality/kolibri/issues/1786
osboxes@osboxes:~$ cd Desktop/ osboxes@osboxes:~/Desktop$ ./kolibri-v0.5.0-beta1.pex start INFO Kolibri running for the first time. INFO We don't yet use pre-migrated database seeds, so you're going to have to wait a bit while we create a blank database... Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, logger Running migrations: Rendering model states... DONE Applying kolibriauth.0001_initial... OK Applying contenttypes.0001_initial... OK Applying admin.0001_initial... OK Applying admin.0002_logentry_remove_auto_add... OK Applying contenttypes.0002_remove_content_type_name... OK Applying auth.0001_initial... OK Applying auth.0002_alter_permission_name_max_length... OK Applying auth.0003_alter_user_email_max_length... OK Applying auth.0004_alter_user_username_opts... OK Applying auth.0005_alter_user_last_login_null... OK Applying auth.0006_require_contenttypes_0002... OK Applying auth.0007_alter_validators_add_error_messages... OK Applying content.0001_initial... OK Applying content.0002_channelmetadatacache_last_updated... OK Applying content.0003_auto_20170607_1212... OK Applying exams.0001_initial... OK Applying kolibriauth.0002_auto_20170608_2125... OK Applying kolibriauth.0003_auto_20170621_0958... OK Applying logger.0001_initial... OK Applying logger.0002_auto_20170518_1031... OK Applying logger.0003_auto_20170531_1140... OK Applying morango.0001_initial... OK Applying morango.0002_auto_20170511_0400... OK Applying morango.0003_auto_20170519_0543... OK Applying morango.0004_auto_20170520_2112... OK Applying morango.0005_auto_20170629_2139... OK Applying morango.0006_instanceidmodel_system_id... OK Applying sessions.0001_initial... OK Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, logger Running migrations: Rendering model states... DONE Applying kolibriauth.0001_initial... OK Applying contenttypes.0001_initial... OK Applying admin.0001_initial... OK Applying admin.0002_logentry_remove_auto_add... OK Applying contenttypes.0002_remove_content_type_name... OK Applying auth.0001_initial... OK Applying auth.0002_alter_permission_name_max_length... OK Applying auth.0003_alter_user_email_max_length... OK Applying auth.0004_alter_user_username_opts... OK Applying auth.0005_alter_user_last_login_null... OK Applying auth.0006_require_contenttypes_0002... OK Applying auth.0007_alter_validators_add_error_messages... OK Applying content.0001_initial... OK Applying content.0002_channelmetadatacache_last_updated... OK Applying content.0003_auto_20170607_1212... OK Applying exams.0001_initial... OK Applying kolibriauth.0002_auto_20170608_2125... OK Applying kolibriauth.0003_auto_20170621_0958... OK Applying logger.0001_initial... OK Applying logger.0002_auto_20170518_1031... OK Applying logger.0003_auto_20170531_1140... OK Applying morango.0001_initial... OK Applying morango.0002_auto_20170511_0400... OK Applying morango.0003_auto_20170519_0543... OK Applying morango.0004_auto_20170520_2112... OK Applying morango.0005_auto_20170629_2139... OK Applying morango.0006_instanceidmodel_system_id... OK Applying sessions.0001_initial... OK INFO Automatically enabling applications. INFO:kolibri.utils.cli:Automatically enabling applications. INFO Running update routines for new version... INFO:kolibri.utils.cli:Running update routines for new version... Copying '/home/osboxes/.pex/install/kolibri-0.5.0b1-py2.py3-none-any.whl.b0cce2f7791692dbc00cbf6d667637d4c24cc0b5/kolibri-0.5.0b1-py2.py3-none-any.whl/kolibri/core/static/default_frontend/3-0.5.0beta1.js' Copying '/home/osboxes/.pex/install/kolibri-0.5.0b1-py2.py3-none-any.whl.b0cce2f7791692dbc00cbf6d667637d4c24cc0b5/kolibri-0.5.0b1-py2.py3-none-any.whl/kolibri/core/static/default_frontend/4-0.5.0beta1.js' ... Copying '/home/osboxes/.pex/install/kolibri-0.5.0b1-py2.py3-none-any.whl.b0cce2f7791692dbc00cbf6d667637d4c24cc0b5/kolibri-0.5.0b1-py2.py3-none-any.whl/kolibri/dist/kolibri_exercise_perseus_plugin/static/mathjax/2.1/config/KAthJax-e76365d62c31e1cbafe939bf94aa8497.js' 377 static files copied to '/home/osboxes/.kolibri/static'. Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, logger Running migrations: No migrations to apply. Your models have changes that are not yet reflected in a migration, and so won't be applied. Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them. Operations to perform: Apply all migrations: sessions, admin, kolibriauth, morango, auth, content, contenttypes, exams, logger Running migrations: No migrations to apply. Your models have changes that are not yet reflected in a migration, and so won't be applied. Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them. INFO Running 'kolibri start' as daemon (system service) INFO:kolibri.utils.cli:Running 'kolibri start' as daemon (system service) INFO Going to daemon mode, logging to /home/osboxes/.kolibri/server.log INFO:kolibri.utils.cli:Going to daemon mode, logging to /home/osboxes/.kolibri/server.log Exception in thread MESSAGEPROCESSOR-bluebird-uniform-yellow-fifteen (most likely raised during interpreter shutdown):Exception in thread WORKERMESSAGEHANDLER-missouri-pip-football-moon (most likely raised during interpreter shutdown):Exception in thread SCHEDULER-wolfram-mobile-victor-gee (most likely raised during interpreter shutdown): Traceback (most recent call last):Traceback (most recent call last): File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner File "/home/osboxes/.pex/install/kolibri-0.5.0b1-py2.py3-none-any.whl.b0cce2f7791692dbc00cbf6d667637d4c24cc0b5/kolibri-0.5.0b1-py2.py3-none-any.whl/kolibri/dist/barbequeue/common/utils.py", line 53, in runTraceback (most recent call last): File "/home/osboxes/.pex/install/kolibri-0.5.0b1-py2.py3-none-any.whl.b0cce2f7791692dbc00cbf6d667637d4c24cc0b5/kolibri-0.5.0b1-py2.py3-none-any.whl/kolibri/dist/barbequeue/common/utils.py", line 53, in run File "/usr/lib/python2.7/threading.py", line 614, in wait File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner File "/usr/lib/python2.7/threading.py", line 614, in wait File "/usr/lib/python2.7/threading.py", line 355, in wait File "/home/osboxes/.pex/install/kolibri-0.5.0b1-py2.py3-none-any.whl.b0cce2f7791692dbc00cbf6d667637d4c24cc0b5/kolibri-0.5.0b1-py2.py3-none-any.whl/kolibri/dist/barbequeue/common/utils.py", line 53, in run File "/usr/lib/python2.7/threading.py", line 355, in wait <type 'exceptions.TypeError'>: 'NoneType' object is not callable File "/usr/lib/python2.7/threading.py", line 614, in wait<type 'exceptions.TypeError'>: 'NoneType' object is not callable File "/usr/lib/python2.7/threading.py", line 355, in wait <type 'exceptions.TypeError'>: 'NoneType' object is not callable osboxes@osboxes:~/Desktop$
exceptions.TypeError
def error_received(self, exc): # pragma: no cover if self.recvfrom and not self.recvfrom.done(): self.recvfrom.set_exception(exc)
def error_received(self, exc): # pragma: no cover if self.recvfrom: self.recvfrom.set_exception(exc)
https://github.com/rthalley/dnspython/issues/572
this assertion passes: this assertion passes, but it's noisy: Exception in callback _SelectorTransport._call_connection_lost(None) handle: <Handle _SelectorTransport._call_connection_lost(None)> Traceback (most recent call last): File "C:\Miniconda3\envs\py37\lib\asyncio\events.py", line 88, in _run self._context.run(self._callback, *self._args) File "C:\Miniconda3\envs\py37\lib\asyncio\selector_events.py", line 700, in _call_connection_lost self._protocol.connection_lost(exc) File "C:\Miniconda3\envs\py37\lib\site-packages\dns\_asyncio_backend.py", line 38, in connection_lost self.recvfrom.set_exception(exc) asyncio.base_futures.InvalidStateError: invalid state Exception in callback _SelectorTransport._call_connection_lost(None) handle: <Handle _SelectorTransport._call_connection_lost(None)> Traceback (most recent call last): File "C:\Miniconda3\envs\py37\lib\asyncio\events.py", line 88, in _run self._context.run(self._callback, *self._args) File "C:\Miniconda3\envs\py37\lib\asyncio\selector_events.py", line 700, in _call_connection_lost self._protocol.connection_lost(exc) File "C:\Miniconda3\envs\py37\lib\site-packages\dns\_asyncio_backend.py", line 38, in connection_lost self.recvfrom.set_exception(exc) asyncio.base_futures.InvalidStateError: invalid state Exception in callback _SelectorTransport._call_connection_lost(None) handle: <Handle _SelectorTransport._call_connection_lost(None)> Traceback (most recent call last): File "C:\Miniconda3\envs\py37\lib\asyncio\events.py", line 88, in _run self._context.run(self._callback, *self._args) File "C:\Miniconda3\envs\py37\lib\asyncio\selector_events.py", line 700, in _call_connection_lost self._protocol.connection_lost(exc) File "C:\Miniconda3\envs\py37\lib\site-packages\dns\_asyncio_backend.py", line 38, in connection_lost self.recvfrom.set_exception(exc) asyncio.base_futures.InvalidStateError: invalid state passed
asyncio.base_futures.InvalidStateError
def connection_lost(self, exc): if self.recvfrom and not self.recvfrom.done(): self.recvfrom.set_exception(exc)
def connection_lost(self, exc): if self.recvfrom: self.recvfrom.set_exception(exc)
https://github.com/rthalley/dnspython/issues/572
this assertion passes: this assertion passes, but it's noisy: Exception in callback _SelectorTransport._call_connection_lost(None) handle: <Handle _SelectorTransport._call_connection_lost(None)> Traceback (most recent call last): File "C:\Miniconda3\envs\py37\lib\asyncio\events.py", line 88, in _run self._context.run(self._callback, *self._args) File "C:\Miniconda3\envs\py37\lib\asyncio\selector_events.py", line 700, in _call_connection_lost self._protocol.connection_lost(exc) File "C:\Miniconda3\envs\py37\lib\site-packages\dns\_asyncio_backend.py", line 38, in connection_lost self.recvfrom.set_exception(exc) asyncio.base_futures.InvalidStateError: invalid state Exception in callback _SelectorTransport._call_connection_lost(None) handle: <Handle _SelectorTransport._call_connection_lost(None)> Traceback (most recent call last): File "C:\Miniconda3\envs\py37\lib\asyncio\events.py", line 88, in _run self._context.run(self._callback, *self._args) File "C:\Miniconda3\envs\py37\lib\asyncio\selector_events.py", line 700, in _call_connection_lost self._protocol.connection_lost(exc) File "C:\Miniconda3\envs\py37\lib\site-packages\dns\_asyncio_backend.py", line 38, in connection_lost self.recvfrom.set_exception(exc) asyncio.base_futures.InvalidStateError: invalid state Exception in callback _SelectorTransport._call_connection_lost(None) handle: <Handle _SelectorTransport._call_connection_lost(None)> Traceback (most recent call last): File "C:\Miniconda3\envs\py37\lib\asyncio\events.py", line 88, in _run self._context.run(self._callback, *self._args) File "C:\Miniconda3\envs\py37\lib\asyncio\selector_events.py", line 700, in _call_connection_lost self._protocol.connection_lost(exc) File "C:\Miniconda3\envs\py37\lib\site-packages\dns\_asyncio_backend.py", line 38, in connection_lost self.recvfrom.set_exception(exc) asyncio.base_futures.InvalidStateError: invalid state passed
asyncio.base_futures.InvalidStateError
def _destination_and_source(af, where, port, source, source_port, default_to_inet=True): # Apply defaults and compute destination and source tuples # suitable for use in connect(), sendto(), or bind(). if af is None: try: af = dns.inet.af_for_address(where) except Exception: if default_to_inet: af = dns.inet.AF_INET if af == dns.inet.AF_INET: destination = (where, port) if source is not None or source_port != 0: if source is None: source = "0.0.0.0" source = (source, source_port) elif af == dns.inet.AF_INET6: ai_flags = socket.AI_NUMERICHOST ((*_, destination), *_) = socket.getaddrinfo(where, port, flags=ai_flags) if source is not None or source_port != 0: if source is None: source = "::" ((*_, source), *_) = socket.getaddrinfo(source, source_port, flags=ai_flags) else: source = None destination = None return (af, destination, source)
def _destination_and_source(af, where, port, source, source_port, default_to_inet=True): # Apply defaults and compute destination and source tuples # suitable for use in connect(), sendto(), or bind(). if af is None: try: af = dns.inet.af_for_address(where) except Exception: if default_to_inet: af = dns.inet.AF_INET if af == dns.inet.AF_INET: destination = (where, port) if source is not None or source_port != 0: if source is None: source = "0.0.0.0" source = (source, source_port) elif af == dns.inet.AF_INET6: destination = (where, port, 0, 0) if source is not None or source_port != 0: if source is None: source = "::" source = (source, source_port, 0, 0) else: source = None destination = None return (af, destination, source)
https://github.com/rthalley/dnspython/issues/283
% python3 Python 3.6.3 (default, Oct 7 2017, 10:06:24) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.37)] on darwin Type "help", "copyright", "credits" or "license" for more information. import dns.resolver r=dns.resolver.Resolver() r.nameservers=['fe80::20c:29ff:fe66:3eac%en1'] r.lifetime=2 r.query('yahoo.com') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/farrokhi/workspace/dnsdiag/.venv/lib/python3.6/site-packages/dns/resolver.py", line 949, in query timeout = self._compute_timeout(start) File "/Users/farrokhi/workspace/dnsdiag/.venv/lib/python3.6/site-packages/dns/resolver.py", line 858, in _compute_timeout raise Timeout(timeout=duration) dns.exception.Timeout: The DNS operation timed out after 2.0031638145446777 seconds
dns.exception.Timeout
def xfr( where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN, timeout=None, port=53, keyring=None, keyname=None, relativize=True, af=None, lifetime=None, source=None, source_port=0, serial=0, use_udp=False, keyalgorithm=dns.tsig.default_algorithm, ): """Return a generator for the responses to a zone transfer. *where*. If the inference attempt fails, AF_INET is used. This parameter is historical; you need never set it. *zone*, a ``dns.name.Name`` or ``text``, the name of the zone to transfer. *rdtype*, an ``int`` or ``text``, the type of zone transfer. The default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be used to do an incremental transfer instead. *rdclass*, an ``int`` or ``text``, the class of the zone transfer. The default is ``dns.rdataclass.IN``. *timeout*, a ``float``, the number of seconds to wait for each response message. If None, the default, wait forever. *port*, an ``int``, the port send the message to. The default is 53. *keyring*, a ``dict``, the keyring to use for TSIG. *keyname*, a ``dns.name.Name`` or ``text``, the name of the TSIG key to use. *relativize*, a ``bool``. If ``True``, all names in the zone will be relativized to the zone origin. It is essential that the relativize setting matches the one specified to ``dns.zone.from_xfr()`` if using this generator to make a zone. *af*, an ``int``, the address family to use. The default is ``None``, which causes the address family to use to be inferred from the form of *where*. If the inference attempt fails, AF_INET is used. This parameter is historical; you need never set it. *lifetime*, a ``float``, the total number of seconds to spend doing the transfer. If ``None``, the default, then there is no limit on the time the transfer may take. *source*, a ``text`` containing an IPv4 or IPv6 address, specifying the source address. The default is the wildcard address. *source_port*, an ``int``, the port from which to send the message. The default is 0. *serial*, an ``int``, the SOA serial number to use as the base for an IXFR diff sequence (only meaningful if *rdtype* is ``dns.rdatatype.IXFR``). *use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR). *keyalgorithm*, a ``dns.name.Name`` or ``text``, the TSIG algorithm to use. Raises on errors, and so does the generator. Returns a generator of ``dns.message.Message`` objects. """ if isinstance(zone, str): zone = dns.name.from_text(zone) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) q = dns.message.make_query(zone, rdtype, rdclass) if rdtype == dns.rdatatype.IXFR: rrset = dns.rrset.from_text(zone, 0, "IN", "SOA", ". . %u 0 0 0 0" % serial) q.authority.append(rrset) if keyring is not None: q.use_tsig(keyring, keyname, algorithm=keyalgorithm) wire = q.to_wire() (af, destination, source) = _destination_and_source( af, where, port, source, source_port ) if use_udp: if rdtype != dns.rdatatype.IXFR: raise ValueError("cannot do a UDP AXFR") s = socket_factory(af, socket.SOCK_DGRAM, 0) else: s = socket_factory(af, socket.SOCK_STREAM, 0) s.setblocking(0) if source is not None: s.bind(source) expiration = _compute_expiration(lifetime) _connect(s, destination, expiration) l = len(wire) if use_udp: _wait_for_writable(s, expiration) s.send(wire) else: tcpmsg = struct.pack("!H", l) + wire _net_write(s, tcpmsg, expiration) done = False delete_mode = True expecting_SOA = False soa_rrset = None if relativize: origin = zone oname = dns.name.empty else: origin = None oname = zone tsig_ctx = None first = True while not done: mexpiration = _compute_expiration(timeout) if mexpiration is None or (expiration is not None and mexpiration > expiration): mexpiration = expiration if use_udp: _wait_for_readable(s, expiration) (wire, from_address) = s.recvfrom(65535) else: ldata = _net_read(s, 2, mexpiration) (l,) = struct.unpack("!H", ldata) wire = _net_read(s, l, mexpiration) is_ixfr = rdtype == dns.rdatatype.IXFR r = dns.message.from_wire( wire, keyring=q.keyring, request_mac=q.mac, xfr=True, origin=origin, tsig_ctx=tsig_ctx, multi=True, first=first, one_rr_per_rrset=is_ixfr, ) rcode = r.rcode() if rcode != dns.rcode.NOERROR: raise TransferError(rcode) tsig_ctx = r.tsig_ctx first = False answer_index = 0 if soa_rrset is None: if not r.answer or r.answer[0].name != oname: raise dns.exception.FormError("No answer or RRset not for qname") rrset = r.answer[0] if rrset.rdtype != dns.rdatatype.SOA: raise dns.exception.FormError("first RRset is not an SOA") answer_index = 1 soa_rrset = rrset.copy() if rdtype == dns.rdatatype.IXFR: if soa_rrset[0].serial <= serial: # # We're already up-to-date. # done = True else: expecting_SOA = True # # Process SOAs in the answer section (other than the initial # SOA in the first message). # for rrset in r.answer[answer_index:]: if done: raise dns.exception.FormError("answers after final SOA") if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname: if expecting_SOA: if rrset[0].serial != serial: raise dns.exception.FormError("IXFR base serial mismatch") expecting_SOA = False elif rdtype == dns.rdatatype.IXFR: delete_mode = not delete_mode # # If this SOA RRset is equal to the first we saw then we're # finished. If this is an IXFR we also check that we're seeing # the record in the expected part of the response. # if rrset == soa_rrset and ( rdtype == dns.rdatatype.AXFR or (rdtype == dns.rdatatype.IXFR and delete_mode) ): done = True elif expecting_SOA: # # We made an IXFR request and are expecting another # SOA RR, but saw something else, so this must be an # AXFR response. # rdtype = dns.rdatatype.AXFR expecting_SOA = False if done and q.keyring and not r.had_tsig: raise dns.exception.FormError("missing TSIG") yield r s.close()
def xfr( where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN, timeout=None, port=53, keyring=None, keyname=None, relativize=True, af=None, lifetime=None, source=None, source_port=0, serial=0, use_udp=False, keyalgorithm=dns.tsig.default_algorithm, ): """Return a generator for the responses to a zone transfer. *where*. If the inference attempt fails, AF_INET is used. This parameter is historical; you need never set it. *zone*, a ``dns.name.Name`` or ``text``, the name of the zone to transfer. *rdtype*, an ``int`` or ``text``, the type of zone transfer. The default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be used to do an incremental transfer instead. *rdclass*, an ``int`` or ``text``, the class of the zone transfer. The default is ``dns.rdataclass.IN``. *timeout*, a ``float``, the number of seconds to wait for each response message. If None, the default, wait forever. *port*, an ``int``, the port send the message to. The default is 53. *keyring*, a ``dict``, the keyring to use for TSIG. *keyname*, a ``dns.name.Name`` or ``text``, the name of the TSIG key to use. *relativize*, a ``bool``. If ``True``, all names in the zone will be relativized to the zone origin. It is essential that the relativize setting matches the one specified to ``dns.zone.from_xfr()`` if using this generator to make a zone. *af*, an ``int``, the address family to use. The default is ``None``, which causes the address family to use to be inferred from the form of *where*. If the inference attempt fails, AF_INET is used. This parameter is historical; you need never set it. *lifetime*, a ``float``, the total number of seconds to spend doing the transfer. If ``None``, the default, then there is no limit on the time the transfer may take. *source*, a ``text`` containing an IPv4 or IPv6 address, specifying the source address. The default is the wildcard address. *source_port*, an ``int``, the port from which to send the message. The default is 0. *serial*, an ``int``, the SOA serial number to use as the base for an IXFR diff sequence (only meaningful if *rdtype* is ``dns.rdatatype.IXFR``). *use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR). *keyalgorithm*, a ``dns.name.Name`` or ``text``, the TSIG algorithm to use. Raises on errors, and so does the generator. Returns a generator of ``dns.message.Message`` objects. """ if isinstance(zone, str): zone = dns.name.from_text(zone) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) q = dns.message.make_query(zone, rdtype, rdclass) if rdtype == dns.rdatatype.IXFR: rrset = dns.rrset.from_text(zone, 0, "IN", "SOA", ". . %u 0 0 0 0" % serial) q.authority.append(rrset) if keyring is not None: q.use_tsig(keyring, keyname, algorithm=keyalgorithm) wire = q.to_wire() (af, destination, source) = _destination_and_source( af, where, port, source, source_port ) if use_udp: if rdtype != dns.rdatatype.IXFR: raise ValueError("cannot do a UDP AXFR") s = socket_factory(af, socket.SOCK_DGRAM, 0) else: s = socket_factory(af, socket.SOCK_STREAM, 0) s.setblocking(0) if source is not None: s.bind(source) expiration = _compute_expiration(lifetime) _connect(s, destination, expiration) l = len(wire) if use_udp: _wait_for_writable(s, expiration) s.send(wire) else: tcpmsg = struct.pack("!H", l) + wire _net_write(s, tcpmsg, expiration) done = False delete_mode = True expecting_SOA = False soa_rrset = None if relativize: origin = zone oname = dns.name.empty else: origin = None oname = zone tsig_ctx = None first = True while not done: mexpiration = _compute_expiration(timeout) if mexpiration is None or mexpiration > expiration: mexpiration = expiration if use_udp: _wait_for_readable(s, expiration) (wire, from_address) = s.recvfrom(65535) else: ldata = _net_read(s, 2, mexpiration) (l,) = struct.unpack("!H", ldata) wire = _net_read(s, l, mexpiration) is_ixfr = rdtype == dns.rdatatype.IXFR r = dns.message.from_wire( wire, keyring=q.keyring, request_mac=q.mac, xfr=True, origin=origin, tsig_ctx=tsig_ctx, multi=True, first=first, one_rr_per_rrset=is_ixfr, ) rcode = r.rcode() if rcode != dns.rcode.NOERROR: raise TransferError(rcode) tsig_ctx = r.tsig_ctx first = False answer_index = 0 if soa_rrset is None: if not r.answer or r.answer[0].name != oname: raise dns.exception.FormError("No answer or RRset not for qname") rrset = r.answer[0] if rrset.rdtype != dns.rdatatype.SOA: raise dns.exception.FormError("first RRset is not an SOA") answer_index = 1 soa_rrset = rrset.copy() if rdtype == dns.rdatatype.IXFR: if soa_rrset[0].serial <= serial: # # We're already up-to-date. # done = True else: expecting_SOA = True # # Process SOAs in the answer section (other than the initial # SOA in the first message). # for rrset in r.answer[answer_index:]: if done: raise dns.exception.FormError("answers after final SOA") if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname: if expecting_SOA: if rrset[0].serial != serial: raise dns.exception.FormError("IXFR base serial mismatch") expecting_SOA = False elif rdtype == dns.rdatatype.IXFR: delete_mode = not delete_mode # # If this SOA RRset is equal to the first we saw then we're # finished. If this is an IXFR we also check that we're seeing # the record in the expected part of the response. # if rrset == soa_rrset and ( rdtype == dns.rdatatype.AXFR or (rdtype == dns.rdatatype.IXFR and delete_mode) ): done = True elif expecting_SOA: # # We made an IXFR request and are expecting another # SOA RR, but saw something else, so this must be an # AXFR response. # rdtype = dns.rdatatype.AXFR expecting_SOA = False if done and q.keyring and not r.had_tsig: raise dns.exception.FormError("missing TSIG") yield r s.close()
https://github.com/rthalley/dnspython/issues/390
list(dns.query.xfr('127.0.0.1', 'example', port=12345, timeout=10)) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/bwelling/Library/Python/3.6/lib/python/site-packages/dns/query.py", line 470, in xfr if mexpiration is None or mexpiration > expiration: TypeError: '>' not supported between instances of 'float' and 'NoneType'
TypeError
def __init__(self, rdclass, rdtype, strings): super(TXTBase, self).__init__(rdclass, rdtype) if isinstance(strings, binary_type) or isinstance(strings, string_types): strings = [strings] self.strings = [] for string in strings: if isinstance(string, string_types): string = string.encode() self.strings.append(string)
def __init__(self, rdclass, rdtype, strings): super(TXTBase, self).__init__(rdclass, rdtype) if isinstance(strings, str): strings = [strings] self.strings = strings[:]
https://github.com/rthalley/dnspython/issues/239
from dns.rdtypes.txtbase import TXTBase tb = TXTBase(rdtype='bar', rdclass='ham', strings='foo') tb.to_digestable() Traceback (most recent call last): ..../python3.5/site-packages/dns/rdtypes/txtbase.py", line 75, in to_wire file.write(s) TypeError: a bytes-like object is required, not 'str'
TypeError
def tcp( q, where, timeout=None, port=53, af=None, source=None, source_port=0, one_rr_per_rrset=False, ): """Return the response obtained after sending a query via TCP. *q*, a ``dns.message.message``, the query to send *where*, a ``text`` containing an IPv4 or IPv6 address, where to send the message. *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query times out. If ``None``, the default, wait forever. *port*, an ``int``, the port send the message to. The default is 53. *af*, an ``int``, the address family to use. The default is ``None``, which causes the address family to use to be inferred from the form of *where*. If the inference attempt fails, AF_INET is used. This parameter is historical; you need never set it. *source*, a ``text`` containing an IPv4 or IPv6 address, specifying the source address. The default is the wildcard address. *source_port*, an ``int``, the port from which to send the message. The default is 0. *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset. Returns a ``dns.message.Message``. """ wire = q.to_wire() (af, destination, source) = _destination_and_source( af, where, port, source, source_port ) s = socket_factory(af, socket.SOCK_STREAM, 0) begin_time = None received_time = None try: expiration = _compute_expiration(timeout) s.setblocking(0) begin_time = time.time() if source is not None: s.bind(source) _connect(s, destination) send_tcp(s, wire, expiration) (r, received_time) = receive_tcp( s, expiration, one_rr_per_rrset, q.keyring, q.request_mac ) finally: if begin_time is None or received_time is None: response_time = 0 else: response_time = received_time - begin_time s.close() r.time = response_time if not q.is_response(r): raise BadResponse return r
def tcp( q, where, timeout=None, port=53, af=None, source=None, source_port=0, one_rr_per_rrset=False, ): """Return the response obtained after sending a query via TCP. *q*, a ``dns.message.message``, the query to send *where*, a ``text`` containing an IPv4 or IPv6 address, where to send the message. *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query times out. If ``None``, the default, wait forever. *port*, an ``int``, the port send the message to. The default is 53. *af*, an ``int``, the address family to use. The default is ``None``, which causes the address family to use to be inferred from the form of *where*. If the inference attempt fails, AF_INET is used. This parameter is historical; you need never set it. *source*, a ``text`` containing an IPv4 or IPv6 address, specifying the source address. The default is the wildcard address. *source_port*, an ``int``, the port from which to send the message. The default is 0. *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset. Returns a ``dns.message.Message``. """ wire = q.to_wire() (af, destination, source) = _destination_and_source( af, where, port, source, source_port ) s = socket_factory(af, socket.SOCK_STREAM, 0) begin_time = None try: expiration = _compute_expiration(timeout) s.setblocking(0) begin_time = time.time() if source is not None: s.bind(source) _connect(s, destination) send_tcp(s, wire, expiration) (r, received_time) = receive_tcp( s, expiration, one_rr_per_rrset, q.keyring, q.request_mac ) finally: if begin_time is None or received_time is None: response_time = 0 else: response_time = received_time - begin_time s.close() r.time = response_time if not q.is_response(r): raise BadResponse return r
https://github.com/rthalley/dnspython/issues/228
Traceback (most recent call last): File "/home/frost/.local/lib/python3.5/site-packages/dns/query.py", line 464, in tcp q.keyring, q.request_mac) File "/home/frost/.local/lib/python3.5/site-packages/dns/query.py", line 402, in receive_tcp one_rr_per_rrset=one_rr_per_rrset) File "/home/frost/.local/lib/python3.5/site-packages/dns/message.py", line 821, in from_wire reader.read() File "/home/frost/.local/lib/python3.5/site-packages/dns/message.py", line 749, in read self._get_section(self.message.additional, adcount) File "/home/frost/.local/lib/python3.5/site-packages/dns/message.py", line 701, in _get_section self.message.first) File "/home/frost/.local/lib/python3.5/site-packages/dns/tsig.py", line 198, in validate raise BadSignature dns.tsig.BadSignature: The TSIG signature fails to verify. During handling of the above exception, another exception occurred: Traceback (most recent call last): File "./runtests.py", line 195, in main importlib.import_module("%s.%s.%s.test" % (TESTS_DIR, test, case)) File "/usr/lib/python3.5/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 986, in _gcd_import File "<frozen importlib._bootstrap>", line 969, in _find_and_load File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 673, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 665, in exec_module File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed File "/media/frost/StaticDataT420/work/ragel/master/tests-extra/tests/tsig/ddns/test.py", line 17, in <mod update.send("NOERROR") File "/media/frost/StaticDataT420/work/ragel/master/tests-extra/tools/dnstest/update.py", line 38, in send resp = dns.query.tcp(self.upd, self.server.addr, port=self.server.port) File "/home/frost/.local/lib/python3.5/site-packages/dns/query.py", line 466, in tcp if begin_time is None or received_time is None: UnboundLocalError: local variable 'received_time' referenced before assignment
UnboundLocalError
def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True): """Initialize a zone object. @param origin: The origin of the zone. @type origin: dns.name.Name object @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int""" if origin is not None: if isinstance(origin, string_types): origin = dns.name.from_text(origin) elif not isinstance(origin, dns.name.Name): raise ValueError("origin parameter must be convertable to a DNS name") if not origin.is_absolute(): raise ValueError("origin parameter must be an absolute name") self.origin = origin self.rdclass = rdclass self.nodes = {} self.relativize = relativize
def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True): """Initialize a zone object. @param origin: The origin of the zone. @type origin: dns.name.Name object @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int""" if isinstance(origin, string_types): origin = dns.name.from_text(origin) elif not isinstance(origin, dns.name.Name): raise ValueError("origin parameter must be convertable to a DNS name") if not origin.is_absolute(): raise ValueError("origin parameter must be an absolute name") self.origin = origin self.rdclass = rdclass self.nodes = {} self.relativize = relativize
https://github.com/rthalley/dnspython/issues/153
ValueError Traceback (most recent call last) <ipython-input-7-58a197c0fdca> in <module>() 7 print zone_text 8 ----> 9 zone = dns.zone.from_text(zone_text) /Users/graham/venvs/dnspython-testin/lib/python2.7/site-packages/dns/zone.pyc in from_text(text, origin, rdclass, relativize, zone_factory, filename, allow_include, check_origin) 964 reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory, 965 allow_include=allow_include, --> 966 check_origin=check_origin) 967 reader.read() 968 return reader.zone /Users/graham/venvs/dnspython-testin/lib/python2.7/site-packages/dns/zone.pyc in __init__(self, tok, origin, rdclass, relativize, zone_factory, allow_include, check_origin) 597 self.ttl = 0 598 self.last_name = self.current_origin --> 599 self.zone = zone_factory(origin, rdclass, relativize=relativize) 600 self.saved_state = [] 601 self.current_file = None /Users/graham/venvs/dnspython-testin/lib/python2.7/site-packages/dns/zone.pyc in __init__(self, origin, rdclass, relativize) 94 origin = dns.name.from_text(origin) 95 elif not isinstance(origin, dns.name.Name): ---> 96 raise ValueError("origin parameter must be convertable to a " 97 "DNS name") 98 if not origin.is_absolute(): ValueError: origin parameter must be convertable to a DNS name
ValueError
def match( handler: handlers.ResourceHandler, cause: causation.ResourceCause, ) -> bool: # Kwargs are lazily evaluated on the first _actual_ use, and shared for all filters since then. kwargs: MutableMapping[str, Any] = {} return ( _matches_resource(handler, cause.resource) and _matches_labels(handler, cause, kwargs) and _matches_annotations(handler, cause, kwargs) and _matches_field_values(handler, cause, kwargs) and _matches_field_changes(handler, cause, kwargs) and _matches_filter_callback( handler, cause, kwargs ) # the callback comes in the end! )
def match( handler: handlers.ResourceHandler, cause: causation.ResourceCause, ) -> bool: # Kwargs are lazily evaluated on the first _actual_ use, and shared for all filters since then. kwargs: MutableMapping[str, Any] = {} return all( [ _matches_resource(handler, cause.resource), _matches_labels(handler, cause, kwargs), _matches_annotations(handler, cause, kwargs), _matches_field_values(handler, cause, kwargs), _matches_field_changes(handler, cause, kwargs), _matches_filter_callback(handler, cause, kwargs), ] )
https://github.com/nolar/kopf/issues/648
2021-01-19 16:43:29,972] kopf.reactor.activit [INFO ] Initial authentication has been initiated. [2021-01-19 16:43:30,129] kopf.activities.auth [INFO ] Activity 'login_via_client' succeeded. [2021-01-19 16:43:30,129] kopf.reactor.activit [INFO ] Initial authentication has finished. [2021-01-19 16:43:30,359] kopf.objects [ERROR ] [kube-system/coredns] Throttling for 1 seconds due to an unexpected error: Traceback (most recent call last): File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/effects.py", line 200, in throttled yield should_run File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/processing.py", line 79, in process_resource_event delays, matched = await process_resource_causes( File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/processing.py", line 167, in process_resource_causes not registry._resource_changing.prematch(cause=resource_changing_cause)): File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/registries.py", line 211, in prematch if prematch(handler=handler, cause=cause): File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/registries.py", line 348, in prematch _matches_filter_callback(handler, cause, kwargs), File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/registries.py", line 501, in _matches_filter_callback return handler.when(**kwargs) File "/Users/etilley/Documents/kubernetes/kopf/handler-testing/handler.py", line 11, in is_good_enough return spec['field'] in spec['items'] File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/structs/dicts.py", line 249, in __getitem__ return resolve(self._src, self._path + (item,)) File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/structs/dicts.py", line 83, in resolve result = result[key] KeyError: 'field' [2021-01-19 16:43:30,368] kopf.objects [ERROR ] [local-path-storage/local-path-provisioner] Throttling for 1 seconds due to an unexpected error: Traceback (most recent call last): File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/effects.py", line 200, in throttled yield should_run File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/processing.py", line 79, in process_resource_event delays, matched = await process_resource_causes( File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/processing.py", line 167, in process_resource_causes not registry._resource_changing.prematch(cause=resource_changing_cause)): File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/registries.py", line 211, in prematch if prematch(handler=handler, cause=cause): File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/registries.py", line 348, in prematch _matches_filter_callback(handler, cause, kwargs), File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/reactor/registries.py", line 501, in _matches_filter_callback return handler.when(**kwargs) File "/Users/etilley/Documents/kubernetes/kopf/handler-testing/handler.py", line 11, in is_good_enough return spec['field'] in spec['items'] File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/structs/dicts.py", line 249, in __getitem__ return resolve(self._src, self._path + (item,)) File "/Users/etilley/.pyenv/versions/3.8.0/lib/python3.8/site-packages/kopf/structs/dicts.py", line 83, in resolve result = result[key] KeyError: 'field' [2021-01-19 16:43:31,361] kopf.objects [INFO ] [kube-system/coredns] Throttling is over. Switching back to normal operations. [2021-01-19 16:43:31,369] kopf.objects [INFO ] [local-path-storage/local-path-provisioner] Throttling is over. Switching back to normal operations.
KeyError
async def watch_objs( *, settings: configuration.OperatorSettings, resource: resources.Resource, namespace: Optional[str] = None, timeout: Optional[float] = None, since: Optional[str] = None, context: Optional[auth.APIContext] = None, # injected by the decorator freeze_waiter: asyncio_Future, ) -> AsyncIterator[bodies.RawInput]: """ Watch objects of a specific resource type. The cluster-scoped call is used in two cases: * The resource itself is cluster-scoped, and namespacing makes not sense. * The operator serves all namespaces for the namespaced custom resource. Otherwise, the namespace-scoped call is used: * The resource is namespace-scoped AND operator is namespaced-restricted. """ if context is None: raise RuntimeError("API instance is not injected by the decorator.") is_namespaced = await discovery.is_namespaced(resource=resource, context=context) namespace = namespace if is_namespaced else None params: Dict[str, str] = {} params["watch"] = "true" if since is not None: params["resourceVersion"] = since if timeout is not None: params["timeoutSeconds"] = str(timeout) # Stream the parsed events from the response until it is closed server-side, # or until it is closed client-side by the freeze-waiting future's callbacks. try: response = await context.session.get( url=resource.get_url( server=context.server, namespace=namespace, params=params ), timeout=aiohttp.ClientTimeout( total=settings.watching.client_timeout, sock_connect=settings.watching.connect_timeout, ), ) response.raise_for_status() response_close_callback = lambda _: response.close() freeze_waiter.add_done_callback(response_close_callback) try: async with response: async for line in _iter_jsonlines(response.content): raw_input = cast(bodies.RawInput, json.loads(line.decode("utf-8"))) yield raw_input finally: freeze_waiter.remove_done_callback(response_close_callback) except ( aiohttp.ClientConnectionError, aiohttp.ClientPayloadError, asyncio.TimeoutError, ): pass
async def watch_objs( *, settings: configuration.OperatorSettings, resource: resources.Resource, namespace: Optional[str] = None, timeout: Optional[float] = None, since: Optional[str] = None, context: Optional[auth.APIContext] = None, # injected by the decorator freeze_waiter: asyncio_Future, ) -> AsyncIterator[bodies.RawInput]: """ Watch objects of a specific resource type. The cluster-scoped call is used in two cases: * The resource itself is cluster-scoped, and namespacing makes not sense. * The operator serves all namespaces for the namespaced custom resource. Otherwise, the namespace-scoped call is used: * The resource is namespace-scoped AND operator is namespaced-restricted. """ if context is None: raise RuntimeError("API instance is not injected by the decorator.") is_namespaced = await discovery.is_namespaced(resource=resource, context=context) namespace = namespace if is_namespaced else None params: Dict[str, str] = {} params["watch"] = "true" if since is not None: params["resourceVersion"] = since if timeout is not None: params["timeoutSeconds"] = str(timeout) # Talk to the API and initiate a streaming response. response = await context.session.get( url=resource.get_url(server=context.server, namespace=namespace, params=params), timeout=aiohttp.ClientTimeout( total=settings.watching.client_timeout, sock_connect=settings.watching.connect_timeout, ), ) response.raise_for_status() # Stream the parsed events from the response until it is closed server-side, # or until it is closed client-side by the freeze-waiting future's callbacks. response_close_callback = lambda _: response.close() freeze_waiter.add_done_callback(response_close_callback) try: async with response: async for line in _iter_jsonlines(response.content): raw_input = cast(bodies.RawInput, json.loads(line.decode("utf-8"))) yield raw_input except ( aiohttp.ClientConnectionError, aiohttp.ClientPayloadError, asyncio.TimeoutError, ): pass finally: freeze_waiter.remove_done_callback(response_close_callback)
https://github.com/nolar/kopf/issues/368
[2020-05-25 10:44:44,924] kopf.reactor.running [ERROR ] Root task 'watcher of pods' is failed: Traceback (most recent call last): File "/usr/local/lib/python3.7/dist-packages/kopf/reactor/running.py", line 453, in _root_task_checker await coro File "/usr/local/lib/python3.7/dist-packages/kopf/reactor/queueing.py", line 109, in watcher async for raw_event in stream: File "/usr/local/lib/python3.7/dist-packages/kopf/clients/watching.py", line 75, in infinite_watch async for raw_event in stream: File "/usr/local/lib/python3.7/dist-packages/kopf/clients/watching.py", line 112, in streaming_watch async for raw_event in stream: File "/usr/local/lib/python3.7/dist-packages/kopf/clients/watching.py", line 146, in continuous_watch async for raw_input in stream: File "/usr/local/lib/python3.7/dist-packages/kopf/clients/auth.py", line 78, in wrapper async for item in fn(*args, **kwargs, context=context): File "/usr/local/lib/python3.7/dist-packages/kopf/clients/watching.py", line 215, in watch_objs sock_connect=settings.watching.connect_timeout, File "/usr/local/lib/python3.7/dist-packages/aiohttp/client.py", line 504, in _request await resp.start(conn) File "/usr/local/lib/python3.7/dist-packages/aiohttp/client_reqrep.py", line 847, in start message, payload = await self._protocol.read() # type: ignore # noqa File "/usr/local/lib/python3.7/dist-packages/aiohttp/streams.py", line 591, in read await self._waiter aiohttp.client_exceptions.ServerDisconnectedError
aiohttp.client_exceptions.ServerDisconnectedError
def _create_tag(self, tag_str: str): """Create a Tag object from a tag string.""" tag_hierarchy = tag_str.split(self.hierarchy_separator) tag_prefix = "" parent_tag = None tag = None for sub_tag in tag_hierarchy: # Get or create subtag. sub_tag_name = self._scrub_tag_name(sub_tag) if sub_tag_name: tag_name = tag_prefix + sub_tag_name tag = self._get_tag(tag_name) if not tag: tag = self._create_tag_instance(tag_name) # Set tag parent. tag.parent = parent_tag # Update parent and tag prefix. parent_tag = tag tag_prefix = tag.name + self.hierarchy_separator return tag
def _create_tag(self, tag_str): """Create a Tag object from a tag string.""" tag_hierarchy = tag_str.split(self.hierarchy_separator) tag_prefix = "" parent_tag = None for sub_tag in tag_hierarchy: # Get or create subtag. tag_name = tag_prefix + self._scrub_tag_name(sub_tag) tag = self._get_tag(tag_name) if not tag: tag = self._create_tag_instance(tag_name) # Set tag parent. tag.parent = parent_tag # Update parent and tag prefix. parent_tag = tag tag_prefix = tag.name + self.hierarchy_separator return tag
https://github.com/galaxyproject/galaxy/issues/11451
galaxy.web.framework.decorators ERROR 2021-02-23 12:24:13,429 [p:203,w:1,m:0] [uWSGIWorker1Core2] Uncaught exception in exposed API method: Traceback (most recent call last): File "lib/galaxy/web/framework/decorators.py", line 305, in decorator rval = func(self, trans, *args, **kwargs) File "lib/galaxy/webapps/galaxy/api/histories.py", line 471, in update self.deserializer.deserialize(history, payload, user=trans.user, trans=trans) File "lib/galaxy/managers/base.py", line 741, in deserialize new_dict[key] = self.deserializers[key](item, key, val, **context) File "lib/galaxy/managers/taggable.py", line 93, in deserialize_tags _tags_from_strings(item, self.app.tag_handler, new_tags_list, user=user) File "lib/galaxy/managers/taggable.py", line 44, in _tags_from_strings tag_handler.apply_item_tags(user, item, unicodify(new_tags_str, 'utf-8')) File "lib/galaxy/model/tags.py", line 193, in apply_item_tags self.apply_item_tag(user, item, name, value, flush=flush) File "lib/galaxy/model/tags.py", line 165, in apply_item_tag tag = self._get_or_create_tag(lc_name) File "lib/galaxy/model/tags.py", line 253, in _get_or_create_tag tag = self._create_tag(scrubbed_tag_str) File "lib/galaxy/model/tags.py", line 226, in _create_tag tag_name = tag_prefix + self._scrub_tag_name(sub_tag) TypeError: must be str, not NoneType
TypeError