code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def about_html(self):
    """Return the html for the /about page, fetching it on first access.

    The response is cached on the instance so the network request is
    made at most once. Currently unused for any functionality.

    :rtype: str
    """
    if not self._about_html:
        self._about_html = request.get(self.about_url)
    return self._about_html
Get the html for the /about page. Currently unused for any functionality. :rtype: str
about_html
python
pytube/pytube
pytube/contrib/channel.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/channel.py
Unlicense
def _extract_videos(raw_json: str) -> Tuple[List[str], Optional[str]]:
    """Extracts videos from a raw json page

    :param str raw_json: Input json extracted from the page or the last
        server response
    :rtype: Tuple[List[str], Optional[str]]
    :returns: Tuple containing a list of up to 100 video watch ids and
        a continuation token, if more videos are available
    """
    initial_data = json.loads(raw_json)
    # this is the json tree structure, if the json was extracted from
    # html
    try:
        videos = initial_data["contents"][
            "twoColumnBrowseResultsRenderer"][
            "tabs"][1]["tabRenderer"]["content"][
            "sectionListRenderer"]["contents"][0][
            "itemSectionRenderer"]["contents"][0][
            "gridRenderer"]["items"]
    except (KeyError, IndexError, TypeError):
        try:
            # this is the json tree structure, if the json was directly sent
            # by the server in a continuation response
            important_content = initial_data[1]['response']['onResponseReceivedActions'][
                0
            ]['appendContinuationItemsAction']['continuationItems']
            videos = important_content
        except (KeyError, IndexError, TypeError):
            try:
                # this is the json tree structure, if the json was directly sent
                # by the server in a continuation response
                # no longer a list and no longer has the "response" key
                important_content = initial_data['onResponseReceivedActions'][0][
                    'appendContinuationItemsAction']['continuationItems']
                videos = important_content
            except (KeyError, IndexError, TypeError) as p:
                # None of the three known layouts matched; log and treat as
                # an empty page with no continuation.
                logger.info(p)
                return [], None

    try:
        # The last list entry may be a continuation marker rather than a
        # video; pop it off and keep its token for the next request.
        continuation = videos[-1]['continuationItemRenderer'][
            'continuationEndpoint'
        ]['continuationCommand']['token']
        videos = videos[:-1]
    except (KeyError, IndexError):
        # if there is an error, no continuation is available
        continuation = None

    # remove duplicates
    return (
        uniqueify(
            list(
                # only extract the video ids from the video data
                map(
                    lambda x: (
                        f"/watch?v="
                        f"{x['gridVideoRenderer']['videoId']}"
                    ),
                    videos
                )
            ),
        ),
        continuation,
    )
Extracts videos from a raw json page :param str raw_json: Input json extracted from the page or the last server response :rtype: Tuple[List[str], Optional[str]] :returns: Tuple containing a list of up to 100 video watch ids and a continuation token, if more videos are available
_extract_videos
python
pytube/pytube
pytube/contrib/channel.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/channel.py
Unlicense
def playlist_id(self):
    """Return the playlist id, extracting it from the input url once.

    :rtype: str
    """
    if not self._playlist_id:
        self._playlist_id = extract.playlist_id(self._input_url)
    return self._playlist_id
Get the playlist id. :rtype: str
playlist_id
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def html(self):
    """Return the playlist page html, fetching and caching on first use.

    :rtype: str
    """
    if not self._html:
        self._html = request.get(self.playlist_url)
    return self._html
Get the playlist page html. :rtype: str
html
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def ytcfg(self):
    """Return the ytcfg extracted from the playlist page html (cached).

    :rtype: dict
    """
    if not self._ytcfg:
        self._ytcfg = extract.get_ytcfg(self.html)
    return self._ytcfg
Extract the ytcfg from the playlist page html. :rtype: dict
ytcfg
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def initial_data(self):
    """Return the initial data extracted from the playlist page html.

    Parsed once and cached on the instance.

    :rtype: dict
    """
    if not self._initial_data:
        self._initial_data = extract.initial_data(self.html)
    return self._initial_data
Extract the initial data from the playlist page html. :rtype: dict
initial_data
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def sidebar_info(self):
    """Return the sidebar info items from the playlist page html (cached).

    :rtype: dict
    """
    if not self._sidebar_info:
        self._sidebar_info = self.initial_data['sidebar'][
            'playlistSidebarRenderer']['items']
    return self._sidebar_info
Extract the sidebar info from the playlist page html. :rtype: dict
sidebar_info
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def _paginate(
    self, until_watch_id: Optional[str] = None
) -> Iterable[List[str]]:
    """Parse the video links from the page source, yields the /watch?v=
    part from video link

    :param until_watch_id Optional[str]: YouTube Video watch id until
        which the playlist should be read.

    :rtype: Iterable[List[str]]
    :returns: Iterable of lists of YouTube watch ids
    """
    # First page comes from the playlist html itself.
    videos_urls, continuation = self._extract_videos(
        json.dumps(extract.initial_data(self.html))
    )
    if until_watch_id:
        try:
            # Stop early once the requested watch id is found on this page.
            trim_index = videos_urls.index(f"/watch?v={until_watch_id}")
            yield videos_urls[:trim_index]
            return
        except ValueError:
            pass
    yield videos_urls

    # Extraction from a playlist only returns 100 videos at a time
    # if self._extract_videos returns a continuation there are more
    # than 100 songs inside a playlist, so we need to add further requests
    # to gather all of them
    if continuation:
        load_more_url, headers, data = self._build_continuation_url(continuation)
    else:
        load_more_url, headers, data = None, None, None

    while load_more_url and headers and data:  # there is an url found
        logger.debug("load more url: %s", load_more_url)
        # requesting the next page of videos with the url generated from the
        # previous page, needs to be a post
        req = request.post(load_more_url, extra_headers=headers, data=data)
        # extract up to 100 songs from the page loaded
        # returns another continuation if more videos are available
        videos_urls, continuation = self._extract_videos(req)
        if until_watch_id:
            try:
                trim_index = videos_urls.index(f"/watch?v={until_watch_id}")
                yield videos_urls[:trim_index]
                return
            except ValueError:
                pass
        yield videos_urls

        if continuation:
            load_more_url, headers, data = self._build_continuation_url(
                continuation
            )
        else:
            # No continuation token: terminate the pagination loop.
            load_more_url, headers, data = None, None, None
Parse the video links from the page source, yields the /watch?v= part from video link :param until_watch_id Optional[str]: YouTube Video watch id until which the playlist should be read. :rtype: Iterable[List[str]] :returns: Iterable of lists of YouTube watch ids
_paginate
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def _build_continuation_url(self, continuation: str) -> Tuple[str, dict, dict]: """Helper method to build the url and headers required to request the next page of videos :param str continuation: Continuation extracted from the json response of the last page :rtype: Tuple[str, dict, dict] :returns: Tuple of an url and required headers for the next http request """ return ( ( # was changed to this format (and post requests) # between 2021.03.02 and 2021.03.03 "https://www.youtube.com/youtubei/v1/browse?key=" f"{self.yt_api_key}" ), { "X-YouTube-Client-Name": "1", "X-YouTube-Client-Version": "2.20200720.00.02", }, # extra data required for post request { "continuation": continuation, "context": { "client": { "clientName": "WEB", "clientVersion": "2.20200720.00.02" } } } )
Helper method to build the url and headers required to request the next page of videos :param str continuation: Continuation extracted from the json response of the last page :rtype: Tuple[str, dict, dict] :returns: Tuple of an url and required headers for the next http request
_build_continuation_url
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def _extract_videos(raw_json: str) -> Tuple[List[str], Optional[str]]:
    """Extracts videos from a raw json page

    :param str raw_json: Input json extracted from the page or the last
        server response
    :rtype: Tuple[List[str], Optional[str]]
    :returns: Tuple containing a list of up to 100 video watch ids and
        a continuation token, if more videos are available
    """
    initial_data = json.loads(raw_json)
    try:
        # this is the json tree structure, if the json was extracted from
        # html
        section_contents = initial_data["contents"][
            "twoColumnBrowseResultsRenderer"][
            "tabs"][0]["tabRenderer"]["content"][
            "sectionListRenderer"]["contents"]
        try:
            # Playlist without submenus
            important_content = section_contents[
                0]["itemSectionRenderer"][
                "contents"][0]["playlistVideoListRenderer"]
        except (KeyError, IndexError, TypeError):
            # Playlist with submenus
            important_content = section_contents[
                1]["itemSectionRenderer"][
                "contents"][0]["playlistVideoListRenderer"]
        videos = important_content["contents"]
    except (KeyError, IndexError, TypeError):
        try:
            # this is the json tree structure, if the json was directly sent
            # by the server in a continuation response
            # no longer a list and no longer has the "response" key
            important_content = initial_data['onResponseReceivedActions'][0][
                'appendContinuationItemsAction']['continuationItems']
            videos = important_content
        except (KeyError, IndexError, TypeError) as p:
            # Unknown layout: log and report an empty page.
            logger.info(p)
            return [], None

    try:
        # The trailing entry may be a continuation marker; pop it and
        # keep its token for the next request.
        continuation = videos[-1]['continuationItemRenderer'][
            'continuationEndpoint'
        ]['continuationCommand']['token']
        videos = videos[:-1]
    except (KeyError, IndexError):
        # if there is an error, no continuation is available
        continuation = None

    # Extract only the video ids, de-duplicating while preserving order.
    # (Comprehension replaces the former map/lambda; the first chunk of the
    # old f-string had no placeholder, so the plain literal is folded in.)
    watch_paths = [
        f"/watch?v={x['playlistVideoRenderer']['videoId']}"
        for x in videos
    ]
    return uniqueify(watch_paths), continuation
Extracts videos from a raw json page :param str raw_json: Input json extracted from the page or the last server response :rtype: Tuple[List[str], Optional[str]] :returns: Tuple containing a list of up to 100 video watch ids and a continuation token, if more videos are available
_extract_videos
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def trimmed(self, video_id: str) -> Iterable[str]:
    """Retrieve a list of YouTube video URLs trimmed at the given
    video ID

    i.e. if the playlist has video IDs 1,2,3,4 calling trimmed(3)
    returns [1,2]
    :type video_id: str
        video ID to trim the returned list of playlist URLs at
    :rtype: List[str]
    :returns: List of video URLs from the playlist trimmed at the given ID
    """
    # _paginate stops yielding once the target watch id is reached.
    for watch_paths in self._paginate(until_watch_id=video_id):
        for watch_path in watch_paths:
            yield self._video_url(watch_path)
Retrieve a list of YouTube video URLs trimmed at the given video ID i.e. if the playlist has video IDs 1,2,3,4 calling trimmed(3) returns [1,2] :type video_id: str video ID to trim the returned list of playlist URLs at :rtype: List[str] :returns: List of video URLs from the playlist trimmed at the given ID
trimmed
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def url_generator(self):
    """Generator that yields video URLs.

    :Yields: Video URLs
    """
    for page in self._paginate():
        # Convert each /watch path on the page into a full URL.
        yield from map(self._video_url, page)
Generator that yields video URLs. :Yields: Video URLs
url_generator
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def last_updated(self) -> Optional[date]:
    """Extract the date that the playlist was last updated.

    For some playlists, this will be a specific date, which is returned
    as a datetime object. For other playlists, this is an estimate such
    as "1 week ago". Due to the fact that this value is returned as a
    string, pytube does a best-effort parsing where possible, and
    returns the raw string where it is not possible.

    :return: Date of last playlist update where possible, else the
        string provided
    :rtype: datetime.date
    """
    last_updated_text = self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer'][
        'stats'][2]['runs'][1]['text']
    try:
        date_components = last_updated_text.split()
        month = date_components[0]
        day = date_components[1].strip(',')
        year = date_components[2]
        return datetime.strptime(
            f"{month} {day:0>2} {year}", "%b %d %Y"
        ).date()
    except (IndexError, KeyError, ValueError):
        # BUGFIX: strptime raises ValueError for fuzzy strings such as
        # "Updated 3 days ago"; without catching it the property crashed
        # instead of falling back to returning the raw string.
        return last_updated_text
Extract the date that the playlist was last updated. For some playlists, this will be a specific date, which is returned as a datetime object. For other playlists, this is an estimate such as "1 week ago". Due to the fact that this value is returned as a string, pytube does a best-effort parsing where possible, and returns the raw string where it is not possible. :return: Date of last playlist update where possible, else the string provided :rtype: datetime.date
last_updated
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def length(self):
    """Extract the number of videos in the playlist.

    :return: Playlist video count
    :rtype: int
    """
    stats = self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer']['stats']
    # e.g. "1,681" -> 1681
    return int(stats[0]['runs'][0]['text'].replace(',', ''))
Extract the number of videos in the playlist. :return: Playlist video count :rtype: int
length
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def views(self):
    """Extract view count for playlist.

    :return: Playlist view count
    :rtype: int
    """
    stats = self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer']['stats']
    # "1,234,567 views" -> "1,234,567" -> 1234567
    views_text = stats[1]['simpleText']
    return int(views_text.split()[0].replace(',', ''))
Extract view count for playlist. :return: Playlist view count :rtype: int
views
python
pytube/pytube
pytube/contrib/playlist.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/playlist.py
Unlicense
def __init__(self, query):
    """Initialize Search object.

    :param str query:
        Search query provided by the user.
    """
    self.query = query
    self._innertube_client = InnerTube(client='WEB')

    # The first search, without a continuation, is structured differently
    # and contains completion suggestions, so it is stored separately from
    # the accumulated list of results.
    self._initial_results = None
    self._completion_suggestions = None
    self._results = None

    # Continuation token bookkeeping, so that get_next_results() always
    # returns fresh results on each call.
    self._current_continuation = None
Initialize Search object. :param str query: Search query provided by the user.
__init__
python
pytube/pytube
pytube/contrib/search.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/search.py
Unlicense
def completion_suggestions(self):
    """Return query autocompletion suggestions for the query.

    :rtype: list
    :returns:
        A list of autocomplete suggestions provided by YouTube for the
        query.
    """
    if not self._completion_suggestions:
        # Accessing .results runs the first query, which populates
        # self._initial_results with the refinements.
        if self.results:
            self._completion_suggestions = self._initial_results['refinements']
    return self._completion_suggestions
Return query autocompletion suggestions for the query. :rtype: list :returns: A list of autocomplete suggestions provided by YouTube for the query.
completion_suggestions
python
pytube/pytube
pytube/contrib/search.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/search.py
Unlicense
def results(self):
    """Return search results.

    On first call, will generate and return the first set of results.
    Additional results can be generated using ``.get_next_results()``.

    :rtype: list
    :returns:
        A list of YouTube objects.
    """
    if not self._results:
        videos, continuation = self.fetch_and_parse()
        self._results = videos
        self._current_continuation = continuation
    return self._results
Return search results. On first call, will generate and return the first set of results. Additional results can be generated using ``.get_next_results()``. :rtype: list :returns: A list of YouTube objects.
results
python
pytube/pytube
pytube/contrib/search.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/search.py
Unlicense
def get_next_results(self):
    """Use the stored continuation string to fetch the next set of results.

    This method does not return the results, but instead updates the
    results property.

    :raises IndexError: when no continuation is available.
    """
    if not self._current_continuation:
        raise IndexError
    videos, continuation = self.fetch_and_parse(self._current_continuation)
    self._results.extend(videos)
    self._current_continuation = continuation
Use the stored continuation string to fetch the next set of results. This method does not return the results, but instead updates the results property.
get_next_results
python
pytube/pytube
pytube/contrib/search.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/search.py
Unlicense
def fetch_and_parse(self, continuation=None):
    """Fetch from the innertube API and parse the results.

    :param str continuation:
        Continuation string for fetching results.
    :rtype: tuple
    :returns:
        A tuple of a list of YouTube objects and a continuation string.
    """
    # Begin by executing the query and identifying the relevant sections
    # of the results
    raw_results = self.fetch_query(continuation)

    # Initial result is handled by try block, continuations by except block
    try:
        sections = raw_results['contents']['twoColumnSearchResultsRenderer'][
            'primaryContents']['sectionListRenderer']['contents']
    except KeyError:
        sections = raw_results['onResponseReceivedCommands'][0][
            'appendContinuationItemsAction']['continuationItems']
    item_renderer = None
    continuation_renderer = None
    for s in sections:
        if 'itemSectionRenderer' in s:
            item_renderer = s['itemSectionRenderer']
        if 'continuationItemRenderer' in s:
            continuation_renderer = s['continuationItemRenderer']

    # If the continuationItemRenderer doesn't exist, assume no further results
    if continuation_renderer:
        next_continuation = continuation_renderer['continuationEndpoint'][
            'continuationCommand']['token']
    else:
        next_continuation = None

    # If the itemSectionRenderer doesn't exist, assume no results.
    if item_renderer:
        videos = []
        raw_video_list = item_renderer['contents']
        for video_details in raw_video_list:
            # Skip over ads
            if video_details.get('searchPyvRenderer', {}).get('ads', None):
                continue

            # Skip "recommended" type videos e.g. "people also watched"
            # and "popular X" that break up the search results
            if 'shelfRenderer' in video_details:
                continue

            # Skip auto-generated "mix" playlist results
            if 'radioRenderer' in video_details:
                continue

            # Skip playlist results
            if 'playlistRenderer' in video_details:
                continue

            # Skip channel results
            if 'channelRenderer' in video_details:
                continue

            # Skip 'people also searched for' results
            if 'horizontalCardListRenderer' in video_details:
                continue

            # Can't seem to reproduce, probably related to typo fix suggestions
            if 'didYouMeanRenderer' in video_details:
                continue

            # Seems to be the renderer used for the image shown on a no results page
            if 'backgroundPromoRenderer' in video_details:
                continue

            if 'videoRenderer' not in video_details:
                # Unknown renderer type: log enough detail for a bug report
                # and skip the entry instead of crashing.
                logger.warning('Unexpected renderer encountered.')
                logger.warning(f'Renderer name: {video_details.keys()}')
                logger.warning(f'Search term: {self.query}')
                logger.warning(
                    'Please open an issue at '
                    'https://github.com/pytube/pytube/issues '
                    'and provide this log output.'
                )
                continue

            # Extract relevant video information from the details.
            # Some of this can be used to pre-populate attributes of the
            # YouTube object.
            vid_renderer = video_details['videoRenderer']
            vid_id = vid_renderer['videoId']
            vid_url = f'https://www.youtube.com/watch?v={vid_id}'
            vid_title = vid_renderer['title']['runs'][0]['text']
            vid_channel_name = vid_renderer['ownerText']['runs'][0]['text']
            vid_channel_uri = vid_renderer['ownerText']['runs'][0][
                'navigationEndpoint']['commandMetadata']['webCommandMetadata']['url']
            # Livestreams have "runs", non-livestreams have "simpleText",
            # and scheduled releases do not have 'viewCountText'
            if 'viewCountText' in vid_renderer:
                if 'runs' in vid_renderer['viewCountText']:
                    vid_view_count_text = vid_renderer['viewCountText']['runs'][0]['text']
                else:
                    vid_view_count_text = vid_renderer['viewCountText']['simpleText']
                # Strip ' views' text, then remove commas
                stripped_text = vid_view_count_text.split()[0].replace(',','')
                if stripped_text == 'No':
                    # "No views" renders as the word rather than a number.
                    vid_view_count = 0
                else:
                    vid_view_count = int(stripped_text)
            else:
                vid_view_count = 0
            if 'lengthText' in vid_renderer:
                vid_length = vid_renderer['lengthText']['simpleText']
            else:
                vid_length = None

            vid_metadata = {
                'id': vid_id,
                'url': vid_url,
                'title': vid_title,
                'channel_name': vid_channel_name,
                'channel_url': vid_channel_uri,
                'view_count': vid_view_count,
                'length': vid_length
            }

            # Construct YouTube object from metadata and append to results
            vid = YouTube(vid_metadata['url'])
            vid.author = vid_metadata['channel_name']
            vid.title = vid_metadata['title']
            videos.append(vid)
    else:
        videos = None

    return videos, next_continuation
Fetch from the innertube API and parse the results. :param str continuation: Continuation string for fetching results. :rtype: tuple :returns: A tuple of a list of YouTube objects and a continuation string.
fetch_and_parse
python
pytube/pytube
pytube/contrib/search.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/search.py
Unlicense
def fetch_query(self, continuation=None):
    """Fetch raw results from the innertube API.

    :param str continuation:
        Continuation string for fetching results.
    :rtype: dict
    :returns:
        The raw json object returned by the innertube API.
    """
    query_results = self._innertube_client.search(self.query, continuation)
    # Keep the very first response around: it carries completion
    # suggestions that continuation responses do not.
    if not self._initial_results:
        self._initial_results = query_results
    return query_results  # noqa:R504
Fetch raw results from the innertube API. :param str continuation: Continuation string for fetching results. :rtype: dict :returns: The raw json object returned by the innertube API.
fetch_query
python
pytube/pytube
pytube/contrib/search.py
https://github.com/pytube/pytube/blob/master/pytube/contrib/search.py
Unlicense
def load_and_init_from_playback_file(filename, mock_urlopen):
    """Load a gzip json playback file and create YouTube instance."""
    playback = load_playback_file(filename)

    # Feed the recorded watch page and player js to the mocked urlopen.
    response = mock.Mock()
    response.read.side_effect = [
        playback['watch_html'].encode('utf-8'),
        playback['js'].encode('utf-8'),
    ]
    mock_urlopen.return_value = response

    # Pytest caches this result, so touching the lazy attributes up front
    # speeds up the tests. Previously handled by prefetch_init() and
    # descramble(), but that functionality has since been deferred.
    yt = YouTube(playback["url"])
    yt.watch_html
    yt._vid_info = playback['vid_info']
    yt.js
    yt.fmt_streams
    return yt
Load a gzip json playback file and create YouTube instance.
load_and_init_from_playback_file
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def playlist_html():
    """Youtube playlist HTML loaded on 2020-01-25 from
    https://www.youtube.com/playlist?list=PLzMcBGfZo4-mP7qA9cagf68V06sko5otr
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    mock_path = os.path.join(base_dir, "mocks", "playlist.html.gz")
    with gzip.open(mock_path, "rb") as fh:
        return fh.read().decode("utf-8")
Youtube playlist HTML loaded on 2020-01-25 from https://www.youtube.com/playlist?list=PLzMcBGfZo4-mP7qA9cagf68V06sko5otr
playlist_html
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def playlist_long_html():
    """Youtube playlist HTML loaded on 2020-01-25 from
    https://www.youtube.com/playlist?list=PLzMcBGfZo4-mP7qA9cagf68V06sko5otr
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    mock_path = os.path.join(base_dir, "mocks", "playlist_long.html.gz")
    with gzip.open(mock_path, "rb") as fh:
        return fh.read().decode("utf-8")
Youtube playlist HTML loaded on 2020-01-25 from https://www.youtube.com/playlist?list=PLzMcBGfZo4-mP7qA9cagf68V06sko5otr
playlist_long_html
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def playlist_submenu_html():
    """Youtube playlist HTML loaded on 2020-01-24 from
    https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    mock_path = os.path.join(base_dir, "mocks", "playlist_submenu.html.gz")
    with gzip.open(mock_path, "rb") as fh:
        return fh.read().decode("utf-8")
Youtube playlist HTML loaded on 2020-01-24 from https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr
playlist_submenu_html
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def stream_dict():
    """Youtube instance initialized with video id WXxV9g7lsFE."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    mock_path = os.path.join(base_dir, "mocks", "yt-video-WXxV9g7lsFE-html.json.gz")
    with gzip.open(mock_path, "rb") as fh:
        # The gz file holds a json capture; only the watch page html is used.
        return json.loads(fh.read().decode("utf-8"))['watch_html']
Youtube instance initialized with video id WXxV9g7lsFE.
stream_dict
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def channel_videos_html():
    """Youtube channel HTML loaded on 2021-05-05 from
    https://www.youtube.com/c/ProgrammingKnowledge/videos
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    mock_path = os.path.join(base_dir, "mocks", "channel-videos.html.gz")
    with gzip.open(mock_path, 'rb') as fh:
        return fh.read().decode('utf-8')
Youtube channel HTML loaded on 2021-05-05 from https://www.youtube.com/c/ProgrammingKnowledge/videos
channel_videos_html
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def base_js():
    """Youtube base.js files retrieved on 2022-02-04 and 2022-04-15 from
    https://www.youtube.com/watch?v=vmzxpUsN0uA and
    https://www.youtube.com/watch?v=Y4-GSFKZmEg respectively
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    sources = []
    for name in ("base.js-2022-02-04.gz", "base.js-2022-04-15.gz"):
        with gzip.open(os.path.join(base_dir, "mocks", name), 'rb') as fh:
            sources.append(fh.read().decode('utf-8'))
    return sources
Youtube base.js files retrieved on 2022-02-04 and 2022-04-15 from https://www.youtube.com/watch?v=vmzxpUsN0uA and https://www.youtube.com/watch?v=Y4-GSFKZmEg respectively
base_js
python
pytube/pytube
tests/conftest.py
https://github.com/pytube/pytube/blob/master/tests/conftest.py
Unlicense
def test_safe_filename():
    """Unsafe characters get stripped from generated filename"""
    cases = {
        "abc1245$$": "abc1245",
        "abc##": "abc",
    }
    for raw, expected in cases.items():
        assert helpers.safe_filename(raw) == expected
Unsafe characters get stripped from generated filename
test_safe_filename
python
pytube/pytube
tests/test_helpers.py
https://github.com/pytube/pytube/blob/master/tests/test_helpers.py
Unlicense
def test_filters(test_input, expected, cipher_signature):
    """Ensure filters produce the expected results."""
    filtered = cipher_signature.streams.filter(**test_input)
    assert [stream.itag for stream in filtered] == expected
Ensure filters produce the expected results.
test_filters
python
pytube/pytube
tests/test_query.py
https://github.com/pytube/pytube/blob/master/tests/test_query.py
Unlicense
def test_empty(test_input, cipher_signature):
    """Ensure :meth:`~pytube.StreamQuery.last` and
    :meth:`~pytube.StreamQuery.first` return None if the resultset is
    empty.
    """
    # vp20 matches nothing, giving an empty query.
    empty_query = cipher_signature.streams.filter(video_codec="vp20")
    assert getattr(empty_query, test_input)() is None
Ensure :meth:`~pytube.StreamQuery.last` and :meth:`~pytube.StreamQuery.first` return None if the resultset is empty.
test_empty
python
pytube/pytube
tests/test_query.py
https://github.com/pytube/pytube/blob/master/tests/test_query.py
Unlicense
def test_order_by(cipher_signature):
    """Ensure :meth:`~pytube.StreamQuery.order_by` sorts the list of
    :class:`Stream <Stream>` instances in the expected order.
    """
    ordered = [
        s.itag
        for s in cipher_signature.streams.filter(type="audio").order_by("itag")
    ]
    expected = sorted(
        s.itag for s in cipher_signature.streams.filter(type="audio")
    )
    assert ordered == expected
Ensure :meth:`~pytube.StreamQuery.order_by` sorts the list of :class:`Stream <Stream>` instances in the expected order.
test_order_by
python
pytube/pytube
tests/test_query.py
https://github.com/pytube/pytube/blob/master/tests/test_query.py
Unlicense
def test_order_by_descending(cipher_signature):
    """Ensure :meth:`~pytube.StreamQuery.desc` sorts the list of
    :class:`Stream <Stream>` instances in the reverse order.
    """
    # numerical values
    ordered = [
        s.itag
        for s in cipher_signature.streams.filter(type="audio")
        .order_by("itag")
        .desc()
    ]
    expected = sorted(
        (s.itag for s in cipher_signature.streams.filter(type="audio")),
        reverse=True,
    )
    assert ordered == expected
Ensure :meth:`~pytube.StreamQuery.desc` sorts the list of :class:`Stream <Stream>` instances in the reverse order.
test_order_by_descending
python
pytube/pytube
tests/test_query.py
https://github.com/pytube/pytube/blob/master/tests/test_query.py
Unlicense
def test_order_by_ascending(cipher_signature):
    """Ensure :meth:`~pytube.StreamQuery.desc` sorts the list of
    :class:`Stream <Stream>` instances in ascending order.
    """
    # numerical values
    ordered = [
        s.itag
        for s in cipher_signature.streams.filter(type="audio")
        .order_by("itag")
        .asc()
    ]
    # NOTE(review): the expected list is deliberately left in the query's
    # natural order, mirroring the original test's assumption that audio
    # itags already come back ascending — confirm against the fixture.
    expected = [s.itag for s in cipher_signature.streams.filter(type="audio")]
    assert ordered == expected
Ensure :meth:`~pytube.StreamQuery.desc` sorts the list of :class:`Stream <Stream>` instances in ascending order.
test_order_by_ascending
python
pytube/pytube
tests/test_query.py
https://github.com/pytube/pytube/blob/master/tests/test_query.py
Unlicense
def prepare_docstring(s):
    """
    Convert a docstring into lines of parseable reST.  Return it as a list of
    lines usable for inserting into a docutils ViewList (used as argument
    of nested_parse().)  An empty line is added to act as a separator
    between this docstring and following content.
    """
    if not s or s.isspace():
        return ['']
    s = s.expandtabs()

    # [MF] begin pydoc hack **************
    first_param = s.find('@param')
    if first_param > 0:
        # insert blank line before keyword list
        break_at = s.rfind('\n', 0, first_param)
        s = s[:break_at] + '\n' + s[break_at:]
    # replace pydoc with sphinx notation
    s = s.replace("@param", ":param")
    # [MF] end pydoc hack **************

    newline_at = s.rstrip().find('\n')
    if newline_at == -1:
        # Only one line...
        return [s.strip(), '']
    # The first line may be indented differently...
    head = s[:newline_at].strip()
    tail = textwrap.dedent(s[newline_at + 1:])
    return [head] + tail.splitlines() + ['']
Convert a docstring into lines of parseable reST. Return it as a list of lines usable for inserting into a docutils ViewList (used as argument of nested_parse().) An empty line is added to act as a separator between this docstring and following content.
prepare_docstring
python
pybrain/pybrain
docs/sphinx/autodoc_hack.py
https://github.com/pybrain/pybrain/blob/master/docs/sphinx/autodoc_hack.py
BSD-3-Clause
def performAction(self, action): """Incoming action is an int between 0 and 8. The action we provide to the environment consists of a torque T in {-2 N, 0, 2 N}, and a displacement d in {-.02 m, 0, 0.02 m}. """ self.t += 1 assert round(action[0]) == action[0] # -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for # action in {6, 7, 8} torque_selector = np.floor(action[0] / 3.0) - 1.0 T = 2 * torque_selector # Random number in [-1, 1]: p = 2.0 * np.random.rand() - 1.0 # -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for # action in {2, 5, 8} disp_selector = action[0] % 3 - 1.0 d = 0.02 * disp_selector + 0.02 * p super(BalanceTask, self).performAction([T, d])
Incoming action is an int between 0 and 8. The action we provide to the environment consists of a torque T in {-2 N, 0, 2 N}, and a displacement d in {-.02 m, 0, 0.02 m}.
performAction
python
pybrain/pybrain
examples/rl/environments/linear_fa/bicycle.py
https://github.com/pybrain/pybrain/blob/master/examples/rl/environments/linear_fa/bicycle.py
BSD-3-Clause
def evalRnnOnSeqDataset(net, DS, verbose = False, silent = False): """ evaluate the network on all the sequences of a dataset. """ r = 0. samples = 0. for seq in DS: net.reset() for i, t in seq: res = net.activate(i) if verbose: print(t, res) r += sum((t-res)**2) samples += 1 if verbose: print('-'*20) r /= samples if not silent: print('MSE:', r) return r
evaluate the network on all the sequences of a dataset.
evalRnnOnSeqDataset
python
pybrain/pybrain
examples/supervised/backprop/parityrnn.py
https://github.com/pybrain/pybrain/blob/master/examples/supervised/backprop/parityrnn.py
BSD-3-Clause
def multigaussian(x, mean, stddev): """Returns value of uncorrelated Gaussians at given scalar point. x: scalar mean: vector stddev: vector """ tmp = -0.5 * ((x-mean)/stddev)**2 return np.exp(tmp) / (np.sqrt(2.*np.pi) * stddev)
Returns value of uncorrelated Gaussians at given scalar point. x: scalar mean: vector stddev: vector
multigaussian
python
pybrain/pybrain
examples/supervised/neuralnets+svm/example_mixturedensity.py
https://github.com/pybrain/pybrain/blob/master/examples/supervised/neuralnets+svm/example_mixturedensity.py
BSD-3-Clause
def generateClassificationData(size, nClasses=3): """ generate a set of points in 2D belonging to two or three different classes """ if nClasses==3: means = [(-1,0),(2,4),(3,1)] else: means = [(-2,0),(2,1),(6,0)] cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])] dataset = ClassificationDataSet(2, 1, nb_classes=nClasses) for _ in range(size): for c in range(3): input = multivariate_normal(means[c],cov[c]) dataset.addSample(input, [c%nClasses]) dataset.assignClasses() return dataset
generate a set of points in 2D belonging to two or three different classes
generateClassificationData
python
pybrain/pybrain
examples/supervised/neuralnets+svm/datasets/datagenerator.py
https://github.com/pybrain/pybrain/blob/master/examples/supervised/neuralnets+svm/datasets/datagenerator.py
BSD-3-Clause
def generateGridData(x,y, return_ticks=False): """ Generates a dataset containing a regular grid of points. The x and y arguments contain start, end, and step each. Returns the dataset and the x and y mesh or ticks.""" x = np.arange(x[0], x[1], x[2]) y = np.arange(y[0], y[1], y[2]) X, Y = np.meshgrid(x, y) shape = X.shape # need column vectors in dataset, not arrays ds = ClassificationDataSet(2,1) ds.setField('input', np.concatenate((X.reshape(X.size, 1),Y.reshape(X.size, 1)), 1)) ds.setField('target', np.zeros([X.size,1])) ds._convertToOneOfMany() if return_ticks: return (ds, x, y) else: return (ds, X, Y)
Generates a dataset containing a regular grid of points. The x and y arguments contain start, end, and step each. Returns the dataset and the x and y mesh or ticks.
generateGridData
python
pybrain/pybrain
examples/supervised/neuralnets+svm/datasets/datagenerator.py
https://github.com/pybrain/pybrain/blob/master/examples/supervised/neuralnets+svm/datasets/datagenerator.py
BSD-3-Clause
def generateNoisySines( npoints, nseq, noise=0.3 ): """ construct a 2-class dataset out of noisy sines """ x = np.arange(npoints)/float(npoints) * 20. y1 = np.sin(x+rand(1)*3.) y2 = np.sin(x/2.+rand(1)*3.) DS = SequenceClassificationDataSet(1,1, nb_classes=2) for _ in range(nseq): DS.newSequence() buf = rand(npoints)*noise + y1 + (rand(1)-0.5)*noise for i in range(npoints): DS.addSample([buf[i]],[0]) DS.newSequence() buf = rand(npoints)*noise + y2 + (rand(1)-0.5)*noise for i in range(npoints): DS.addSample([buf[i]],[1]) return DS
construct a 2-class dataset out of noisy sines
generateNoisySines
python
pybrain/pybrain
examples/supervised/neuralnets+svm/datasets/datagenerator.py
https://github.com/pybrain/pybrain/blob/master/examples/supervised/neuralnets+svm/datasets/datagenerator.py
BSD-3-Clause
def makeData(amount = 10000): """Return 2D dataset of points in (0, 1) where points in a circle of radius .4 around the center are blue and all the others are red.""" center = array([0.5, 0.5]) def makePoint(): """Return a random point and its satellite information. Satellite is 'blue' if point is in the circle, else 'red'.""" point = random.random((2,)) * 10 vectorLength = lambda x: dot(x.T, x) return point, 'blue' if vectorLength(point - center) < 25 else 'red' return [makePoint() for _ in range(amount)]
Return 2D dataset of points in (0, 1) where points in a circle of radius .4 around the center are blue and all the others are red.
makeData
python
pybrain/pybrain
examples/unsupervised/lsh.py
https://github.com/pybrain/pybrain/blob/master/examples/unsupervised/lsh.py
BSD-3-Clause
def makePoint(): """Return a random point and its satellite information. Satellite is 'blue' if point is in the circle, else 'red'.""" point = random.random((2,)) * 10 vectorLength = lambda x: dot(x.T, x) return point, 'blue' if vectorLength(point - center) < 25 else 'red'
Return a random point and its satellite information. Satellite is 'blue' if point is in the circle, else 'red'.
makePoint
python
pybrain/pybrain
examples/unsupervised/lsh.py
https://github.com/pybrain/pybrain/blob/master/examples/unsupervised/lsh.py
BSD-3-Clause
def drawIndex(probs, tolerant=False): """ Draws an index given an array of probabilities. :key tolerant: if set to True, the array is normalized to sum to 1. """ if not sum(probs) < 1.00001 or not sum(probs) > 0.99999: if tolerant: probs /= sum(probs) else: print((probs, 1 - sum(probs))) raise ValueError() r = random() s = 0 for i, p in enumerate(probs): s += p if s > r: return i return choice(list(range(len(probs))))
Draws an index given an array of probabilities. :key tolerant: if set to True, the array is normalized to sum to 1.
drawIndex
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def drawGibbs(vals, temperature=1.): """ Return the index of the sample drawn by a softmax (Gibbs). """ if temperature == 0: # randomly pick one of the values with the max value. m = max(vals) best = [] for i, v in enumerate(vals): if v == m: best.append(i) return choice(best) else: temp = vals / temperature # make sure we keep the exponential bounded (between +20 and -20) temp += 20 - max(temp) if min(temp) < -20: for i, v in enumerate(temp): if v < -20: temp[i] = -20 temp = exp(temp) temp /= sum(temp) return drawIndex(temp)
Return the index of the sample drawn by a softmax (Gibbs).
drawGibbs
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def iterCombinations(tup): """ all possible of integer tuples of the same dimension than tup, and each component being positive and strictly inferior to the corresponding entry in tup. """ if len(tup) == 1: for i in range(tup[0]): yield (i,) elif len(tup) > 1: for prefix in iterCombinations(tup[:-1]): for i in range(tup[-1]): yield tuple(list(prefix) + [i])
all possible of integer tuples of the same dimension than tup, and each component being positive and strictly inferior to the corresponding entry in tup.
iterCombinations
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def setAllArgs(obj, argdict): """ set all those internal variables which have the same name than an entry in the given object's dictionary. This function can be useful for quick initializations. """ xmlstore = isinstance(obj, XMLBuildable) for n in list(argdict.keys()): if hasattr(obj, n): setattr(obj, n, argdict[n]) if xmlstore: obj.argdict[n] = argdict[n] else: print(('Warning: parameter name', n, 'not found!')) if xmlstore: if not hasattr(obj, '_unknown_argdict'): obj._unknown_argdict = {} obj._unknown_argdict[n] = argdict[n]
set all those internal variables which have the same name than an entry in the given object's dictionary. This function can be useful for quick initializations.
setAllArgs
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def percentError(out, true): """ return percentage of mismatch between out and target values (lists and arrays accepted) """ arrout = array(out).flatten() wrong = where(arrout != array(true).flatten())[0].size return 100. * float(wrong) / float(arrout.size)
return percentage of mismatch between out and target values (lists and arrays accepted)
percentError
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def formatFromExtension(fname): """Tries to infer a protocol from the file extension.""" _base, ext = os.path.splitext(fname) if not ext: return None try: format = known_extensions[ext.replace('.', '')] except KeyError: format = None return format
Tries to infer a protocol from the file extension.
formatFromExtension
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def saveToFileLike(self, flo, format=None, **kwargs): """Save the object to a given file like object in the given format. """ format = 'pickle' if format is None else format save = getattr(self, "save_%s" % format, None) if save is None: raise ValueError("Unknown format '%s'." % format) save(flo, **kwargs)
Save the object to a given file like object in the given format.
saveToFileLike
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def loadFromFileLike(cls, flo, format=None): """Load the object to a given file like object with the given protocol. """ format = 'pickle' if format is None else format load = getattr(cls, "load_%s" % format, None) if load is None: raise ValueError("Unknown format '%s'." % format) return load(flo)
Load the object to a given file like object with the given protocol.
loadFromFileLike
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def saveToFile(self, filename, format=None, **kwargs): """Save the object to file given by filename.""" if format is None: # try to derive protocol from file extension format = formatFromExtension(filename) with open(filename, 'wb') as fp: self.saveToFileLike(fp, format, **kwargs)
Save the object to file given by filename.
saveToFile
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def loadFromFile(cls, filename, format=None): """Return an instance of the class that is saved in the file with the given filename in the specified format.""" if format is None: # try to derive protocol from file extension format = formatFromExtension(filename) with open(filename, 'rbU') as fp: obj = cls.loadFromFileLike(fp, format) obj.filename = filename return obj
Return an instance of the class that is saved in the file with the given filename in the specified format.
loadFromFile
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def _getName(self): """Returns the name, which is generated if it has not been already.""" if self._name is None: self._name = self._generateName() return self._name
Returns the name, which is generated if it has not been already.
_getName
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def fListToString(a_list, a_precision=3): """ Returns a string representing a list of floats with a given precision """ from numpy import around s_list = ", ".join(("%g" % around(x, a_precision)).ljust(a_precision+3) for x in a_list) return "[%s]" % s_list
Returns a string representing a list of floats with a given precision
fListToString
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def tupleRemoveItem(tup, index): """ remove the item at position index of the tuple and return a new tuple. """ l = list(tup) return tuple(l[:index] + l[index + 1:])
remove the item at position index of the tuple and return a new tuple.
tupleRemoveItem
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def confidenceIntervalSize(stdev, nbsamples): """ Determine the size of the confidence interval, given the standard deviation and the number of samples. t-test-percentile: 97.5%, infinitely many degrees of freedom, therefore on the two-sided interval: 95% """ # CHECKME: for better precision, maybe get the percentile dynamically, from the scipy library? return 2 * 1.98 * stdev / sqrt(nbsamples)
Determine the size of the confidence interval, given the standard deviation and the number of samples. t-test-percentile: 97.5%, infinitely many degrees of freedom, therefore on the two-sided interval: 95%
confidenceIntervalSize
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def threaded(callback=lambda * args, **kwargs: None, daemonic=False): """Decorate a function to run in its own thread and report the result by calling callback with it.""" def innerDecorator(func): def inner(*args, **kwargs): target = lambda: callback(func(*args, **kwargs)) t = threading.Thread(target=target) t.setDaemon(daemonic) t.start() return inner return innerDecorator
Decorate a function to run in its own thread and report the result by calling callback with it.
threaded
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def garbagecollect(func): """Decorate a function to invoke the garbage collector after each execution. """ def inner(*args, **kwargs): result = func(*args, **kwargs) gc.collect() return result return inner
Decorate a function to invoke the garbage collector after each execution.
garbagecollect
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def memoize(func): """Decorate a function to 'memoize' results by holding it in a cache that maps call arguments to returns.""" cache = {} def inner(*args, **kwargs): # Dictionaries and lists are unhashable args = tuple(args) # Make a set for checking in the cache, since the order of # .iteritems() is undefined kwargs_set = frozenset(iter(kwargs.items())) if (args, kwargs_set) in cache: result = cache[args, kwargs_set] else: result = func(*args, **kwargs) cache[args, kwargs_set] = result return result return inner
Decorate a function to 'memoize' results by holding it in a cache that maps call arguments to returns.
memoize
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def storeCallResults(obj, verbose=False): """Pseudo-decorate an object to store all evaluations of the function in the returned list.""" results = [] oldcall = obj.__class__.__call__ def newcall(*args, **kwargs): result = oldcall(*args, **kwargs) results.append(result) if verbose: print(result) return result obj.__class__.__call__ = newcall return results
Pseudo-decorate an object to store all evaluations of the function in the returned list.
storeCallResults
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def multiEvaluate(repeat): """Decorate a function to evaluate repeatedly with the same arguments, and return the average result """ def decorator(func): def inner(*args, **kwargs): result = 0. for dummy in range(repeat): result += func(*args, **kwargs) return result / repeat return inner return decorator
Decorate a function to evaluate repeatedly with the same arguments, and return the average result
multiEvaluate
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def _import(name): """Return module from a package. These two are equivalent: > from package import module as bar > bar = _import('package.module') """ mod = __import__(name) components = name.split('.') for comp in components[1:]: try: mod = getattr(mod, comp) except AttributeError: raise ImportError("No module named %s" % mod) return mod
Return module from a package. These two are equivalent: > from package import module as bar > bar = _import('package.module')
_import
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def gray2int(g, size): """ Transforms a Gray code back into an integer. """ res = 0 for i in reversed(list(range(size))): gi = (g >> i) % 2 if i == size - 1: bi = gi else: bi = bi ^ gi res += bi * 2 ** i return res
Transforms a Gray code back into an integer.
gray2int
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def asBinary(i): """ Produces a string from an integer's binary representation. (preceding zeros removed). """ if i > 1: if i % 2 == 1: return asBinary(i >> 1) + '1' else: return asBinary(i >> 1) + '0' else: return str(i)
Produces a string from an integer's binary representation. (preceding zeros removed).
asBinary
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def one_to_n(val, maxval): """ Returns a 1-in-n binary encoding of a non-negative integer. """ a = zeros(maxval, float) a[val] = 1. return a
Returns a 1-in-n binary encoding of a non-negative integer.
one_to_n
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def canonicClassString(x): """ the __class__ attribute changed from old-style to new-style classes... """ if isinstance(x, object): return repr(x.__class__).split("'")[1] else: return repr(x.__class__)
the __class__ attribute changed from old-style to new-style classes...
canonicClassString
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def decrementAny(tup): """ the closest tuples to tup: decrementing by 1 along any dimension. Never go into negatives though. """ res = [] for i, x in enumerate(tup): if x > 0: res.append(tuple(list(tup[:i]) + [x - 1] + list(tup[i + 1:]))) return res
the closest tuples to tup: decrementing by 1 along any dimension. Never go into negatives though.
decrementAny
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def reachable(stepFunction, start, destinations, _alreadyseen=None): """ Determines the subset of destinations that can be reached from a set of starting positions, while using stepFunction (which produces a list of neighbor states) to navigate. Uses breadth-first search. Returns a dictionary with reachable destinations and their distances. """ if len(start) == 0 or len(destinations) == 0: return {} if _alreadyseen is None: _alreadyseen = [] _alreadyseen.extend(start) # dict with distances to destinations res = {} for s in start: if s in destinations: res[s] = 0 start.remove(s) # do one step new = set() for s in start: new.update(stepFunction(s)) new.difference_update(_alreadyseen) ndestinations = list(destinations) for s in list(new): if s in destinations: res[s] = 1 new.remove(s) ndestinations.remove(s) _alreadyseen.append(s) # recursively do the rest deeper = reachable(stepFunction, new, ndestinations, _alreadyseen) # adjust distances for k, val in list(deeper.items()): res[k] = val + 1 return res
Determines the subset of destinations that can be reached from a set of starting positions, while using stepFunction (which produces a list of neighbor states) to navigate. Uses breadth-first search. Returns a dictionary with reachable destinations and their distances.
reachable
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def flood(stepFunction, fullSet, initSet, relevant=None): """ Returns a list of elements of fullSet linked to some element of initSet through the neighborhood-setFunction (which must be defined on all elements of fullSet). :key relevant: (optional) list of relevant elements: stop once all relevant elements are found. """ if fullSet is None: flooded = set(initSet) else: full = set(fullSet) flooded = full.intersection(set(initSet)) if relevant is None: relevant = full.copy() if relevant: relevant = set(relevant) change = flooded.copy() while len(change)>0: new = set() for m in change: if fullSet is None: new.update(stepFunction(m)) else: new.update(full.intersection(stepFunction(m))) change = new.difference(flooded) flooded.update(change) if relevant is not None and relevant.issubset(flooded): break return list(flooded)
Returns a list of elements of fullSet linked to some element of initSet through the neighborhood-setFunction (which must be defined on all elements of fullSet). :key relevant: (optional) list of relevant elements: stop once all relevant elements are found.
flood
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def crossproduct(ss, row=None, level=0): """Returns the cross-product of the sets given in `ss`.""" if row is None: row = [] if len(ss) > 1: return reduce(operator.add, [crossproduct(ss[1:], row + [i], level + 1) for i in ss[0]]) else: return [row + [i] for i in ss[0]]
Returns the cross-product of the sets given in `ss`.
crossproduct
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def permuteToBlocks(arr, blockshape): """Permute an array so that it consists of linearized blocks. Example: A two-dimensional array of the form 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 would be turned into an array like this with (2, 2) blocks: 0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15 """ if len(blockshape) < 2: raise ValueError("Need more than one dimension.") elif len(blockshape) == 2: blockheight, blockwidth = blockshape return permuteToBlocks2d(arr, blockheight, blockwidth) elif len(blockshape) == 3: blockdepth, blockheight, blockwidth = blockshape return permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth) else: raise NotImplementedError("Only for dimensions 2 and 3.")
Permute an array so that it consists of linearized blocks. Example: A two-dimensional array of the form 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 would be turned into an array like this with (2, 2) blocks: 0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
permuteToBlocks
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def triu2flat(m): """ Flattens an upper triangular matrix, returning a vector of the non-zero elements. """ dim = m.shape[0] res = zeros(dim * (dim + 1) / 2) index = 0 for row in range(dim): res[index:index + dim - row] = m[row, row:] index += dim - row return res
Flattens an upper triangular matrix, returning a vector of the non-zero elements.
triu2flat
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def flat2triu(a, dim): """ Produces an upper triangular matrix of dimension dim from the elements of the given vector. """ res = zeros((dim, dim)) index = 0 for row in range(dim): res[row, row:] = a[index:index + dim - row] index += dim - row return res
Produces an upper triangular matrix of dimension dim from the elements of the given vector.
flat2triu
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def blockList2Matrix(l): """ Converts a list of matrices into a corresponding big block-diagonal one. """ dims = [m.shape[0] for m in l] s = sum(dims) res = zeros((s, s)) index = 0 for i in range(len(l)): d = dims[i] m = l[i] res[index:index + d, index:index + d] = m index += d return res
Converts a list of matrices into a corresponding big block-diagonal one.
blockList2Matrix
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def blockCombine(l): """ Produce a matrix from a list of lists of its components. """ l = [list(map(mat, row)) for row in l] hdims = [m.shape[1] for m in l[0]] hs = sum(hdims) vdims = [row[0].shape[0] for row in l] vs = sum(vdims) res = zeros((hs, vs)) vindex = 0 for i, row in enumerate(l): hindex = 0 for j, m in enumerate(row): res[vindex:vindex + vdims[i], hindex:hindex + hdims[j]] = m hindex += hdims[j] vindex += vdims[i] return res
Produce a matrix from a list of lists of its components.
blockCombine
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def avgFoundAfter(decreasingTargetValues, listsOfActualValues, batchSize=1, useMedian=False): """ Determine the average number of steps to reach a certain value (for the first time), given a list of value sequences. If a value is not always encountered, the length of the longest sequence is used. Returns an array. """ from scipy import sum numLists = len(listsOfActualValues) longest = max(list(map(len, listsOfActualValues))) # gather a list of indices of first encounters res = [[0] for _ in range(numLists)] for tval in decreasingTargetValues: for li, l in enumerate(listsOfActualValues): lres = res[li] found = False for i in range(lres[-1], len(l)): if l[i] <= tval: lres.append(i) found = True break if not found: lres.append(longest) tmp = array(res) if useMedian: resx = median(tmp, axis=0)[1:] else: resx = sum(tmp, axis=0)[1:] / float(numLists) return resx * batchSize
Determine the average number of steps to reach a certain value (for the first time), given a list of value sequences. If a value is not always encountered, the length of the longest sequence is used. Returns an array.
avgFoundAfter
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def matchingDict(d, selection, require_existence=False): """ Determines if the dictionary d conforms to the specified selection, i.e. if a (key, x) is in the selection, then if key is in d as well it must be x or contained in x (if x is a list). """ for k, v in list(selection.items()): if k in d: if isinstance(v, list): if d[k] not in v: return False else: if d[k] != v: return False elif require_existence: return False return True
Determines if the dictionary d conforms to the specified selection, i.e. if a (key, x) is in the selection, then if key is in d as well it must be x or contained in x (if x is a list).
matchingDict
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def subDict(d, allowedkeys, flip=False): """ Returns a new dictionary with a subset of the entries of d that have on of the (dis-)allowed keys.""" res = {} for k, v in list(d.items()): if (k in allowedkeys) ^ flip: res[k] = v return res
Returns a new dictionary with a subset of the entries of d that have on of the (dis-)allowed keys.
subDict
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def dictCombinations(listdict): """ Iterates over dictionaries that go through every possible combination of key-value pairs as specified in the lists of values for each key in listdict.""" listdict = listdict.copy() if len(listdict) == 0: return [{}] k, vs = listdict.popitem() res = dictCombinations(listdict) if isinstance(vs, list) or isinstance(vs, tuple): res = [dict(d, **{k:v}) for d in res for v in sorted(set(vs))] else: res = [dict(d, **{k:vs}) for d in res] return res
Iterates over dictionaries that go through every possible combination of key-value pairs as specified in the lists of values for each key in listdict.
dictCombinations
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def r_argmax(v): """ Acts like scipy argmax, but break ties randomly. """ if len(v) == 1: return 0 maxbid = max(v) maxbidders = [i for (i, b) in enumerate(v) if b==maxbid] return choice(maxbidders)
Acts like scipy argmax, but break ties randomly.
r_argmax
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def all_argmax(x): """ Return the indices of all values that are equal to the maximum: no breaking ties. """ m = max(x) return [i for i, v in enumerate(x) if v == m]
Return the indices of all values that are equal to the maximum: no breaking ties.
all_argmax
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def sparse_orth(d): """ Constructs a sparse orthogonal matrix. The method is described in: Gi-Sang Cheon et al., Constructions for the sparsest orthogonal matrices, Bull. Korean Math. Soc 36 (1999) No.1 pp.199-129 """ from scipy.sparse import eye from scipy import r_, pi, sin, cos if d%2 == 0: seq = r_[0:d:2,1:d-1:2] else: seq = r_[0:d-1:2,1:d:2] Q = eye(d,d).tocsc() for i in seq: theta = random() * 2 * pi flip = (random() - 0.5)>0; Qi = eye(d,d).tocsc() Qi[i,i] = cos(theta) Qi[(i+1),i] = sin(theta) if flip > 0: Qi[i,(i+1)] = -sin(theta) Qi[(i+1),(i+1)] = cos(theta) else: Qi[i,(i+1)] = sin(theta) Qi[(i+1),(i+1)] = -cos(theta) Q = Q*Qi; return Q
Constructs a sparse orthogonal matrix. The method is described in: Gi-Sang Cheon et al., Constructions for the sparsest orthogonal matrices, Bull. Korean Math. Soc 36 (1999) No.1 pp.199-129
sparse_orth
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def binArr2int(arr): """ Convert a binary array into its (long) integer representation. """ from numpy import packbits tmp2 = packbits(arr.astype(int)) return sum(val * 256 ** i for i, val in enumerate(tmp2[::-1]))
Convert a binary array into its (long) integer representation.
binArr2int
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def seedit(seed=0): """ Fixed seed makes for repeatability, but there may be two different random number generators involved. """ import random import numpy random.seed(seed) numpy.random.seed(seed)
Fixed seed makes for repeatability, but there may be two different random number generators involved.
seedit
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def weightedUtest(g1, w1, g2, w2):
    """Confidence level of the assertion: 'the values of g2 are higher
    than those of g1' (a weighted variant of the Mann-Whitney U test,
    adapted from the scipy.stats version).

    Every element carries a weight giving how often it is present: two
    identical entries of weight w are equivalent to one entry of
    weight 2w.

    Reference: "Studies in Continuous Black-box Optimization",
    Schaul, 2011 [appendix B].

    TODO: make more efficient for large sets.
    """
    from scipy.stats.distributions import norm
    import numpy
    total1 = sum(w1)
    total2 = sum(w2)
    # weighted U statistic over all pairs: ties count half, wins count fully
    ustat = sum(wa * wb * (0.5 if a == b else 1.0)
                for a, wa in zip(g1, w1)
                for b, wb in zip(g2, w2)
                if a >= b)
    expected = total1 * total2 / 2.
    stddev = numpy.sqrt(total1 * total2 * (total1 + total2 + 1) / 12.)
    # normal approximation of the U distribution
    return norm.cdf((ustat - expected) / stddev)
Determines the confidence level of the assertion: 'The values of g2 are higher than those of g1'. (adapted from the scipy.stats version) Twist: here the elements of each group have associated weights, corresponding to how often they are present (i.e. two identical entries with weight w are equivalent to one entry with weight 2w). Reference: "Studies in Continuous Black-box Optimization", Schaul, 2011 [appendix B]. TODO: make more efficient for large sets.
weightedUtest
python
pybrain/pybrain
pybrain/utilities.py
https://github.com/pybrain/pybrain/blob/master/pybrain/utilities.py
BSD-3-Clause
def __init__(self, indim, start=0, stop=1, step=0.1):
    """Initialize the Gaussian process object.

    :arg indim: input dimension
    :key start: start of the interval the GP is sampled on.
    :key stop: end of the sampling interval.
    :key step: step size of the sampling interval.
    :note: start, stop and step may each be a scalar or a tuple of
        size 'indim'.
    """
    # definition of the sampling grid
    self.start = start
    self.stop = stop
    self.step = step
    self.indim = indim
    # training data -- initially empty
    self.trainx = zeros((0, indim), float)
    self.trainy = zeros((0), float)
    self.noise = zeros((0), float)
    # test locations and the trivial initial prediction over them
    self.mean = 0
    self.testx = self._buildGrid()
    self.pred_mean = zeros(len(self.testx))
    self.pred_cov = eye(len(self.testx))
    self.calculated = True
    self.autonoise = False
    # default hyperparameters (their semantics are defined where they
    # are consumed by the kernel computation)
    self.hyper = (0.5, 2.0, 0.1)
initializes the gaussian process object. :arg indim: input dimension :key start: start of interval for sampling the GP. :key stop: stop of interval for sampling the GP. :key step: stepsize for sampling interval. :note: start, stop, step can either be scalars or tuples of size 'indim'.
__init__
python
pybrain/pybrain
pybrain/auxiliary/gaussprocess.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/gaussprocess.py
BSD-3-Clause
def trainOnDataset(self, dataset):
    """Replace the training set with the contents of a
    SequentialDataSet that has 'indim' input dimensions and a scalar
    target."""
    inp_dim = dataset.getDimension('input')
    tgt_dim = dataset.getDimension('target')
    assert inp_dim == self.indim
    assert tgt_dim == 1
    self.trainx = dataset.getField('input')
    self.trainy = ravel(dataset.getField('target'))
    # fixed, uniform per-sample noise level for every training point
    self.noise = array([0.001] * len(self.trainx))
    # predictions must be recomputed now that the data has changed
    self.calculated = False
takes a SequentialDataSet with indim input dimension and scalar target
trainOnDataset
python
pybrain/pybrain
pybrain/auxiliary/gaussprocess.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/gaussprocess.py
BSD-3-Clause
def addDataset(self, dataset):
    """Append the points of the given dataset to the training set."""
    assert dataset.getDimension('input') == self.indim
    assert dataset.getDimension('target') == 1
    new_x = dataset.getField('input')
    new_y = ravel(dataset.getField('target'))
    self.trainx = r_[self.trainx, new_x]
    self.trainy = r_[self.trainy, new_y]
    # rebuild the uniform per-sample noise to match the enlarged set
    self.noise = array([0.001] * len(self.trainx))
    self.calculated = False
adds the points from the dataset to the training set
addDataset
python
pybrain/pybrain
pybrain/auxiliary/gaussprocess.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/gaussprocess.py
BSD-3-Clause
def __init__(self):
    """Set up the optimizer with standard parameter values (typical
    values/ranges are given in the trailing comments)."""
    # --- backprop settings ---
    self.alpha = 0.1            # learning rate (0.1-0.001, down to 1e-7 for RNNs)
    self.alphadecay = 1.0       # multiplicative alpha decay (0.999; 1.0 = disabled)
    self.momentum = 0.0         # momentum factor (commonly 0.1 or 0.9)
    self.momentumvector = None
    # --- rprop settings ---
    self.rprop = False          # switch to resilient-propagation updates
    self.deltamax = 5.0         # maximum step width (1 - 20)
    self.deltamin = 0.01        # minimum step width (0.01 - 1e-6)
    # the remaining parameters do not normally need to be changed
    self.deltanull = 0.1
    self.etaplus = 1.2
    self.etaminus = 0.5
    self.lastgradient = None
initialize algorithms with standard parameters (typical values given in parentheses)
__init__
python
pybrain/pybrain
pybrain/auxiliary/gradientdescent.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/gradientdescent.py
BSD-3-Clause
def init(self, values):
    """Prepare the internal data structures; call this *after* the
    algorithm to use (rprop vs. plain backprop) has been selected.

    :arg values: the list (or array) of parameters to perform gradient
        descent on; it is copied, the original is not modified.
    """
    assert isinstance(values, ndarray)
    n = len(values)
    self.values = values.copy()
    if not self.rprop:
        # plain gradient descent: momentum buffer, no gradient history
        self.momentumvector = zeros(n)
        self.lastgradient = None
    else:
        # rprop: per-parameter step widths, seeded with deltanull
        self.lastgradient = zeros(n, dtype='float64')
        self.rprop_theta = self.lastgradient + self.deltanull
        self.momentumvector = None
call this to initialize data structures *after* algorithm to use has been selected :arg values: the list (or array) of parameters to perform gradient descent on (will be copied, original not modified)
init
python
pybrain/pybrain
pybrain/auxiliary/gradientdescent.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/gradientdescent.py
BSD-3-Clause
def __call__(self, gradient, error=None): """ calculates parameter change based on given gradient and returns updated parameters """ # check if gradient has correct dimensionality, then make array """ assert len(gradient) == len(self.values) gradient_arr = asarray(gradient) if self.rprop: rprop_theta = self.rprop_theta # update parameters self.values += sign(gradient_arr) * rprop_theta # update rprop meta parameters dirSwitch = self.lastgradient * gradient_arr rprop_theta[dirSwitch > 0] *= self.etaplus idx = dirSwitch < 0 rprop_theta[idx] *= self.etaminus gradient_arr[idx] = 0 # upper and lower bound for both matrices rprop_theta = rprop_theta.clip(min=self.deltamin, max=self.deltamax) # save current gradients to compare with in next time step self.lastgradient = gradient_arr.copy() self.rprop_theta = rprop_theta else: # update momentum vector (momentum = 0 clears it) self.momentumvector *= self.momentum # update parameters (including momentum) self.momentumvector += self.alpha * gradient_arr self.alpha *= self.alphadecay # update parameters self.values += self.momentumvector return self.values
calculates parameter change based on given gradient and returns updated parameters
__call__
python
pybrain/pybrain
pybrain/auxiliary/gradientdescent.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/gradientdescent.py
BSD-3-Clause
def importanceMixing(oldpoints, oldpdf, newpdf, newdistr, forcedRefresh=0.01):
    """Perform importance mixing between two successive sample sets.

    Given a set of points, an old and a new pdf-function for them and a
    generator function for new points, produce the indices of the old
    points worth recycling plus freshly drawn replacements for the rest.

    :arg oldpoints: samples drawn under the old distribution.
    :arg oldpdf: density function of the old distribution.
    :arg newpdf: density function of the new distribution.
    :arg newdistr: zero-argument callable producing one fresh sample.
    :key forcedRefresh: minimal fraction of points replaced regardless.
    :return: (reuse indices, new points); together they always account
        for len(oldpoints) samples.
    """
    batch = len(oldpoints)
    min_fresh = max(1, batch * forcedRefresh)
    reuseindices = []
    # rejection step: keep an old sample with probability given by the
    # density ratio new/old, damped by the forced refresh rate
    for index, point in enumerate(oldpoints):
        if uniform(0, 1) < (1 - forcedRefresh) * newpdf(point) / oldpdf(point):
            reuseindices.append(index)
            # never use only old samples
            if batch - len(reuseindices) <= min_fresh:
                break
    # refill step: draw candidates from the new distribution until the
    # batch size is restored (draw order of r/sample kept as before)
    newpoints = []
    while len(reuseindices) + len(newpoints) < batch:
        r = uniform(0, 1)
        candidate = newdistr()
        if r < forcedRefresh or r < 1 - oldpdf(candidate) / newpdf(candidate):
            newpoints.append(candidate)
    return reuseindices, newpoints
Implements importance mixing. Given a set of points, an old and a new pdf-function for them and a generator function for new points, it produces a list of indices of the old points to be reused and a list of new points. Parameter (optional): forced refresh rate.
importanceMixing
python
pybrain/pybrain
pybrain/auxiliary/importancemixing.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/importancemixing.py
BSD-3-Clause
def reduceDim(data, dim, func='pca'):
    """Project the datapoints onto their first `dim` principal
    components.

    :arg data: matrix of shape (n, d) holding n points of dimension d.
    :arg dim: target dimensionality.
    :key func: name of the module-level function ('pca', 'pPca', ...)
        used to compute the components.
    :raises ValueError: if `func` does not name a known function.
    """
    if func not in globals():
        raise ValueError('Unknown function to calc principal components')
    components = globals()[func](data, dim)
    centered = asmatrix(makeCentered(data))
    return (components * centered.T).T
Reduce the dimension of datapoints to dim via principal component analysis. A matrix of shape (n, d) specifies n points of dimension d.
reduceDim
python
pybrain/pybrain
pybrain/auxiliary/pca.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/pca.py
BSD-3-Clause
def pca(data, dim):
    """Return the first `dim` principal components of `data`.

    Every row of `data` is one point in the data space.  Note that the
    selected eigenvectors are taken as *columns* of `eigvec` and the
    result is transposed at the end, so the components end up as the
    rows of the returned matrix.

    :arg data: array of shape (n, d), n points of dimension d.
    :arg dim: number of components to keep; must be <= d.
    """
    assert dim <= data.shape[1], \
        "dim must be less or equal than the original dimension"
    # We have to make a copy of the original data and substract the mean
    # of every entry
    data = makeCentered(data)
    cm = cov(data.T)
    # OPT only calculate the dim first eigenvectors here
    # The following calculation may seem a bit "weird" but also correct to me.
    # The eigenvectors with the dim highest eigenvalues have to be selected
    # We keep track of the indexes via enumerate to restore the right ordering
    # later.
    eigval, eigvec = eig(cm)
    # pair each eigenvalue with its original column index in eigvec
    eigval = [(val, ind) for ind, val in enumerate(eigval)]
    eigval.sort()
    eigval[:-dim] = [] # remove all but the highest dim elements
    # now we have to bring them back in the right order
    # NOTE(review): sorting (index, value) pairs with reverse=True orders
    # the kept components by *descending original index*, not by
    # eigenvalue magnitude -- intentional per the comment above, but the
    # ordering of the output rows follows column position, not variance.
    eig_indexes = [(ind, val) for val, ind in eigval]
    eig_indexes.sort(reverse=True)
    eig_indexes = [ind for ind, val in eig_indexes]
    return eigvec.take(eig_indexes, 1).T
Return the first dim principal components as colums of a matrix. Every row of the matrix resembles a point in the data space.
pca
python
pybrain/pybrain
pybrain/auxiliary/pca.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/pca.py
BSD-3-Clause
def pPca(data, dim):
    """Return a matrix which contains the first `dim` dimensions
    principal components of `data` (rows of `data` are datapoints).

    Implementation of the iterative 'probabilistic PCA' algorithm.
    """
    num = data.shape[1]
    data = asmatrix(makeCentered(data))
    # start from a random projection
    W = asmatrix(standard_normal((num, dim)))
    while True:
        # E-step: expected latent coordinates under the current W
        E = inv(W.T * W) * W.T * data.T
        # M-step: re-estimate W, keeping the previous value around for
        # the convergence test
        W_prev = W
        W = data.T * E.T * inv(E * E.T)
        if abs(W - W_prev).max() < 0.001:
            break
    return W.T
Return a matrix which contains the first `dim` dimensions principal components of data. data is a matrix which's rows correspond to datapoints. Implementation of the 'probabilistic PCA' algorithm.
pPca
python
pybrain/pybrain
pybrain/auxiliary/pca.py
https://github.com/pybrain/pybrain/blob/master/pybrain/auxiliary/pca.py
BSD-3-Clause
def __init__(self, inp, target=1, nb_classes=0, class_labels=None): """Initialize an empty dataset. `inp` is used to specify the dimensionality of the input. While the number of targets is given by implicitly by the training samples, it can also be set explicity by `nb_classes`. To give the classes names, supply an iterable of strings as `class_labels`.""" # FIXME: hard to keep nClasses synchronized if appendLinked() etc. is used. SupervisedDataSet.__init__(self, inp, target) self.addField('class', 1) self.nClasses = nb_classes if len(self) > 0: # calculate class histogram, if we already have data self.calculateStatistics() self.convertField('target', int) if class_labels is None: self.class_labels = list(set(self.getField('target').flatten())) else: self.class_labels = class_labels # copy classes (may be changed into other representation) self.setField('class', self.getField('target'))
Initialize an empty dataset. `inp` is used to specify the dimensionality of the input. While the number of targets is given by implicitly by the training samples, it can also be set explicity by `nb_classes`. To give the classes names, supply an iterable of strings as `class_labels`.
__init__
python
pybrain/pybrain
pybrain/datasets/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/datasets/classification.py
BSD-3-Clause
def load_matlab(cls, fname):
    """Create a dataset by reading a Matlab file containing one variable
    called 'data', an array of shape nSamples x (nFeatures + 1) that
    contains the class in the first column and the features in the rest.

    :arg fname: path of the Matlab file to load.
    :return: a new dataset instance built as cls(features, labels).
    """
    from mlabwrap import mlab #@UnresolvedImport
    d = mlab.load(fname)
    # bugfix: the features (columns 1:) are the input and the first
    # column is the class target -- previously the two arguments were
    # swapped, contradicting both the docstring and `load_libsvm`
    # (which calls cls(features, labels)).
    return cls(d.data[:, 1:], d.data[:, 0])
Create a dataset by reading a Matlab file containing one variable called 'data' which is an array of nSamples * nFeatures + 1 and contains the class in the first column.
load_matlab
python
pybrain/pybrain
pybrain/datasets/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/datasets/classification.py
BSD-3-Clause
def load_libsvm(cls, f):
    """Create a dataset by reading a sparse LIBSVM/SVMlight format file
    (with labels only).

    Format, one sample per line (feature numbers start at 1)::

        <class> <featnr>:<featval> <featnr>:<featval> ...

    Missing feature numbers are filled in as 0.0, so every sample is
    densified to `nFeat` features, where `nFeat` is the highest feature
    number seen anywhere in the file.  Features on a line are assumed to
    be in increasing feature-number order (as in standard LIBSVM files).

    Robustness fix: blank lines used to crash both passes with an
    IndexError; they are now skipped.

    :arg f: a seekable file-like object (it is read twice).
    :return: a new dataset instance built as cls(features, labels).
    """
    # first pass: find the maximum feature number used in the file
    nFeat = 0
    for line in f:
        tokens = line.split()
        if not tokens:
            continue        # skip blank lines
        n = int(tokens[-1].split(':')[0])
        if n > nFeat:
            nFeat = n
    f.seek(0)
    labels = []
    features = []
    # second pass: read and densify every sample
    for line in f:
        tokens = line.split()
        if not tokens:
            continue        # skip blank lines
        label = int(tokens[0])
        feat = []
        nextidx = 1
        for r in tokens[1:]:
            # construct list of features, taking care of sparsity
            (idx, val) = r.split(':')
            idx = int(idx)
            # zero-fill the gap between the previous and this feature
            for _ in range(nextidx, idx):
                feat.append(0.0)
            feat.append(float(val))
            nextidx = idx + 1
        # zero-fill the tail up to nFeat
        for _ in range(nextidx, nFeat + 1):
            feat.append(0.0)
        features.append(feat[:])  # [:] causes copy
        labels.append([label])
    DS = cls(features, labels)
    return DS
Create a dataset by reading a sparse LIBSVM/SVMlight format file (with labels only).
load_libsvm
python
pybrain/pybrain
pybrain/datasets/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/datasets/classification.py
BSD-3-Clause
def __add__(self, other):
    """Return a new dataset combining the patterns of both operands.

    The two datasets must have the same type, agree on input dimension
    and number of classes, and use the scalar (non 1-of-k) target
    representation; otherwise an exception is raised.
    """
    # guard clauses replace the original if/elif chain
    if type(self) != type(other):
        raise TypeError('DataSets to be added must agree in type')
    if self.indim != other.indim:
        raise TypeError('DataSets to be added must agree in input dimensions')
    if self.outdim != 1 or other.outdim != 1:
        raise TypeError('Cannot add DataSets in 1-of-k representation')
    if self.nClasses != other.nClasses:
        raise IndexError('Number of classes does not agree')
    # copy self, then replay every pattern of the other dataset
    result = self.copy()
    for pat in other:
        result.addSample(*pat)
    result.assignClasses()
    return result
Adds the patterns of two datasets, if dimensions and type match.
__add__
python
pybrain/pybrain
pybrain/datasets/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/datasets/classification.py
BSD-3-Clause