Dataset Viewer
Auto-converted to Parquet
Columns (types and value ranges as reported by the viewer):

    repo          string   lengths 7 to 90
    file_url      string   lengths 81 to 315
    file_path     string   lengths 4 to 228
    content       string   lengths 0 to 32.8k
    language      string   1 distinct class
    license       string   7 distinct classes
    commit_sha    string   length 40 (fixed)
    retrieved_at  date     2026-01-04 14:38:15 to 2026-01-05 02:33:18
    truncated     bool     2 classes
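Because the dataset is auto-converted to Parquet, the rows below can also be read programmatically. A minimal sketch with pandas, assuming huggingface_hub is installed so pandas can resolve hf:// paths; the dataset id and shard name are hypothetical placeholders, not this dataset's real coordinates:

```python
import pandas as pd

# Hypothetical dataset id and shard name; substitute the real ones from the
# dataset's file listing before running.
df = pd.read_parquet(
    "hf://datasets/some-user/some-code-dataset/data/train-00000-of-00001.parquet"
)

# Columns mirror the schema above.
print(df[["repo", "file_path", "language", "truncated"]].head())
```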
repo: public-apis/public-apis
file_url: https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/tests/__init__.py
file_path: scripts/tests/__init__.py
content:

# -*- coding: utf-8 -*-

language: python
license: MIT
commit_sha: a58c76cd32ef345da3e4b7252c7b47275e866ae7
retrieved_at: 2026-01-04T14:38:15.124778Z
truncated: false
repo: public-apis/public-apis
file_url: https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/tests/test_validate_format.py
file_path: scripts/tests/test_validate_format.py
content:

# -*- coding: utf-8 -*-

import unittest

from validate.format import error_message
from validate.format import get_categories_content
from validate.format import check_alphabetical_order
from validate.format import check_title
from validate.format import check_description, max_description_length
from validate.format import check_auth, auth_keys
from validate.format import check_https, https_keys
from validate.format import check_cors, cors_keys
from validate.format import check_entry
from validate.format import check_file_format, min_entries_per_category, num_segments


class TestValidadeFormat(unittest.TestCase):
    def test_error_message_return_and_return_type(self):
        line_num_unity = 1
        line_num_ten = 10
        line_num_hundred = 100
        line_num_thousand = 1000

        msg = 'This is a unit test'

        err_msg_unity = error_message(line_num_unity, msg)
        err_msg_ten = error_message(line_num_ten, msg)
        err_msg_hundred = error_message(line_num_hundred, msg)
        err_msg_thousand = error_message(line_num_thousand, msg)

        self.assertIsInstance(err_msg_unity, str)
        self.assertIsInstance(err_msg_ten, str)
        self.assertIsInstance(err_msg_hundred, str)
        self.assertIsInstance(err_msg_thousand, str)

        self.assertEqual(err_msg_unity, '(L002) This is a unit test')
        self.assertEqual(err_msg_ten, '(L011) This is a unit test')
        self.assertEqual(err_msg_hundred, '(L101) This is a unit test')
        self.assertEqual(err_msg_thousand, '(L1001) This is a unit test')

    def test_if_get_categories_content_return_correct_data_of_categories(self):
        fake_contents = [
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '',
            '### B',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
        ]

        result = get_categories_content(fake_contents)

        self.assertIsInstance(result, tuple)

        categories, category_line_num = result

        self.assertIsInstance(categories, dict)
        self.assertIsInstance(category_line_num, dict)

        expected_result = ({'A': ['AA', 'AB'], 'B': ['BA', 'BB']}, {'A': 0, 'B': 6})

        for res, ex_res in zip(result, expected_result):
            with self.subTest():
                self.assertEqual(res, ex_res)

    def test_if_check_alphabetical_order_return_correct_msg_error(self):
        correct_lines = [
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '',
            '### B',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
        ]
        incorrect_lines = [
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '',
            '### B',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
        ]

        err_msgs_1 = check_alphabetical_order(correct_lines)
        err_msgs_2 = check_alphabetical_order(incorrect_lines)

        self.assertIsInstance(err_msgs_1, list)
        self.assertIsInstance(err_msgs_2, list)

        self.assertEqual(len(err_msgs_1), 0)
        self.assertEqual(len(err_msgs_2), 2)

        expected_err_msgs = [
            '(L001) A category is not alphabetical order',
            '(L007) B category is not alphabetical order'
        ]

        for err_msg, ex_err_msg in zip(err_msgs_2, expected_err_msgs):
            with self.subTest():
                self.assertEqual(err_msg, ex_err_msg)

    def test_check_title_with_correct_title(self):
        raw_title = '[A](https://www.ex.com)'
        err_msgs = check_title(0, raw_title)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 0)
        self.assertEqual(err_msgs, [])

    def test_check_title_with_markdown_syntax_incorrect(self):
        raw_title = '[A(https://www.ex.com)'
        err_msgs = check_title(0, raw_title)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        expected_err_msg = '(L001) Title syntax should be "[TITLE](LINK)"'

        self.assertEqual(err_msg, expected_err_msg)

    def test_check_title_with_api_at_the_end_of_the_title(self):
        raw_title = '[A API](https://www.ex.com)'
        err_msgs = check_title(0, raw_title)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        expected_err_msg = '(L001) Title should not end with "... API". Every entry is an API here!'

        self.assertEqual(err_msg, expected_err_msg)

    def test_check_description_with_correct_description(self):
        desc = 'This is a fake description'
        err_msgs = check_description(0, desc)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 0)
        self.assertEqual(err_msgs, [])

    def test_check_description_with_first_char_is_not_capitalized(self):
        desc = 'this is a fake description'
        err_msgs = check_description(0, desc)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        expected_err_msg = '(L001) first character of description is not capitalized'

        self.assertIsInstance(err_msg, str)
        self.assertEqual(err_msg, expected_err_msg)

    def test_check_description_with_punctuation_in_the_end(self):
        base_desc = 'This is a fake description'
        punctuation = r"""!"#$%&'*+,-./:;<=>?@[\]^_`{|}~"""
        desc_with_punc = [base_desc + punc for punc in punctuation]

        for desc in desc_with_punc:
            with self.subTest():
                err_msgs = check_description(0, desc)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 1)

                err_msg = err_msgs[0]
                expected_err_msg = f'(L001) description should not end with {desc[-1]}'

                self.assertIsInstance(err_msg, str)
                self.assertEqual(err_msg, expected_err_msg)

    def test_check_description_that_exceeds_the_character_limit(self):
        long_desc = 'Desc' * max_description_length
        long_desc_length = len(long_desc)
        err_msgs = check_description(0, long_desc)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        expected_err_msg = f'(L001) description should not exceed {max_description_length} characters (currently {long_desc_length})'

        self.assertIsInstance(err_msg, str)
        self.assertEqual(err_msg, expected_err_msg)

    def test_check_auth_with_valid_auth(self):
        auth_valid = [f'`{auth}`' for auth in auth_keys if auth != 'No']
        auth_valid.append('No')

        for auth in auth_valid:
            with self.subTest():
                err_msgs = check_auth(0, auth)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 0)
                self.assertEqual(err_msgs, [])

    def test_check_auth_without_backtick(self):
        auth_without_backtick = [auth for auth in auth_keys if auth != 'No']

        for auth in auth_without_backtick:
            with self.subTest():
                err_msgs = check_auth(0, auth)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 1)

                err_msg = err_msgs[0]
                expected_err_msg = '(L001) auth value is not enclosed with `backticks`'

                self.assertIsInstance(err_msg, str)
                self.assertEqual(err_msg, expected_err_msg)

    def test_check_auth_with_invalid_auth(self):
        auth_invalid_without_backtick = ['Yes', 'yes', 'no', 'random', 'Unknown']
        auth_invalid_with_backtick = ['`Yes`', '`yes`', '`no`', '`random`', '`Unknown`']

        for auth in auth_invalid_without_backtick:
            with self.subTest():
                err_msgs = check_auth(0, auth)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 2)

                err_msg_1 = err_msgs[0]
                err_msg_2 = err_msgs[1]

                expected_err_msg_1 = f'(L001) auth value is not enclosed with `backticks`'
                expected_err_msg_2 = f'(L001) {auth} is not a valid Auth option'

                self.assertIsInstance(err_msg_1, str)
                self.assertIsInstance(err_msg_2, str)

                self.assertEqual(err_msg_1, expected_err_msg_1)
                self.assertEqual(err_msg_2, expected_err_msg_2)

        for auth in auth_invalid_with_backtick:
            with self.subTest():
                err_msgs = check_auth(0, auth)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 1)

                err_msg = err_msgs[0]
                expected_err_msg = f'(L001) {auth} is not a valid Auth option'

                self.assertIsInstance(err_msg, str)
                self.assertEqual(err_msg, expected_err_msg)

    def test_check_https_with_valid_https(self):
        for https in https_keys:
            with self.subTest():
                err_msgs = check_https(0, https)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 0)
                self.assertEqual(err_msgs, [])

    def test_check_https_with_invalid_https(self):
        invalid_https_keys = ['yes', 'no', 'Unknown', 'https', 'http']

        for https in invalid_https_keys:
            with self.subTest():
                err_msgs = check_https(0, https)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 1)

                err_msg = err_msgs[0]
                expected_err_msg = f'(L001) {https} is not a valid HTTPS option'

                self.assertIsInstance(err_msg, str)
                self.assertEqual(err_msg, expected_err_msg)

    def test_check_cors_with_valid_cors(self):
        for cors in cors_keys:
            with self.subTest():
                err_msgs = check_cors(0, cors)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 0)
                self.assertEqual(err_msgs, [])

    def test_check_cors_with_invalid_cors(self):
        invalid_cors_keys = ['yes', 'no', 'unknown', 'cors']

        for cors in invalid_cors_keys:
            with self.subTest():
                err_msgs = check_cors(0, cors)

                self.assertIsInstance(err_msgs, list)
                self.assertEqual(len(err_msgs), 1)

                err_msg = err_msgs[0]
                expected_err_msg = f'(L001) {cors} is not a valid CORS option'

                self.assertIsInstance(err_msg, str)
                self.assertEqual(err_msg, expected_err_msg)

    def test_check_entry_with_correct_segments(self):
        correct_segments = ['[A](https://www.ex.com)', 'Desc', '`apiKey`', 'Yes', 'Yes']
        err_msgs = check_entry(0, correct_segments)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 0)
        self.assertEqual(err_msgs, [])

    def test_check_entry_with_incorrect_segments(self):
        incorrect_segments = ['[A API](https://www.ex.com)', 'desc.', 'yes', 'yes', 'yes']
        err_msgs = check_entry(0, incorrect_segments)

        expected_err_msgs = [
            '(L001) Title should not end with "... API". Every entry is an API here!',
            '(L001) first character of description is not capitalized',
            '(L001) description should not end with .',
            '(L001) auth value is not enclosed with `backticks`',
            '(L001) yes is not a valid Auth option',
            '(L001) yes is not a valid HTTPS option',
            '(L001) yes is not a valid CORS option'
        ]

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 7)

        for err_msg in err_msgs:
            with self.subTest():
                self.assertIsInstance(err_msg, str)

        self.assertEqual(err_msgs, expected_err_msgs)

    def test_check_file_format_with_correct_format(self):
        correct_format = [
            '## Index',
            '* [A](#a)',
            '* [B](#b)',
            '',
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '',
            '### B',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
        ]

        err_msgs = check_file_format(lines=correct_format)

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 0)
        self.assertEqual(err_msgs, [])

    def test_check_file_format_with_category_header_not_added_to_index(self):
        incorrect_format = [
            '## Index',
            '',
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
        ]

        err_msgs = check_file_format(lines=incorrect_format)
        expected_err_msg = '(L003) category header (A) not added to Index section'

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        self.assertEqual(err_msg, expected_err_msg)

    def test_check_file_format_with_category_without_min_entries(self):
        incorrect_format = [
            '## Index',
            '* [A](#a)',
            '* [B](#b)',
            '',
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '',
            '### B',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [BC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
        ]

        category_with_err = 'A'
        num_in_category = 1

        err_msgs = check_file_format(lines=incorrect_format)
        expected_err_msg = f'(L005) {category_with_err} category does not have the minimum {min_entries_per_category} entries (only has {num_in_category})'

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        self.assertEqual(err_msg, expected_err_msg)

    def test_check_file_format_entry_without_all_necessary_columns(self):
        incorrect_format = [
            '## Index',
            '* [A](#a)',
            '',
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AB](https://www.ex.com) | Desc | `apiKey` |',  # missing https and cors
            '| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
        ]

        current_segments_num = 3

        err_msgs = check_file_format(lines=incorrect_format)
        expected_err_msg = f'(L008) entry does not have all the required columns (have {current_segments_num}, need {num_segments})'

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        self.assertEqual(err_msg, expected_err_msg)

    def test_check_file_format_without_1_space_between_the_segments(self):
        incorrect_format = [
            '## Index',
            '* [A](#a)',
            '',
            '### A',
            'API | Description | Auth | HTTPS | CORS |',
            '|---|---|---|---|---|',
            '| [AA](https://www.ex.com) | Desc |`apiKey`| Yes | Yes |',  # space between segment of auth column missing
            '| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
            '| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
        ]

        err_msgs = check_file_format(lines=incorrect_format)
        expected_err_msg = f'(L007) each segment must start and end with exactly 1 space'

        self.assertIsInstance(err_msgs, list)
        self.assertEqual(len(err_msgs), 1)

        err_msg = err_msgs[0]
        self.assertEqual(err_msg, expected_err_msg)

language: python
license: MIT
commit_sha: a58c76cd32ef345da3e4b7252c7b47275e866ae7
retrieved_at: 2026-01-04T14:38:15.124778Z
truncated: false
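An aside on the convention these tests pin down: `error_message` takes a 0-indexed line number and renders a 1-indexed label, zero-padded to at least three digits. A quick check, presumably run from the scripts/ directory so that the `validate` package resolves (directory layout inferred from the file paths above):

```python
from validate.format import error_message

assert error_message(0, 'msg') == '(L001) msg'      # 0-indexed in, 1-indexed label out
assert error_message(10, 'msg') == '(L011) msg'     # zero-padded to three digits
assert error_message(1000, 'msg') == '(L1001) msg'  # padding grows past 999
```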
repo: public-apis/public-apis
file_url: https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/tests/test_validate_links.py
file_path: scripts/tests/test_validate_links.py
content:

# -*- coding: utf-8 -*-

import unittest

from validate.links import find_links_in_text
from validate.links import check_duplicate_links
from validate.links import fake_user_agent
from validate.links import get_host_from_link
from validate.links import has_cloudflare_protection


class FakeResponse():
    def __init__(self, code: int, headers: dict, text: str) -> None:
        self.status_code = code
        self.headers = headers
        self.text = text


class TestValidateLinks(unittest.TestCase):
    def setUp(self):
        self.duplicate_links = [
            'https://www.example.com',
            'https://www.example.com',
            'https://www.example.com',
            'https://www.anotherexample.com',
        ]
        self.no_duplicate_links = [
            'https://www.firstexample.com',
            'https://www.secondexample.com',
            'https://www.anotherexample.com',
        ]
        self.code_200 = 200
        self.code_403 = 403
        self.code_503 = 503
        self.cloudflare_headers = {'Server': 'cloudflare'}
        self.no_cloudflare_headers = {'Server': 'google'}
        self.text_with_cloudflare_flags = '403 Forbidden Cloudflare We are checking your browser...'
        self.text_without_cloudflare_flags = 'Lorem Ipsum'

    def test_find_link_in_text(self):
        text = """
            # this is valid

            http://example.com?param1=1&param2=2#anchor
            https://www.example.com?param1=1&param2=2#anchor
            https://www.example.com.br
            https://www.example.com.gov.br
            [Example](https://www.example.com?param1=1&param2=2#anchor)
            lorem ipsum https://www.example.com?param1=1&param2=2#anchor
            https://www.example.com?param1=1&param2=2#anchor lorem ipsum

            # this not is valid

            example.com
            https:example.com
            https:/example.com
            https//example.com
            https//.com
        """

        links = find_links_in_text(text)

        self.assertIsInstance(links, list)
        self.assertEqual(len(links), 7)

        for link in links:
            with self.subTest():
                self.assertIsInstance(link, str)

    def test_find_link_in_text_with_invalid_argument(self):
        with self.assertRaises(TypeError):
            find_links_in_text()
            find_links_in_text(1)
            find_links_in_text(True)

    def test_if_check_duplicate_links_has_the_correct_return(self):
        result_1 = check_duplicate_links(self.duplicate_links)
        result_2 = check_duplicate_links(self.no_duplicate_links)

        self.assertIsInstance(result_1, tuple)
        self.assertIsInstance(result_2, tuple)

        has_duplicate_links, links = result_1
        no_duplicate_links, no_links = result_2

        self.assertTrue(has_duplicate_links)
        self.assertFalse(no_duplicate_links)

        self.assertIsInstance(links, list)
        self.assertIsInstance(no_links, list)

        self.assertEqual(len(links), 2)
        self.assertEqual(len(no_links), 0)

    def test_if_fake_user_agent_has_a_str_as_return(self):
        user_agent = fake_user_agent()
        self.assertIsInstance(user_agent, str)

    def test_get_host_from_link(self):
        links = [
            'example.com',
            'https://example.com',
            'https://www.example.com',
            'https://www.example.com.br',
            'https://www.example.com/route',
            'https://www.example.com?p=1&q=2',
            'https://www.example.com#anchor'
        ]

        for link in links:
            host = get_host_from_link(link)
            with self.subTest():
                self.assertIsInstance(host, str)
                self.assertNotIn('://', host)
                self.assertNotIn('/', host)
                self.assertNotIn('?', host)
                self.assertNotIn('#', host)

        with self.assertRaises(TypeError):
            get_host_from_link()

    def test_has_cloudflare_protection_with_code_403_and_503_in_response(self):
        resp_with_cloudflare_protection_code_403 = FakeResponse(
            code=self.code_403,
            headers=self.cloudflare_headers,
            text=self.text_with_cloudflare_flags
        )
        resp_with_cloudflare_protection_code_503 = FakeResponse(
            code=self.code_503,
            headers=self.cloudflare_headers,
            text=self.text_with_cloudflare_flags
        )

        result1 = has_cloudflare_protection(resp_with_cloudflare_protection_code_403)
        result2 = has_cloudflare_protection(resp_with_cloudflare_protection_code_503)

        self.assertTrue(result1)
        self.assertTrue(result2)

    def test_has_cloudflare_protection_when_there_is_no_protection(self):
        resp_without_cloudflare_protection1 = FakeResponse(
            code=self.code_200,
            headers=self.no_cloudflare_headers,
            text=self.text_without_cloudflare_flags
        )
        resp_without_cloudflare_protection2 = FakeResponse(
            code=self.code_403,
            headers=self.no_cloudflare_headers,
            text=self.text_without_cloudflare_flags
        )
        resp_without_cloudflare_protection3 = FakeResponse(
            code=self.code_503,
            headers=self.no_cloudflare_headers,
            text=self.text_without_cloudflare_flags
        )

        result1 = has_cloudflare_protection(resp_without_cloudflare_protection1)
        result2 = has_cloudflare_protection(resp_without_cloudflare_protection2)
        result3 = has_cloudflare_protection(resp_without_cloudflare_protection3)

        self.assertFalse(result1)
        self.assertFalse(result2)
        self.assertFalse(result3)

language: python
license: MIT
commit_sha: a58c76cd32ef345da3e4b7252c7b47275e866ae7
retrieved_at: 2026-01-04T14:38:15.124778Z
truncated: false
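The link-regex behavior these tests encode is easy to misread: scheme-less hosts only count as links when they carry a path slash. A small check in the same spirit, under the same scripts/ working-directory assumption as above:

```python
from validate.links import find_links_in_text

sample = 'see https://www.example.com?p=1#a and example.com plus https:/example.com'
print(find_links_in_text(sample))
# Only the first URL is returned: per the tests above, 'example.com' and
# 'https:/example.com' fall in the "this not is valid" group.
```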
repo: public-apis/public-apis
file_url: https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/validate/format.py
file_path: scripts/validate/format.py
content:

# -*- coding: utf-8 -*-

import re
import sys

from string import punctuation
from typing import List, Tuple, Dict

# Temporary replacement
# The descriptions that contain () at the end must adapt to the new policy later
punctuation = punctuation.replace('()', '')

anchor = '###'
auth_keys = ['apiKey', 'OAuth', 'X-Mashape-Key', 'User-Agent', 'No']
https_keys = ['Yes', 'No']
cors_keys = ['Yes', 'No', 'Unknown']

index_title = 0
index_desc = 1
index_auth = 2
index_https = 3
index_cors = 4

num_segments = 5

min_entries_per_category = 3
max_description_length = 100

anchor_re = re.compile(anchor + '\s(.+)')
category_title_in_index_re = re.compile('\*\s\[(.*)\]')
link_re = re.compile('\[(.+)\]\((http.*)\)')

# Type aliases
APIList = List[str]
Categories = Dict[str, APIList]
CategoriesLineNumber = Dict[str, int]


def error_message(line_number: int, message: str) -> str:
    line = line_number + 1
    return f'(L{line:03d}) {message}'


def get_categories_content(contents: List[str]) -> Tuple[Categories, CategoriesLineNumber]:
    categories = {}
    category_line_num = {}

    for line_num, line_content in enumerate(contents):
        if line_content.startswith(anchor):
            category = line_content.split(anchor)[1].strip()
            categories[category] = []
            category_line_num[category] = line_num
            continue

        if not line_content.startswith('|') or line_content.startswith('|---'):
            continue

        raw_title = [
            raw_content.strip() for raw_content in line_content.split('|')[1:-1]
        ][0]

        title_match = link_re.match(raw_title)
        if title_match:
            title = title_match.group(1).upper()
            categories[category].append(title)

    return (categories, category_line_num)


def check_alphabetical_order(lines: List[str]) -> List[str]:
    err_msgs = []
    categories, category_line_num = get_categories_content(contents=lines)

    for category, api_list in categories.items():
        if sorted(api_list) != api_list:
            err_msg = error_message(
                category_line_num[category],
                f'{category} category is not alphabetical order'
            )
            err_msgs.append(err_msg)

    return err_msgs


def check_title(line_num: int, raw_title: str) -> List[str]:
    err_msgs = []
    title_match = link_re.match(raw_title)

    # url should be wrapped in "[TITLE](LINK)" Markdown syntax
    if not title_match:
        err_msg = error_message(line_num, 'Title syntax should be "[TITLE](LINK)"')
        err_msgs.append(err_msg)
    else:
        # do not allow "... API" in the entry title
        title = title_match.group(1)
        if title.upper().endswith(' API'):
            err_msg = error_message(line_num, 'Title should not end with "... API". Every entry is an API here!')
            err_msgs.append(err_msg)

    return err_msgs


def check_description(line_num: int, description: str) -> List[str]:
    err_msgs = []

    first_char = description[0]
    if first_char.upper() != first_char:
        err_msg = error_message(line_num, 'first character of description is not capitalized')
        err_msgs.append(err_msg)

    last_char = description[-1]
    if last_char in punctuation:
        err_msg = error_message(line_num, f'description should not end with {last_char}')
        err_msgs.append(err_msg)

    desc_length = len(description)
    if desc_length > max_description_length:
        err_msg = error_message(line_num, f'description should not exceed {max_description_length} characters (currently {desc_length})')
        err_msgs.append(err_msg)

    return err_msgs


def check_auth(line_num: int, auth: str) -> List[str]:
    err_msgs = []
    backtick = '`'

    if auth != 'No' and (not auth.startswith(backtick) or not auth.endswith(backtick)):
        err_msg = error_message(line_num, 'auth value is not enclosed with `backticks`')
        err_msgs.append(err_msg)

    if auth.replace(backtick, '') not in auth_keys:
        err_msg = error_message(line_num, f'{auth} is not a valid Auth option')
        err_msgs.append(err_msg)

    return err_msgs


def check_https(line_num: int, https: str) -> List[str]:
    err_msgs = []

    if https not in https_keys:
        err_msg = error_message(line_num, f'{https} is not a valid HTTPS option')
        err_msgs.append(err_msg)

    return err_msgs


def check_cors(line_num: int, cors: str) -> List[str]:
    err_msgs = []

    if cors not in cors_keys:
        err_msg = error_message(line_num, f'{cors} is not a valid CORS option')
        err_msgs.append(err_msg)

    return err_msgs


def check_entry(line_num: int, segments: List[str]) -> List[str]:
    raw_title = segments[index_title]
    description = segments[index_desc]
    auth = segments[index_auth]
    https = segments[index_https]
    cors = segments[index_cors]

    title_err_msgs = check_title(line_num, raw_title)
    desc_err_msgs = check_description(line_num, description)
    auth_err_msgs = check_auth(line_num, auth)
    https_err_msgs = check_https(line_num, https)
    cors_err_msgs = check_cors(line_num, cors)

    err_msgs = [
        *title_err_msgs,
        *desc_err_msgs,
        *auth_err_msgs,
        *https_err_msgs,
        *cors_err_msgs
    ]

    return err_msgs


def check_file_format(lines: List[str]) -> List[str]:
    err_msgs = []
    category_title_in_index = []

    alphabetical_err_msgs = check_alphabetical_order(lines)
    err_msgs.extend(alphabetical_err_msgs)

    num_in_category = min_entries_per_category + 1
    category = ''
    category_line = 0

    for line_num, line_content in enumerate(lines):
        category_title_match = category_title_in_index_re.match(line_content)
        if category_title_match:
            category_title_in_index.append(category_title_match.group(1))

        # check each category for the minimum number of entries
        if line_content.startswith(anchor):
            category_match = anchor_re.match(line_content)
            if category_match:
                if category_match.group(1) not in category_title_in_index:
                    err_msg = error_message(line_num, f'category header ({category_match.group(1)}) not added to Index section')
                    err_msgs.append(err_msg)
            else:
                err_msg = error_message(line_num, 'category header is not formatted correctly')
                err_msgs.append(err_msg)

            if num_in_category < min_entries_per_category:
                err_msg = error_message(category_line, f'{category} category does not have the minimum {min_entries_per_category} entries (only has {num_in_category})')
                err_msgs.append(err_msg)

            category = line_content.split(' ')[1]
            category_line = line_num
            num_in_category = 0
            continue

        # skips lines that we do not care about
        if not line_content.startswith('|') or line_content.startswith('|---'):
            continue

        num_in_category += 1
        segments = line_content.split('|')[1:-1]
        if len(segments) < num_segments:
            err_msg = error_message(line_num, f'entry does not have all the required columns (have {len(segments)}, need {num_segments})')
            err_msgs.append(err_msg)
            continue

        for segment in segments:
            # every line segment should start and end with exactly 1 space
            if len(segment) - len(segment.lstrip()) != 1 or len(segment) - len(segment.rstrip()) != 1:
                err_msg = error_message(line_num, 'each segment must start and end with exactly 1 space')
                err_msgs.append(err_msg)

        segments = [segment.strip() for segment in segments]
        entry_err_msgs = check_entry(line_num, segments)
        err_msgs.extend(entry_err_msgs)

    return err_msgs


def main(filename: str) -> None:
    with open(filename, mode='r', encoding='utf-8') as file:
        lines = list(line.rstrip() for line in file)

    file_format_err_msgs = check_file_format(lines)

    if file_format_err_msgs:
        for err_msg in file_format_err_msgs:
            print(err_msg)
        sys.exit(1)


if __name__ == '__main__':
    num_args = len(sys.argv)

    if num_args < 2:
        print('No .md file passed (file should contain Markdown table syntax)')
        sys.exit(1)

    filename = sys.argv[1]

    main(filename)

language: python
license: MIT
commit_sha: a58c76cd32ef345da3e4b7252c7b47275e866ae7
retrieved_at: 2026-01-04T14:38:15.124778Z
truncated: false
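`check_entry` above is the per-row aggregator; feeding it the same deliberately bad row the test suite uses shows how the per-column messages accumulate. A sketch, runnable from scripts/ under the same path assumption as before:

```python
from validate.format import check_entry

# The bad row from the tests: bad title, bad description, and invalid
# un-backticked values in the Auth/HTTPS/CORS columns.
segments = ['[A API](https://www.ex.com)', 'desc.', 'yes', 'yes', 'yes']
for msg in check_entry(0, segments):
    print(msg)
# Seven '(L001) ...' messages, one per violated rule (see the tests above).
```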
repo: public-apis/public-apis
file_url: https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/validate/links.py
file_path: scripts/validate/links.py
content:

# -*- coding: utf-8 -*-

import re
import sys
import random

from typing import List, Tuple

import requests
from requests.models import Response


def find_links_in_text(text: str) -> List[str]:
    """Find links in a text and return a list of URLs."""
    link_pattern = re.compile(r'((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'\".,<>?«»“”‘’]))')

    raw_links = re.findall(link_pattern, text)

    links = [
        str(raw_link[0]) for raw_link in raw_links
    ]

    return links


def find_links_in_file(filename: str) -> List[str]:
    """Find links in a file and return a list of URLs from text file."""
    with open(filename, mode='r', encoding='utf-8') as file:
        readme = file.read()
        index_section = readme.find('## Index')
        if index_section == -1:
            index_section = 0
        content = readme[index_section:]

    links = find_links_in_text(content)

    return links


def check_duplicate_links(links: List[str]) -> Tuple[bool, List]:
    """Check for duplicated links.

    Returns a tuple with True or False and duplicate list.
    """
    seen = {}
    duplicates = []
    has_duplicate = False

    for link in links:
        link = link.rstrip('/')
        if link not in seen:
            seen[link] = 1
        else:
            if seen[link] == 1:
                duplicates.append(link)

    if duplicates:
        has_duplicate = True

    return (has_duplicate, duplicates)


def fake_user_agent() -> str:
    """Faking user agent as some hosting services block not-whitelisted UA."""
    user_agents = [
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko)',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
    ]
    return random.choice(user_agents)


def get_host_from_link(link: str) -> str:
    host = link.split('://', 1)[1] if '://' in link else link

    # Remove routes, arguments and anchors
    if '/' in host:
        host = host.split('/', 1)[0]
    elif '?' in host:
        host = host.split('?', 1)[0]
    elif '#' in host:
        host = host.split('#', 1)[0]

    return host


def has_cloudflare_protection(resp: Response) -> bool:
    """Checks if there is any cloudflare protection in the response.

    Cloudflare implements multiple network protections on a given link,
    this script tries to detect if any of them exist in the response from request.

    Common protections have the following HTTP code as a response:
        - 403: When host header is missing or incorrect (and more)
        - 503: When DDOS protection exists

    See more about it at:
        - https://support.cloudflare.com/hc/en-us/articles/115003014512-4xx-Client-Error
        - https://support.cloudflare.com/hc/en-us/articles/115003011431-Troubleshooting-Cloudflare-5XX-errors
        - https://www.cloudflare.com/ddos/
        - https://superuser.com/a/888526

    Discussions in issues and pull requests:
        - https://github.com/public-apis/public-apis/pull/2409
        - https://github.com/public-apis/public-apis/issues/2960
    """
    code = resp.status_code
    server = resp.headers.get('Server') or resp.headers.get('server')
    cloudflare_flags = [
        '403 Forbidden',
        'cloudflare',
        'Cloudflare',
        'Security check',
        'Please Wait... | Cloudflare',
        'We are checking your browser...',
        'Please stand by, while we are checking your browser...',
        'Checking your browser before accessing',
        'This process is automatic.',
        'Your browser will redirect to your requested content shortly.',
        'Please allow up to 5 seconds',
        'DDoS protection by',
        'Ray ID:',
        'Cloudflare Ray ID:',
        '_cf_chl',
        '_cf_chl_opt',
        '__cf_chl_rt_tk',
        'cf-spinner-please-wait',
        'cf-spinner-redirecting'
    ]

    if code in [403, 503] and server == 'cloudflare':
        html = resp.text
        flags_found = [flag in html for flag in cloudflare_flags]
        any_flag_found = any(flags_found)
        if any_flag_found:
            return True

    return False


def check_if_link_is_working(link: str) -> Tuple[bool, str]:
    """Checks if a link is working.

    If an error is identified when the request for the link occurs,
    the return will be a tuple with the first value True and the second
    value a string containing the error message.

    If no errors are identified, the return will be a tuple with the
    first value False and the second an empty string.
    """
    has_error = False
    error_message = ''

    try:
        resp = requests.get(link, timeout=25, headers={
            'User-Agent': fake_user_agent(),
            'host': get_host_from_link(link)
        })

        code = resp.status_code
        if code >= 400 and not has_cloudflare_protection(resp):
            has_error = True
            error_message = f'ERR:CLT: {code} : {link}'

    except requests.exceptions.SSLError as error:
        has_error = True
        error_message = f'ERR:SSL: {error} : {link}'
    except requests.exceptions.ConnectionError as error:
        has_error = True
        error_message = f'ERR:CNT: {error} : {link}'
    except (TimeoutError, requests.exceptions.ConnectTimeout):
        has_error = True
        error_message = f'ERR:TMO: {link}'
    except requests.exceptions.TooManyRedirects as error:
        has_error = True
        error_message = f'ERR:TMR: {error} : {link}'
    except (Exception, requests.exceptions.RequestException) as error:
        has_error = True
        error_message = f'ERR:UKN: {error} : {link}'

    return (has_error, error_message)


def check_if_list_of_links_are_working(list_of_links: List[str]) -> List[str]:
    error_messages = []
    for link in list_of_links:
        has_error, error_message = check_if_link_is_working(link)
        if has_error:
            error_messages.append(error_message)

    return error_messages


def start_duplicate_links_checker(links: List[str]) -> None:
    print('Checking for duplicate links...')
    has_duplicate_link, duplicates_links = check_duplicate_links(links)

    if has_duplicate_link:
        print(f'Found duplicate links:')
        for duplicate_link in duplicates_links:
            print(duplicate_link)
        sys.exit(1)
    else:
        print('No duplicate links.')


def start_links_working_checker(links: List[str]) -> None:
    print(f'Checking if {len(links)} links are working...')
    errors = check_if_list_of_links_are_working(links)

    if errors:
        num_errors = len(errors)
        print(f'Apparently {num_errors} links are not working properly. See in:')
        for error_message in errors:
            print(error_message)
        sys.exit(1)


def main(filename: str, only_duplicate_links_checker: bool) -> None:
    links = find_links_in_file(filename)
    start_duplicate_links_checker(links)

    if not only_duplicate_links_checker:
        start_links_working_checker(links)


if __name__ == '__main__':
    num_args = len(sys.argv)
    only_duplicate_links_checker = False

    if num_args < 2:
        print('No .md file passed')
        sys.exit(1)
    elif num_args == 3:
        third_arg = sys.argv[2].lower()
        if third_arg == '-odlc' or third_arg == '--only_duplicate_links_checker':
            only_duplicate_links_checker = True
        else:
            print(f'Third invalid argument. Usage: python {__file__} [-odlc | --only_duplicate_links_checker]')
            sys.exit(1)

    filename = sys.argv[1]

    main(filename, only_duplicate_links_checker)

language: python
license: MIT
commit_sha: a58c76cd32ef345da3e4b7252c7b47275e866ae7
retrieved_at: 2026-01-04T14:38:15.124778Z
truncated: false
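From the `__main__` block above, the checker takes a Markdown file plus an optional flag; the README path below is an assumption about the repository layout, not something the script encodes. A usage sketch:

```python
# Shell usage (run from scripts/; the README path is assumed):
#   python validate/links.py ../README.md          # duplicates + live checks
#   python validate/links.py ../README.md -odlc    # duplicate check only
# The short flag maps to --only_duplicate_links_checker in __main__.

from validate.links import check_duplicate_links

# Trailing slashes are stripped before comparison, so these two collide.
print(check_duplicate_links(['https://example.com', 'https://example.com/']))
# -> (True, ['https://example.com'])
```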
repo: public-apis/public-apis
file_url: https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/validate/__init__.py
file_path: scripts/validate/__init__.py
content:

# -*- coding: utf-8 -*-

from validate import format
from validate import links

language: python
license: MIT
commit_sha: a58c76cd32ef345da3e4b7252c7b47275e866ae7
retrieved_at: 2026-01-04T14:38:15.124778Z
truncated: false
repo: deepseek-ai/DeepSeek-V3
file_url: https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/convert.py
file_path: inference/convert.py
content:

import os
import shutil
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm, trange

import torch
from safetensors.torch import safe_open, save_file


mapping = {
    "embed_tokens": ("embed", 0),
    "input_layernorm": ("attn_norm", None),
    "post_attention_layernorm": ("ffn_norm", None),
    "q_proj": ("wq", 0),
    "q_a_proj": ("wq_a", None),
    "q_a_layernorm": ("q_norm", None),
    "q_b_proj": ("wq_b", 0),
    "kv_a_proj_with_mqa": ("wkv_a", None),
    "kv_a_layernorm": ("kv_norm", None),
    "kv_b_proj": ("wkv_b", 0),
    "o_proj": ("wo", 1),
    "gate": ("gate", None),
    "gate_proj": ("w1", 0),
    "down_proj": ("w2", 1),
    "up_proj": ("w3", 0),
    "norm": ("norm", None),
    "lm_head": ("head", 0),
    "scale": ("scale", None),
}


def main(hf_ckpt_path, save_path, n_experts, mp):
    """
    Converts and saves model checkpoint files into a specified format.

    Args:
        hf_ckpt_path (str): Path to the directory containing the input checkpoint files.
        save_path (str): Path to the directory where the converted checkpoint files will be saved.
        n_experts (int): Total number of experts in the model.
        mp (int): Model parallelism factor.

    Returns:
        None
    """
    torch.set_num_threads(8)
    n_local_experts = n_experts // mp
    state_dicts = [{} for _ in range(mp)]

    for file_path in tqdm(glob(os.path.join(hf_ckpt_path, "*.safetensors"))):
        with safe_open(file_path, framework="pt", device="cpu") as f:
            for name in f.keys():
                if "model.layers.61" in name:
                    continue
                param: torch.Tensor = f.get_tensor(name)
                if name.startswith("model."):
                    name = name[len("model."):]
                name = name.replace("self_attn", "attn")
                name = name.replace("mlp", "ffn")
                name = name.replace("weight_scale_inv", "scale")
                name = name.replace("e_score_correction_bias", "bias")
                key = name.split(".")[-2]
                assert key in mapping, f"Key {key} not found in mapping"
                new_key, dim = mapping[key]
                name = name.replace(key, new_key)
                for i in range(mp):
                    new_param = param
                    if "experts" in name and "shared_experts" not in name:
                        idx = int(name.split(".")[-3])
                        if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:
                            continue
                    elif dim is not None:
                        assert param.size(dim) % mp == 0, f"Dimension {dim} must be divisible by {mp}"
                        shard_size = param.size(dim) // mp
                        new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()
                    state_dicts[i][name] = new_param

    os.makedirs(save_path, exist_ok=True)

    for i in trange(mp):
        save_file(state_dicts[i], os.path.join(save_path, f"model{i}-mp{mp}.safetensors"))

    for file_path in glob(os.path.join(hf_ckpt_path, "*token*")):
        new_file_path = os.path.join(save_path, os.path.basename(file_path))
        shutil.copyfile(file_path, new_file_path)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--hf-ckpt-path", type=str, required=True)
    parser.add_argument("--save-path", type=str, required=True)
    parser.add_argument("--n-experts", type=int, required=True)
    parser.add_argument("--model-parallel", type=int, required=True)
    args = parser.parse_args()
    assert args.n_experts % args.model_parallel == 0, "Number of experts must be divisible by model parallelism"
    main(args.hf_ckpt_path, args.save_path, args.n_experts, args.model_parallel)

language: python
license: MIT
commit_sha: 9b4e9788e4a3a731f7567338ed15d3ec549ce03b
retrieved_at: 2026-01-04T14:38:15.450976Z
truncated: false
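The converter's CLI surface is the four required flags in its argparse block; the values below are illustrative placeholders, not canonical settings. The runnable snippet replays the tensor-name remapping the loop performs, on a hypothetical checkpoint key:

```python
# Illustrative CLI call (flags from the argparse definition; paths and
# numbers are placeholders, with --n-experts divisible by --model-parallel):
#   python inference/convert.py --hf-ckpt-path /ckpts/hf --save-path /ckpts/out \
#       --n-experts 256 --model-parallel 16

# Replaying the name remapping on a hypothetical checkpoint key:
mapping = {"q_proj": ("wq", 0)}  # excerpt of the full mapping above
name = "model.layers.0.self_attn.q_proj.weight"
name = name[len("model."):].replace("self_attn", "attn")
key = name.split(".")[-2]               # -> "q_proj"
new_key, dim = mapping[key]
print(name.replace(key, new_key), dim)  # layers.0.attn.wq.weight 0
```

Tensors whose mapping entry carries a non-None dimension are additionally sharded along that dimension across the model-parallel ranks, while expert tensors are routed whole to the rank that owns that expert index.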
repo: deepseek-ai/DeepSeek-V3
file_url: https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/model.py
file_path: inference/model.py
content:

import math
from dataclasses import dataclass
from typing import Tuple, Optional, Literal

import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist

from kernel import act_quant, weight_dequant, fp8_gemm


world_size = 1
rank = 0
block_size = 128
gemm_impl: Literal["bf16", "fp8"] = "bf16"
attn_impl: Literal["naive", "absorb"] = "absorb"


@dataclass
class ModelArgs:
    """
    Data class for defining model arguments and hyperparameters.

    Attributes:
        max_batch_size (int): Maximum batch size.
        max_seq_len (int): Maximum sequence length.
        dtype (Literal["bf16", "fp8"]): Data type for computations.
        scale_fmt (Optional[str]): Format for quantization scale.
        vocab_size (int): Vocabulary size.
        dim (int): Model dimension.
        inter_dim (int): Intermediate dimension for MLP layers.
        moe_inter_dim (int): Intermediate dimension for MoE layers.
        n_layers (int): Number of transformer layers.
        n_dense_layers (int): Number of dense layers in the model.
        n_heads (int): Number of attention heads.
        n_routed_experts (int): Number of routed experts for MoE layers.
        n_shared_experts (int): Number of shared experts for MoE layers.
        n_activated_experts (int): Number of activated experts in MoE layers.
        n_expert_groups (int): Number of expert groups.
        n_limited_groups (int): Number of limited groups for MoE routing.
        score_func (Literal["softmax", "sigmoid"]): Scoring function for MoE routing.
        route_scale (float): Scaling factor for routing scores.
        q_lora_rank (int): LoRA rank for query projections.
        kv_lora_rank (int): LoRA rank for key-value projections.
        qk_nope_head_dim (int): Dimension for query-key projections without positional embeddings.
        qk_rope_head_dim (int): Dimension for query-key projections with rotary embeddings.
        v_head_dim (int): Dimension for value projections.
        original_seq_len (int): Original sequence length.
        rope_theta (float): Base for rotary positional encoding.
        rope_factor (float): Scaling factor for extended sequence lengths.
        beta_fast (int): Fast beta correction factor.
        beta_slow (int): Slow beta correction factor.
        mscale (float): Scaling factor for extended attention.
    """
    max_batch_size: int = 8
    max_seq_len: int = 4096 * 4
    dtype: Literal["bf16", "fp8"] = "bf16"
    scale_fmt: Optional[str] = None
    vocab_size: int = 102400
    dim: int = 2048
    inter_dim: int = 10944
    moe_inter_dim: int = 1408
    n_layers: int = 27
    n_dense_layers: int = 1
    n_heads: int = 16
    # moe
    n_routed_experts: int = 64
    n_shared_experts: int = 2
    n_activated_experts: int = 6
    n_expert_groups: int = 1
    n_limited_groups: int = 1
    score_func: Literal["softmax", "sigmoid"] = "softmax"
    route_scale: float = 1.
    # mla
    q_lora_rank: int = 0
    kv_lora_rank: int = 512
    qk_nope_head_dim: int = 128
    qk_rope_head_dim: int = 64
    v_head_dim: int = 128
    # yarn
    original_seq_len: int = 4096
    rope_theta: float = 10000.0
    rope_factor: float = 40
    beta_fast: int = 32
    beta_slow: int = 1
    mscale: float = 1.


class ParallelEmbedding(nn.Module):
    """
    Embedding layer with parallelism support across distributed processes.

    Args:
        vocab_size (int): Vocabulary size.
        dim (int): Embedding dimension.
    """
    def __init__(self, vocab_size: int, dim: int):
        super().__init__()
        self.vocab_size = vocab_size
        self.dim = dim
        assert vocab_size % world_size == 0, f"Vocabulary size must be divisible by world size (world_size={world_size})"
        self.part_vocab_size = (vocab_size // world_size)
        self.vocab_start_idx = rank * self.part_vocab_size
        self.vocab_end_idx = self.vocab_start_idx + self.part_vocab_size
        self.weight = nn.Parameter(torch.empty(self.part_vocab_size, self.dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for parallel embedding layer.

        Args:
            x (torch.Tensor): Input tensor containing token indices.

        Returns:
            torch.Tensor: Embedded representations.

        Raises:
            ValueError: If `world_size` is not defined.
        """
        if world_size > 1:
            mask = (x < self.vocab_start_idx) | (x >= self.vocab_end_idx)
            x = x - self.vocab_start_idx
            x[mask] = 0
        y = F.embedding(x, self.weight)
        if world_size > 1:
            y[mask] = 0
            dist.all_reduce(y)
        return y


def linear(x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, scale_fmt: Optional[str] = None) -> torch.Tensor:
    """
    Applies a linear transformation to the incoming data: y = xA^T + b.

    This function supports specialized implementations based on quantization
    and tensor formats.

    Args:
        x (torch.Tensor): The input tensor.
        weight (torch.Tensor): The weight tensor. It may be quantized and
            requires dequantization for certain cases.
        bias (Optional[torch.Tensor]): The bias tensor to be added. Default is None.

    Returns:
        torch.Tensor: The result of the linear transformation, which may involve
        quantization-aware computations depending on the input parameters.

    Notes:
        - If `weight` is quantized (e.g., `element_size() == 1`), a dequantized version
          is used for computation.
        - If `gemm_impl == "bf16"`, dequantization and a `bf16` GEMM operation are applied.
        - For other cases, the function applies quantization to `x` and uses `fp8_gemm` for computation.
    """
    if weight.element_size() > 1:
        return F.linear(x, weight, bias)
    elif gemm_impl == "bf16":
        weight = weight_dequant(weight, weight.scale)
        return F.linear(x, weight, bias)
    else:
        x, scale = act_quant(x, block_size, scale_fmt)
        y = fp8_gemm(x, scale, weight, weight.scale)
        if bias is not None:
            y += bias
        return y


class Linear(nn.Module):
    """
    Custom linear layer with support for quantized weights and optional bias.

    Args:
        in_features (int): Number of input features.
        out_features (int): Number of output features.
        bias (bool): Whether to include a bias term. Defaults to False.
        dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
    """
    dtype = torch.bfloat16
    scale_fmt: Optional[str] = None

    def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.empty(out_features, in_features, dtype=dtype or Linear.dtype))
        if self.weight.element_size() == 1:
            scale_out_features = (out_features + block_size - 1) // block_size
            scale_in_features = (in_features + block_size - 1) // block_size
            self.weight.scale = self.scale = nn.Parameter(torch.empty(scale_out_features, scale_in_features, dtype=torch.float32))
        else:
            self.register_parameter("scale", None)
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features))
        else:
            self.register_parameter("bias", None)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the custom linear layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Transformed tensor after linear computation.
        """
        return linear(x, self.weight, self.bias, self.scale_fmt)


class ColumnParallelLinear(Linear):
    """
    Linear layer with column parallelism, splitting output features across distributed processes.

    Args:
        in_features (int): Number of input features.
        out_features (int): Total number of output features.
        bias (bool): Whether to include a bias term. Defaults to False.
        dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
        assert out_features % world_size == 0, f"Output features must be divisible by world size (world_size={world_size})"
        self.part_out_features = out_features // world_size
        super().__init__(in_features, self.part_out_features, bias, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for column parallel linear layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Transformed tensor with column-parallel computation.
        """
        y = linear(x, self.weight, self.bias)
        return y


class RowParallelLinear(Linear):
    """
    Linear layer with row parallelism, splitting input features across distributed processes.

    Args:
        in_features (int): Total number of input features.
        out_features (int): Number of output features.
        bias (bool): Whether to include a bias term. Defaults to False.
        dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
        assert in_features % world_size == 0, f"Input features must be divisible by world size (world_size={world_size})"
        self.part_in_features = in_features // world_size
        super().__init__(self.part_in_features, out_features, bias, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for row parallel linear layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Transformed tensor with row-parallel computation.
        """
        y = linear(x, self.weight)
        if world_size > 1:
            dist.all_reduce(y)
        if self.bias is not None:
            y += self.bias
        return y


class RMSNorm(nn.Module):
    """
    Root Mean Square Layer Normalization (RMSNorm).

    Args:
        dim (int): Dimension of the input tensor.
        eps (float): Epsilon value for numerical stability. Defaults to 1e-6.
    """
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor):
        """
        Forward pass for RMSNorm.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Normalized tensor with the same shape as input.
        """
        return F.rms_norm(x, (self.dim,), self.weight, self.eps)


def precompute_freqs_cis(args: ModelArgs) -> torch.Tensor:
    """
    Precomputes frequency-based complex exponential values for rotary positional embeddings.

    Args:
        args (ModelArgs): Model arguments containing positional embedding parameters.

    Returns:
        torch.Tensor: Precomputed complex exponential values for positional embeddings.
    """
    dim = args.qk_rope_head_dim
    seqlen = args.max_seq_len
    beta_fast = args.beta_fast
    beta_slow = args.beta_slow
    base = args.rope_theta
    factor = args.rope_factor

    def find_correction_dim(num_rotations, dim, base, max_seq_len):
        """
        Computes the correction dimension for a given number of rotations in the rotary positional embedding.

        Args:
            num_rotations (float): Number of rotations to compute the correction for.
            dim (int): Dimensionality of the embedding space.
            base (float): Base value for the exponential computation.
            max_seq_len (int): Maximum sequence length.

        Returns:
            float: The correction dimension based on the input parameters.
        """
        return dim * math.log(max_seq_len / (num_rotations * 2 * math.pi)) / (2 * math.log(base))

    def find_correction_range(low_rot, high_rot, dim, base, max_seq_len):
        """
        Computes the range of correction dimensions for rotary positional embeddings.

        Args:
            low_rot (float): Lower bound for the number of rotations.
            high_rot (float): Upper bound for the number of rotations.
            dim (int): Dimensionality of the embedding space.
            base (float): Base value for the exponential computation.
            max_seq_len (int): Maximum sequence length.

        Returns:
            Tuple[int, int]: The range of correction dimensions (low, high), clamped to valid indices.
        """
        low = math.floor(find_correction_dim(low_rot, dim, base, max_seq_len))
        high = math.ceil(find_correction_dim(high_rot, dim, base, max_seq_len))
        return max(low, 0), min(high, dim-1)

    def linear_ramp_factor(min, max, dim):
        """
        Computes a linear ramp function used to smooth values between a minimum and maximum range.

        Args:
            min (float): Minimum value for the ramp function.
            max (float): Maximum value for the ramp function.
            dim (int): Dimensionality of the ramp tensor.

        Returns:
            torch.Tensor: A tensor of shape (dim,) with values linearly interpolated
            between 0 and 1, clamped to the range [0, 1].
        """
        if min == max:
            max += 0.001
        linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
        ramp_func = torch.clamp(linear_func, 0, 1)
        return ramp_func

    freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    if seqlen > args.original_seq_len:
        low, high = find_correction_range(beta_fast, beta_slow, dim, base, args.original_seq_len)
        smooth = 1 - linear_ramp_factor(low, high, dim // 2)
        freqs = freqs / factor * (1 - smooth) + freqs * smooth

    t = torch.arange(seqlen)
    freqs = torch.outer(t, freqs)
    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
    return freqs_cis


def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
    """
    Applies rotary positional embeddings to the input tensor.

    Args:
        x (torch.Tensor): Input tensor with positional embeddings to be applied.
        freqs_cis (torch.Tensor): Precomputed complex exponential values for positional embeddings.

    Returns:
        torch.Tensor: Tensor with rotary embeddings applied.
    """
    dtype = x.dtype
    x = torch.view_as_complex(x.float().view(*x.shape[:-1], -1, 2))
    freqs_cis = freqs_cis.view(1, x.size(1), 1, x.size(-1))
    y = torch.view_as_real(x * freqs_cis).flatten(3)
    return y.to(dtype)


class MLA(nn.Module):
    """
    Multi-Head Latent Attention (MLA) Layer.

    Attributes:
        dim (int): Dimensionality of the input features.
        n_heads (int): Number of attention heads.
        n_local_heads (int): Number of local attention heads for distributed systems.
        q_lora_rank (int): Rank for low-rank query projection.
        kv_lora_rank (int): Rank for low-rank key/value projection.
        qk_nope_head_dim (int): Dimensionality of non-positional query/key projections.
        qk_rope_head_dim (int): Dimensionality of rotary-positional query/key projections.
        qk_head_dim (int): Total dimensionality of query/key projections.
        v_head_dim (int): Dimensionality of value projections.
        softmax_scale (float): Scaling factor for softmax in attention computation.
    """
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.dim = args.dim
        self.n_heads = args.n_heads
        self.n_local_heads = args.n_heads // world_size
        self.q_lora_rank = args.q_lora_rank
        self.kv_lora_rank = args.kv_lora_rank
        self.qk_nope_head_dim = args.qk_nope_head_dim
        self.qk_rope_head_dim = args.qk_rope_head_dim
        self.qk_head_dim = args.qk_nope_head_dim + args.qk_rope_head_dim
        self.v_head_dim = args.v_head_dim

        if self.q_lora_rank == 0:
            self.wq = ColumnParallelLinear(self.dim, self.n_heads * self.qk_head_dim)
        else:
            self.wq_a = Linear(self.dim, self.q_lora_rank)
            self.q_norm = RMSNorm(self.q_lora_rank)
            self.wq_b = ColumnParallelLinear(self.q_lora_rank, self.n_heads * self.qk_head_dim)
        self.wkv_a = Linear(self.dim, self.kv_lora_rank + self.qk_rope_head_dim)
        self.kv_norm = RMSNorm(self.kv_lora_rank)
        self.wkv_b = ColumnParallelLinear(self.kv_lora_rank, self.n_heads * (self.qk_nope_head_dim + self.v_head_dim))
        self.wo = RowParallelLinear(self.n_heads * self.v_head_dim, self.dim)
        self.softmax_scale = self.qk_head_dim ** -0.5
        if args.max_seq_len > args.original_seq_len:
            mscale = 0.1 * args.mscale * math.log(args.rope_factor) + 1.0
            self.softmax_scale = self.softmax_scale * mscale * mscale

        if attn_impl == "naive":
            self.register_buffer("k_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.qk_head_dim), persistent=False)
            self.register_buffer("v_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.v_head_dim), persistent=False)
        else:
            self.register_buffer("kv_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.kv_lora_rank), persistent=False)
            self.register_buffer("pe_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.qk_rope_head_dim), persistent=False)

    def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
        """
        Forward pass for the Multi-Head Latent Attention (MLA) Layer.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, seq_len, dim).
            start_pos (int): Starting position in the sequence for caching.
            freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
            mask (Optional[torch.Tensor]): Mask tensor to exclude certain positions from attention.

        Returns:
            torch.Tensor: Output tensor with the same shape as the input.
        """
        bsz, seqlen, _ = x.size()
        end_pos = start_pos + seqlen
        if self.q_lora_rank == 0:
            q = self.wq(x)
        else:
            q = self.wq_b(self.q_norm(self.wq_a(x)))
        q = q.view(bsz, seqlen, self.n_local_heads, self.qk_head_dim)
        q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
        q_pe = apply_rotary_emb(q_pe, freqs_cis)
        kv = self.wkv_a(x)
        kv, k_pe = torch.split(kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
        k_pe = apply_rotary_emb(k_pe.unsqueeze(2), freqs_cis)
        if attn_impl == "naive":
            q = torch.cat([q_nope, q_pe], dim=-1)
            kv = self.wkv_b(self.kv_norm(kv))
            kv = kv.view(bsz, seqlen, self.n_local_heads, self.qk_nope_head_dim + self.v_head_dim)
            k_nope, v = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
            k = torch.cat([k_nope, k_pe.expand(-1, -1, self.n_local_heads, -1)], dim=-1)
            self.k_cache[:bsz, start_pos:end_pos] = k
            self.v_cache[:bsz, start_pos:end_pos] = v
            scores = torch.einsum("bshd,bthd->bsht", q, self.k_cache[:bsz, :end_pos]) * self.softmax_scale
        else:
            wkv_b = self.wkv_b.weight if self.wkv_b.scale is None else weight_dequant(self.wkv_b.weight, self.wkv_b.scale, block_size)
            wkv_b = wkv_b.view(self.n_local_heads, -1, self.kv_lora_rank)
            q_nope = torch.einsum("bshd,hdc->bshc", q_nope, wkv_b[:, :self.qk_nope_head_dim])
            self.kv_cache[:bsz, start_pos:end_pos] = self.kv_norm(kv)
            self.pe_cache[:bsz, start_pos:end_pos] = k_pe.squeeze(2)
            scores = (torch.einsum("bshc,btc->bsht", q_nope, self.kv_cache[:bsz, :end_pos]) +
                      torch.einsum("bshr,btr->bsht", q_pe, self.pe_cache[:bsz, :end_pos])) * self.softmax_scale
        if mask is not None:
            scores += mask.unsqueeze(1)
        scores = scores.softmax(dim=-1, dtype=torch.float32).type_as(x)
        if attn_impl == "naive":
            x = torch.einsum("bsht,bthd->bshd", scores, self.v_cache[:bsz, :end_pos])
        else:
            x = torch.einsum("bsht,btc->bshc", scores, self.kv_cache[:bsz, :end_pos])
            x = torch.einsum("bshc,hdc->bshd", x, wkv_b[:, -self.v_head_dim:])
        x = self.wo(x.flatten(2))
        return x


class MLP(nn.Module):
    """
    Multi-Layer Perceptron (MLP) used as a feed-forward layer.

    Attributes:
        w1 (nn.Module): Linear layer for input-to-hidden transformation.
        w2 (nn.Module): Linear layer for hidden-to-output transformation.
        w3 (nn.Module): Additional linear layer for feature transformation.
    """
    def __init__(self, dim: int, inter_dim: int):
        """
        Initializes the MLP layer.

        Args:
            dim (int): Input and output dimensionality.
            inter_dim (int): Hidden layer dimensionality.
        """
        super().__init__()
        self.w1 = ColumnParallelLinear(dim, inter_dim)
        self.w2 = RowParallelLinear(inter_dim, dim)
        self.w3 = ColumnParallelLinear(dim, inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the MLP layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after MLP computation.
        """
        return self.w2(F.silu(self.w1(x)) * self.w3(x))


class Gate(nn.Module):
    """
    Gating mechanism for routing inputs in a mixture-of-experts (MoE) model.

    Attributes:
        dim (int): Dimensionality of input features.
        topk (int): Number of top experts activated for each input.
        n_groups (int): Number of groups for routing.
        topk_groups (int): Number of groups to route inputs to.
        score_func (str): Scoring function ('softmax' or 'sigmoid').
        route_scale (float): Scaling factor for routing weights.
        weight (torch.nn.Parameter): Learnable weights for the gate.
        bias (Optional[torch.nn.Parameter]): Optional bias term for the gate.
    """
    def __init__(self, args: ModelArgs):
        """
        Initializes the Gate module.

        Args:
            args (ModelArgs): Model arguments containing gating parameters.
        """
        super().__init__()
        self.dim = args.dim
        self.topk = args.n_activated_experts
        self.n_groups = args.n_expert_groups
        self.topk_groups = args.n_limited_groups
        self.score_func = args.score_func
        self.route_scale = args.route_scale
        self.weight = nn.Parameter(torch.empty(args.n_routed_experts, args.dim))
        self.bias = nn.Parameter(torch.empty(args.n_routed_experts, dtype=torch.float32)) if self.dim == 7168 else None

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Forward pass for the gating mechanism.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Routing weights and selected expert indices.
        """
        scores = linear(x, self.weight)
        if self.score_func == "softmax":
            scores = scores.softmax(dim=-1, dtype=torch.float32)
        else:
            scores = scores.sigmoid()
        original_scores = scores
        if self.bias is not None:
            scores = scores + self.bias
        if self.n_groups > 1:
            scores = scores.view(x.size(0), self.n_groups, -1)
            if self.bias is None:
                group_scores = scores.amax(dim=-1)
            else:
                group_scores = scores.topk(2, dim=-1)[0].sum(dim=-1)
            indices = group_scores.topk(self.topk_groups, dim=-1)[1]
            mask = scores.new_ones(x.size(0), self.n_groups, dtype=bool).scatter_(1, indices, False)
            scores = scores.masked_fill_(mask.unsqueeze(-1), float("-inf")).flatten(1)
        indices = torch.topk(scores, self.topk, dim=-1)[1]
        weights = original_scores.gather(1, indices)
        if self.score_func == "sigmoid":
            weights /= weights.sum(dim=-1, keepdim=True)
        weights *= self.route_scale
        return weights.type_as(x), indices


class Expert(nn.Module):
    """
    Expert layer for Mixture-of-Experts (MoE) models.

    Attributes:
        w1 (nn.Module): Linear layer for input-to-hidden transformation.
        w2 (nn.Module): Linear layer for hidden-to-output transformation.
        w3 (nn.Module): Additional linear layer for feature transformation.
    """
    def __init__(self, dim: int, inter_dim: int):
        """
        Initializes the Expert layer.

        Args:
            dim (int): Input and output dimensionality.
            inter_dim (int): Hidden layer dimensionality.
        """
        super().__init__()
        self.w1 = Linear(dim, inter_dim)
        self.w2 = Linear(inter_dim, dim)
        self.w3 = Linear(dim, inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the Expert layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after expert computation.
        """
        return self.w2(F.silu(self.w1(x)) * self.w3(x))


class MoE(nn.Module):
    """
    Mixture-of-Experts (MoE) module.

    Attributes:
        dim (int): Dimensionality of input features.
        n_routed_experts (int): Total number of experts in the model.
        n_local_experts (int): Number of experts handled locally in distributed systems.
        n_activated_experts (int): Number of experts activated for each input.
        gate (nn.Module): Gating mechanism to route inputs to experts.
        experts (nn.ModuleList): List of expert modules.
        shared_experts (nn.Module): Shared experts applied to all inputs.
    """
    def __init__(self, args: ModelArgs):
        """
        Initializes the MoE module.

        Args:
            args (ModelArgs): Model arguments containing MoE parameters.
        """
        super().__init__()
        self.dim = args.dim
        assert args.n_routed_experts % world_size == 0, f"Number of experts must be divisible by world size (world_size={world_size})"
        self.n_routed_experts = args.n_routed_experts
        self.n_local_experts = args.n_routed_experts // world_size
        self.n_activated_experts = args.n_activated_experts
        self.experts_start_idx = rank * self.n_local_experts
        self.experts_end_idx = self.experts_start_idx + self.n_local_experts
        self.gate = Gate(args)
        self.experts = nn.ModuleList([Expert(args.dim, args.moe_inter_dim) if self.experts_start_idx <= i < self.experts_end_idx else None
                                      for i in range(self.n_routed_experts)])
        self.shared_experts = MLP(args.dim, args.n_shared_experts * args.moe_inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the MoE module.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after expert routing and computation.
        """
        shape = x.size()
        x = x.view(-1, self.dim)
        weights, indices = self.gate(x)
        y = torch.zeros_like(x)
        counts = torch.bincount(indices.flatten(), minlength=self.n_routed_experts).tolist()
        for i in range(self.experts_start_idx, self.experts_end_idx):
            if counts[i] == 0:
                continue
            expert = self.experts[i]
            idx, top = torch.where(indices == i)
            y[idx] += expert(x[idx]) * weights[idx, top, None]
        z = self.shared_experts(x)
        if world_size > 1:
            dist.all_reduce(y)
        return (y + z).view(shape)


class Block(nn.Module):
    """
    Transformer block combining attention and feed-forward layers.

    Attributes:
        attn (nn.Module): Attention layer (MLA).
        ffn (nn.Module): Feed-forward network (MLP or MoE).
        attn_norm (nn.Module): Layer normalization for attention.
        ffn_norm (nn.Module): Layer normalization for feed-forward network.
    """
    def __init__(self, layer_id: int, args: ModelArgs):
        """
        Initializes the Transformer block.

        Args:
            layer_id (int): Layer index in the transformer.
            args (ModelArgs): Model arguments containing block parameters.
        """
        super().__init__()
        self.attn = MLA(args)
        self.ffn = MLP(args.dim, args.inter_dim) if layer_id < args.n_dense_layers else MoE(args)
        self.attn_norm = RMSNorm(args.dim)
        self.ffn_norm = RMSNorm(args.dim)

    def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]) -> torch.Tensor:
        """
        Forward pass for the Transformer block.

        Args:
            x (torch.Tensor): Input tensor.
            start_pos (int): Starting position in the sequence.
            freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
            mask (Optional[torch.Tensor]): Mask tensor to exclude certain positions from attention.

        Returns:
            torch.Tensor: Output tensor after block computation.
        """
        x = x + self.attn(self.attn_norm(x), start_pos, freqs_cis, mask)
        x = x + self.ffn(self.ffn_norm(x))
        return x


class Transformer(nn.Module):
    """
    Transformer model with positional embeddings, multiple layers, and output projection.

    Attributes:
        max_seq_len (int): Maximum sequence length for the transformer.
        embed (nn.Module): Embedding layer for input tokens.
        layers (torch.nn.ModuleList): List of transformer blocks.
        norm (nn.Module): Layer normalization applied after all blocks.
        head (nn.Module): Output projection layer mapping to vocabulary size.
        freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
    """
    def __init__(self, args: ModelArgs):
        """
        Initializes the Transformer model.

        Args:
            args (ModelArgs): Model arguments containing transformer parameters.
""" global world_size, rank world_size = dist.get_world_size() if dist.is_initialized() else 1 rank = dist.get_rank() if dist.is_initialized() else 0 Linear.dtype = torch.float8_e4m3fn if args.dtype == "fp8" else torch.bfloat16 Linear.scale_fmt = args.scale_fmt super().__init__() self.max_seq_len = args.max_seq_len self.embed = ParallelEmbedding(args.vocab_size, args.dim) self.layers = torch.nn.ModuleList() for layer_id in range(args.n_layers): self.layers.append(Block(layer_id, args)) self.norm = RMSNorm(args.dim) self.head = ColumnParallelLinear(args.dim, args.vocab_size, dtype=torch.get_default_dtype()) self.register_buffer("freqs_cis", precompute_freqs_cis(args), persistent=False) @torch.inference_mode() def forward(self, tokens: torch.Tensor, start_pos: int = 0): """ Forward pass for the Transformer model. Args: tokens (torch.Tensor): Input tensor of token IDs with shape (batch_size, seq_len). start_pos (int, optional): Starting position in the sequence for rotary embeddings. Defaults to 0. Returns: torch.Tensor: Logits tensor of shape (batch_size, vocab_size). """ seqlen = tokens.size(1) h = self.embed(tokens) freqs_cis = self.freqs_cis[start_pos:start_pos+seqlen] mask = None if seqlen > 1: mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device).triu_(1) for layer in self.layers: h = layer(h, start_pos, freqs_cis, mask) h = self.norm(h)[:, -1] logits = self.head(h) if world_size > 1: all_logits = [torch.empty_like(logits) for _ in range(world_size)] dist.all_gather(all_logits, logits) logits = torch.cat(all_logits, dim=-1) return logits if __name__ == "__main__": torch.set_default_dtype(torch.bfloat16) torch.set_default_device("cuda") torch.manual_seed(0) args = ModelArgs()
python
MIT
9b4e9788e4a3a731f7567338ed15d3ec549ce03b
2026-01-04T14:38:15.450976Z
true
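A note on the routing logic in inference/model.py above: Gate.forward first keeps only the strongest expert groups, then takes the per-token top-k over the surviving experts. Below is a minimal, self-contained sketch of that group-limited top-k, using toy sizes and random scores rather than the real ModelArgs values, and omitting the bias and score-function branches:

import torch

# Toy sizes, not the real configuration: 8 experts in 4 groups,
# keep the best 2 groups, then pick the top-2 experts overall.
n_groups, topk_groups, topk = 4, 2, 2
scores = torch.rand(3, 8)                              # fake router scores for 3 tokens
grouped = scores.view(3, n_groups, -1)                 # (tokens, groups, experts_per_group)
group_scores = grouped.amax(dim=-1)                    # strongest expert score per group
keep = group_scores.topk(topk_groups, dim=-1)[1]       # indices of the groups to keep
mask = torch.ones(3, n_groups, dtype=torch.bool).scatter_(1, keep, False)
grouped = grouped.masked_fill(mask.unsqueeze(-1), float("-inf"))  # drop other groups
indices = grouped.flatten(1).topk(topk, dim=-1)[1]     # chosen expert ids per token
print(indices)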
deepseek-ai/DeepSeek-V3
https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/generate.py
inference/generate.py
import os import json from argparse import ArgumentParser from typing import List import torch import torch.distributed as dist from transformers import AutoTokenizer from safetensors.torch import load_model from model import Transformer, ModelArgs def sample(logits, temperature: float = 1.0): """ Samples a token from the logits using temperature scaling. Args: logits (torch.Tensor): The logits tensor for token predictions. temperature (float, optional): Temperature for scaling logits. Defaults to 1.0. Returns: torch.Tensor: The sampled token. """ logits = logits / max(temperature, 1e-5) probs = torch.softmax(logits, dim=-1) return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1) @torch.inference_mode() def generate( model: Transformer, prompt_tokens: List[List[int]], max_new_tokens: int, eos_id: int, temperature: float = 1.0 ) -> List[List[int]]: """ Generates new tokens based on the given prompt tokens using the specified model. Args: model (Transformer): The transformer model used for token generation. prompt_tokens (List[List[int]]): A list of lists containing the prompt tokens for each sequence. max_new_tokens (int): The maximum number of new tokens to generate. eos_id (int): The end-of-sequence token ID. temperature (float, optional): The temperature value for sampling. Defaults to 1.0. Returns: List[List[int]]: A list of lists containing the generated tokens for each sequence. """ prompt_lens = [len(t) for t in prompt_tokens] assert max(prompt_lens) <= model.max_seq_len, f"Prompt length exceeds model maximum sequence length (max_seq_len={model.max_seq_len})" total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens)) tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda") for i, t in enumerate(prompt_tokens): tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda") prev_pos = 0 finished = torch.tensor([False] * len(prompt_tokens), device="cuda") prompt_mask = tokens != -1 for cur_pos in range(min(prompt_lens), total_len): logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos) if temperature > 0: next_token = sample(logits, temperature) else: next_token = logits.argmax(dim=-1) next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token) tokens[:, cur_pos] = next_token finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id) prev_pos = cur_pos if finished.all(): break completion_tokens = [] for i, toks in enumerate(tokens.tolist()): toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens] if eos_id in toks: toks = toks[:toks.index(eos_id)] completion_tokens.append(toks) return completion_tokens def main( ckpt_path: str, config: str, input_file: str = "", interactive: bool = True, max_new_tokens: int = 100, temperature: float = 1.0, ) -> None: """ Main function to load the model and perform interactive or batch text generation. Args: ckpt_path (str): Path to the model checkpoint directory. config (str): Path to the model configuration file. input_file (str, optional): Path to a file containing input prompts. Defaults to "". interactive (bool, optional): Whether to run in interactive mode. Defaults to True. max_new_tokens (int, optional): Maximum number of new tokens to generate. Defaults to 100. temperature (float, optional): Temperature for sampling. Defaults to 1.0. 
""" world_size = int(os.getenv("WORLD_SIZE", "1")) rank = int(os.getenv("RANK", "0")) local_rank = int(os.getenv("LOCAL_RANK", "0")) if world_size > 1: dist.init_process_group("nccl") global print if rank != 0: print = lambda *_, **__: None torch.cuda.set_device(local_rank) torch.set_default_dtype(torch.bfloat16) torch.set_num_threads(8) torch.manual_seed(965) with open(config) as f: args = ModelArgs(**json.load(f)) print(args) with torch.device("cuda"): model = Transformer(args) tokenizer = AutoTokenizer.from_pretrained(ckpt_path) tokenizer.decode(generate(model, [tokenizer.encode("DeepSeek")], 2, -1, 1.)[0]) load_model(model, os.path.join(ckpt_path, f"model{rank}-mp{world_size}.safetensors")) if interactive: messages = [] while True: if world_size == 1: prompt = input(">>> ") elif rank == 0: prompt = input(">>> ") objects = [prompt] dist.broadcast_object_list(objects, 0) else: objects = [None] dist.broadcast_object_list(objects, 0) prompt = objects[0] if prompt == "/exit": break elif prompt == "/clear": messages.clear() continue messages.append({"role": "user", "content": prompt}) prompt_tokens = tokenizer.apply_chat_template(messages, add_generation_prompt=True) completion_tokens = generate(model, [prompt_tokens], max_new_tokens, tokenizer.eos_token_id, temperature) completion = tokenizer.decode(completion_tokens[0], skip_special_tokens=True) print(completion) messages.append({"role": "assistant", "content": completion}) else: with open(input_file) as f: prompts = [line.strip() for line in f.readlines()] assert len(prompts) <= args.max_batch_size, f"Number of prompts exceeds maximum batch size ({args.max_batch_size})" prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts] completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature) completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True) for prompt, completion in zip(prompts, completions): print("Prompt:", prompt) print("Completion:", completion) print() if world_size > 1: dist.destroy_process_group() if __name__ == "__main__": """ Command-line interface for distributed text generation. Arguments: --ckpt-path (str): Path to the model checkpoint directory. --config (str): Path to the model configuration file. --input-file (str, optional): File containing prompts for batch processing. --interactive (bool, optional): Enable interactive mode for generating text. --max-new-tokens (int, optional): Maximum number of new tokens to generate. Defaults to 200. --temperature (float, optional): Temperature for sampling. Defaults to 0.2. Raises: AssertionError: If neither input-file nor interactive mode is specified. """ parser = ArgumentParser() parser.add_argument("--ckpt-path", type=str, required=True) parser.add_argument("--config", type=str, required=True) parser.add_argument("--input-file", type=str, default="") parser.add_argument("--interactive", action="store_true") parser.add_argument("--max-new-tokens", type=int, default=200) parser.add_argument("--temperature", type=float, default=0.2) args = parser.parse_args() assert args.input_file or args.interactive, "Either input-file or interactive mode must be specified" main(args.ckpt_path, args.config, args.input_file, args.interactive, args.max_new_tokens, args.temperature)
python
MIT
9b4e9788e4a3a731f7567338ed15d3ec549ce03b
2026-01-04T14:38:15.450976Z
false
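The sample() helper in inference/generate.py above draws from the softmax distribution via an exponential-race (Gumbel-max style) trick: taking argmax over p_i / E_i with E_i ~ Exp(1) selects index i with probability p_i. A quick empirical check of that equivalence, with an arbitrary 3-way distribution:

import torch

torch.manual_seed(0)
probs = torch.tensor([0.2, 0.3, 0.5])
draws = probs.expand(100_000, -1).clone()
# Same in-place pattern as sample(): divide by Exp(1) noise, take argmax.
samples = draws.div_(torch.empty_like(draws).exponential_(1)).argmax(dim=-1)
freq = torch.bincount(samples, minlength=3) / samples.numel()
print(freq)  # close to tensor([0.2, 0.3, 0.5])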
deepseek-ai/DeepSeek-V3
https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/kernel.py
inference/kernel.py
from typing import Tuple, Optional import torch import triton import triton.language as tl from triton import Config @triton.jit def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr, scale_fmt: tl.constexpr): """ Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and the scaling factor in `s_ptr`. Args: x_ptr (triton.Pointer): Pointer to the input tensor. y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored. s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored. BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance. Returns: None """ pid = tl.program_id(axis=0) offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) x = tl.load(x_ptr + offs).to(tl.float32) amax = tl.max(tl.abs(x)) # reduction amax = tl.maximum(amax, 1e-4) # clamp to 1e-4 s = amax / 448. if scale_fmt == "ue8m0": exp = tl.math.ceil(tl.math.log2(s)) s = tl.math.exp2(exp) y = x / s y = y.to(y_ptr.dtype.element_ty) tl.store(y_ptr + offs, y) tl.store(s_ptr + pid, s) def act_quant(x: torch.Tensor, block_size: int = 128, scale_fmt: Optional[str] = None) -> Tuple[torch.Tensor, torch.Tensor]: """ Quantizes the input tensor `x` using block-wise quantization. Args: x (torch.Tensor): The input tensor to be quantized. Must be contiguous and its last dimension size must be divisible by `block_size`. block_size (int, optional): The size of the blocks to be used for quantization. Default is 128. scale_fmt (Optional[str], optional): The format of the scale. Default is None. Returns: Tuple[torch.Tensor, torch.Tensor]: A tuple containing: - The quantized tensor with dtype `torch.float8_e4m3fn`. - A tensor of scaling factors with dtype `torch.float32`. """ assert x.is_contiguous(), 'Input tensor must be contiguous' assert x.size(-1) % block_size == 0, f'Last dimension size must be divisible by block_size (block_size={block_size})' y = torch.empty_like(x, dtype=torch.float8_e4m3fn) s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32) grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), ) act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size, scale_fmt=scale_fmt) return y, s @triton.jit def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr): """ Dequantizes weights using the provided scaling factors and stores the result. Args: x_ptr (tl.pointer): Pointer to the quantized weights. s_ptr (tl.pointer): Pointer to the scaling factors. y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights. M (int): Number of rows in the weight matrix. N (int): Number of columns in the weight matrix. BLOCK_SIZE (tl.constexpr): Size of the block for tiling. Returns: None """ pid_m = tl.program_id(axis=0) pid_n = tl.program_id(axis=1) n = tl.cdiv(N, BLOCK_SIZE) offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) offs = offs_m[:, None] * N + offs_n[None, :] mask = (offs_m[:, None] < M) & (offs_n[None, :] < N) x = tl.load(x_ptr + offs, mask=mask).to(tl.float32) s = tl.load(s_ptr + pid_m * n + pid_n) y = x * s tl.store(y_ptr + offs, y, mask=mask) def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor: """ Dequantizes the given weight tensor using the provided scale tensor. Args: x (torch.Tensor): The quantized weight tensor of shape (M, N). s (torch.Tensor): The scale tensor of shape (M//block_size, N//block_size). 
block_size (int, optional): The block size to use for dequantization. Defaults to 128. Returns: torch.Tensor: The dequantized weight tensor of the same shape as `x`. Raises: AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2. """ assert x.is_contiguous() and s.is_contiguous(), 'Input tensors must be contiguous' assert x.dim() == 2 and s.dim() == 2, 'Input tensors must have 2 dimensions' M, N = x.size() y = torch.empty_like(x, dtype=torch.get_default_dtype()) grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE'])) weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size) return y fp8_gemm_configs = [ Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8) for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6] ] @triton.autotune(configs=fp8_gemm_configs, key=['N', 'K']) @triton.jit def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr, a_s_ptr, b_s_ptr, M, N: tl.constexpr, K: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr): """ Performs a matrix multiplication operation on FP8 matrices with scaling factors. Args: a_ptr (tl.tensor): Pointer to the first input matrix A. b_ptr (tl.tensor): Pointer to the second input matrix B. c_ptr (tl.tensor): Pointer to the output matrix C. a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A. b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B. M (int): Number of rows in matrix A and C. N (tl.constexpr): Number of columns in matrix B and C. K (tl.constexpr): Number of columns in matrix A and rows in matrix B. BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension. BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension. BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension. Returns: None """ pid_m = tl.program_id(axis=0) pid_n = tl.program_id(axis=1) k = tl.cdiv(K, BLOCK_SIZE_K) offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :] b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None] a_s_ptrs = a_s_ptr + offs_m * k b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for i in range(k): a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0) b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0) a_s = tl.load(a_s_ptrs) b_s = tl.load(b_s_ptrs) accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :] a_ptrs += BLOCK_SIZE_K b_ptrs += BLOCK_SIZE_K a_s_ptrs += 1 b_s_ptrs += 1 c = accumulator.to(c_ptr.dtype.element_ty) offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :] mask = (offs_m[:, None] < M) & (offs_n[None, :] < N) tl.store(c_ptrs, c, mask=mask) def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor): """ Perform a matrix multiplication using FP8 precision. Args: a (torch.Tensor): The first input matrix, must be contiguous. a_s (torch.Tensor): The scaling factor for the first input matrix, must be contiguous. b (torch.Tensor): The second input matrix, must be contiguous. b_s (torch.Tensor): The scaling factor for the second input matrix, must be contiguous. 
Returns: torch.Tensor: The result of the matrix multiplication. """ assert a.is_contiguous() and b.is_contiguous(), 'Input tensors must be contiguous' assert a_s.is_contiguous() and b_s.is_contiguous(), 'Scaling factor tensors must be contiguous' K = a.size(-1) M = a.numel() // K N = b.size(0) c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype()) grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N'])) fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K) return c
python
MIT
9b4e9788e4a3a731f7567338ed15d3ec549ce03b
2026-01-04T14:38:15.450976Z
false
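For intuition about inference/kernel.py above, here is a rough pure-PyTorch reference for what act_quant computes on its default (scale_fmt=None) path: one scale per 128-wide block of the last dimension, with amax clamped to 1e-4 and divided by 448, the largest normal float8_e4m3fn value. This is a sketch for checking semantics, not a replacement for the Triton kernel, and it assumes a torch build that exposes torch.float8_e4m3fn:

import torch

def act_quant_ref(x: torch.Tensor, block_size: int = 128):
    # One scale per contiguous block of the last dimension, mirroring the
    # kernel's reduction (amax), clamp, and 448 divisor.
    xb = x.float().view(*x.shape[:-1], -1, block_size)
    s = xb.abs().amax(dim=-1, keepdim=True).clamp_min(1e-4) / 448.0
    y = (xb / s).to(torch.float8_e4m3fn).view_as(x)
    return y, s.squeeze(-1)

y, s = act_quant_ref(torch.randn(2, 256))
print(y.shape, y.dtype, s.shape)  # torch.Size([2, 256]) torch.float8_e4m3fn torch.Size([2, 2])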
deepseek-ai/DeepSeek-V3
https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/fp8_cast_bf16.py
inference/fp8_cast_bf16.py
import os import json from argparse import ArgumentParser from glob import glob from tqdm import tqdm import torch from safetensors.torch import load_file, save_file from kernel import weight_dequant def main(fp8_path, bf16_path): """ Converts FP8 weights to BF16 and saves the converted weights. This function reads FP8 weights from the specified directory, converts them to BF16, and saves the converted weights to another specified directory. It also updates the model index file to reflect the changes. Args: fp8_path (str): The path to the directory containing the FP8 weights and model index file. bf16_path (str): The path to the directory where the converted BF16 weights will be saved. Notes: - The function assumes that the FP8 weights are stored in safetensor files. - The function caches loaded safetensor files to optimize memory usage. - Weights with a missing scale_inv tensor are copied unconverted, with a warning (no KeyError is raised). - The function updates the model index file to remove references to scale_inv tensors. """ torch.set_default_dtype(torch.bfloat16) os.makedirs(bf16_path, exist_ok=True) model_index_file = os.path.join(fp8_path, "model.safetensors.index.json") with open(model_index_file, "r") as f: model_index = json.load(f) weight_map = model_index["weight_map"] # Cache for loaded safetensor files loaded_files = {} fp8_weight_names = [] # Helper function to get tensor from the correct file def get_tensor(tensor_name): """ Retrieves a tensor from the cached safetensor files or loads it from disk if not cached. Args: tensor_name (str): The name of the tensor to retrieve. Returns: torch.Tensor: The retrieved tensor. Raises: KeyError: If the tensor does not exist in the safetensor file. """ file_name = weight_map[tensor_name] if file_name not in loaded_files: file_path = os.path.join(fp8_path, file_name) loaded_files[file_name] = load_file(file_path, device="cuda") return loaded_files[file_name][tensor_name] safetensor_files = list(glob(os.path.join(fp8_path, "*.safetensors"))) safetensor_files.sort() for safetensor_file in tqdm(safetensor_files): file_name = os.path.basename(safetensor_file) current_state_dict = load_file(safetensor_file, device="cuda") loaded_files[file_name] = current_state_dict new_state_dict = {} for weight_name, weight in current_state_dict.items(): if weight_name.endswith("_scale_inv"): continue elif weight.element_size() == 1: # FP8 weight scale_inv_name = f"{weight_name}_scale_inv" try: # Get scale_inv from the correct file scale_inv = get_tensor(scale_inv_name) fp8_weight_names.append(weight_name) new_state_dict[weight_name] = weight_dequant(weight, scale_inv) except KeyError: print(f"Warning: Missing scale_inv tensor for {weight_name}, skipping conversion") new_state_dict[weight_name] = weight else: new_state_dict[weight_name] = weight new_safetensor_file = os.path.join(bf16_path, file_name) save_file(new_state_dict, new_safetensor_file) # Memory management: keep only the 2 most recently used files if len(loaded_files) > 2: oldest_file = next(iter(loaded_files)) del loaded_files[oldest_file] torch.cuda.empty_cache() # Update model index new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json") for weight_name in fp8_weight_names: scale_inv_name = f"{weight_name}_scale_inv" if scale_inv_name in weight_map: weight_map.pop(scale_inv_name) with open(new_model_index_file, "w") as f: json.dump({"metadata": {}, "weight_map": weight_map}, f, indent=2) if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("--input-fp8-hf-path", type=str, 
required=True) parser.add_argument("--output-bf16-hf-path", type=str, required=True) args = parser.parse_args() main(args.input_fp8_hf_path, args.output_bf16_hf_path)
python
MIT
9b4e9788e4a3a731f7567338ed15d3ec549ce03b
2026-01-04T14:38:15.450976Z
false
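inference/fp8_cast_bf16.py above can also be driven from Python by calling main() directly with the two directory paths the CLI flags (--input-fp8-hf-path / --output-bf16-hf-path) would supply; the paths below are placeholders:

from fp8_cast_bf16 import main

# Placeholder paths; equivalent to the CLI invocation of this script.
main("/path/to/DeepSeek-V3-fp8", "/path/to/DeepSeek-V3-bf16")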
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/src/markitdown_mcp/__main__.py
packages/markitdown-mcp/src/markitdown_mcp/__main__.py
import contextlib import sys import os from collections.abc import AsyncIterator from mcp.server.fastmcp import FastMCP from starlette.applications import Starlette from mcp.server.sse import SseServerTransport from starlette.requests import Request from starlette.routing import Mount, Route from starlette.types import Receive, Scope, Send from mcp.server import Server from mcp.server.streamable_http_manager import StreamableHTTPSessionManager from markitdown import MarkItDown import uvicorn # Initialize FastMCP server for MarkItDown (SSE) mcp = FastMCP("markitdown") @mcp.tool() async def convert_to_markdown(uri: str) -> str: """Convert a resource described by an http:, https:, file: or data: URI to markdown""" return MarkItDown(enable_plugins=check_plugins_enabled()).convert_uri(uri).markdown def check_plugins_enabled() -> bool: return os.getenv("MARKITDOWN_ENABLE_PLUGINS", "false").strip().lower() in ( "true", "1", "yes", ) def create_starlette_app(mcp_server: Server, *, debug: bool = False) -> Starlette: sse = SseServerTransport("/messages/") session_manager = StreamableHTTPSessionManager( app=mcp_server, event_store=None, json_response=True, stateless=True, ) async def handle_sse(request: Request) -> None: async with sse.connect_sse( request.scope, request.receive, request._send, ) as (read_stream, write_stream): await mcp_server.run( read_stream, write_stream, mcp_server.create_initialization_options(), ) async def handle_streamable_http( scope: Scope, receive: Receive, send: Send ) -> None: await session_manager.handle_request(scope, receive, send) @contextlib.asynccontextmanager async def lifespan(app: Starlette) -> AsyncIterator[None]: """Context manager for session manager.""" async with session_manager.run(): print("Application started with StreamableHTTP session manager!") try: yield finally: print("Application shutting down...") return Starlette( debug=debug, routes=[ Route("/sse", endpoint=handle_sse), Mount("/mcp", app=handle_streamable_http), Mount("/messages/", app=sse.handle_post_message), ], lifespan=lifespan, ) # Main entry point def main(): import argparse mcp_server = mcp._mcp_server parser = argparse.ArgumentParser(description="Run a MarkItDown MCP server") parser.add_argument( "--http", action="store_true", help="Run the server with Streamable HTTP and SSE transport rather than STDIO (default: False)", ) parser.add_argument( "--sse", action="store_true", help="(Deprecated) An alias for --http (default: False)", ) parser.add_argument( "--host", default=None, help="Host to bind to (default: 127.0.0.1)" ) parser.add_argument( "--port", type=int, default=None, help="Port to listen on (default: 3001)" ) args = parser.parse_args() use_http = args.http or args.sse if not use_http and (args.host or args.port): parser.error( "Host and port arguments are only valid when using streamable HTTP or SSE transport (see: --http)." ) sys.exit(1) if use_http: starlette_app = create_starlette_app(mcp_server, debug=True) uvicorn.run( starlette_app, host=args.host if args.host else "127.0.0.1", port=args.port if args.port else 3001, ) else: mcp.run() if __name__ == "__main__": main()
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
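The convert_to_markdown tool in the MCP server above is a thin wrapper over MarkItDown.convert_uri(...).markdown, so the same conversion can be exercised without any MCP transport (the URI below is a placeholder); the server itself starts with python -m markitdown_mcp --http, defaulting to 127.0.0.1:3001:

from markitdown import MarkItDown

# Plugins off, matching the server's default when MARKITDOWN_ENABLE_PLUGINS is unset.
md = MarkItDown(enable_plugins=False)
print(md.convert_uri("https://example.com/").markdown)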
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/src/markitdown_mcp/__about__.py
packages/markitdown-mcp/src/markitdown_mcp/__about__.py
# SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com> # # SPDX-License-Identifier: MIT __version__ = "0.0.1a4"
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/src/markitdown_mcp/__init__.py
packages/markitdown-mcp/src/markitdown_mcp/__init__.py
# SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com> # # SPDX-License-Identifier: MIT from .__about__ import __version__ __all__ = [ "__version__", ]
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/tests/__init__.py
packages/markitdown-mcp/tests/__init__.py
# SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com> # # SPDX-License-Identifier: MIT
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_exceptions.py
packages/markitdown/src/markitdown/_exceptions.py
from typing import Optional, List, Any MISSING_DEPENDENCY_MESSAGE = """{converter} recognized the input as a potential {extension} file, but the dependencies needed to read {extension} files have not been installed. To resolve this error, include the optional dependency [{feature}] or [all] when installing MarkItDown. For example: * pip install markitdown[{feature}] * pip install markitdown[all] * pip install markitdown[{feature}, ...] * etc.""" class MarkItDownException(Exception): """ Base exception class for MarkItDown. """ pass class MissingDependencyException(MarkItDownException): """ Converters shipped with MarkItDown may depend on optional dependencies. This exception is thrown when a converter's convert() method is called, but the required dependency is not installed. This is not necessarily a fatal error, as the converter will simply be skipped (an error will bubble up only if no other suitable converter is found). Error messages should clearly indicate which dependency is missing. """ pass class UnsupportedFormatException(MarkItDownException): """ Thrown when no suitable converter was found for the given file. """ pass class FailedConversionAttempt(object): """ Represents a single attempt to convert a file. """ def __init__(self, converter: Any, exc_info: Optional[tuple] = None): self.converter = converter self.exc_info = exc_info class FileConversionException(MarkItDownException): """ Thrown when a suitable converter was found, but the conversion process fails for any reason. """ def __init__( self, message: Optional[str] = None, attempts: Optional[List[FailedConversionAttempt]] = None, ): self.attempts = attempts if message is None: if attempts is None: message = "File conversion failed." else: message = f"File conversion failed after {len(attempts)} attempts:\n" for attempt in attempts: if attempt.exc_info is None: message += f" - {type(attempt.converter).__name__} provided no execution info.\n" else: message += f" - {type(attempt.converter).__name__} threw {attempt.exc_info[0].__name__} with message: {attempt.exc_info[1]}\n" super().__init__(message)
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
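The message assembly in _exceptions.py above can be seen with a synthetic attempt record; DummyConverter is an illustrative stand-in for a real converter:

import sys
from markitdown._exceptions import FailedConversionAttempt, FileConversionException

class DummyConverter:  # illustrative stand-in for a real DocumentConverter
    pass

try:
    raise ValueError("bad header")
except ValueError:
    attempt = FailedConversionAttempt(converter=DummyConverter(), exc_info=sys.exc_info())

print(FileConversionException(attempts=[attempt]))
# File conversion failed after 1 attempts:
#  - DummyConverter threw ValueError with message: bad header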
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_uri_utils.py
packages/markitdown/src/markitdown/_uri_utils.py
import base64 import os from typing import Tuple, Dict from urllib.request import url2pathname from urllib.parse import urlparse, unquote_to_bytes def file_uri_to_path(file_uri: str) -> Tuple[str | None, str]: """Convert a file URI to a local file path""" parsed = urlparse(file_uri) if parsed.scheme != "file": raise ValueError(f"Not a file URL: {file_uri}") netloc = parsed.netloc if parsed.netloc else None path = os.path.abspath(url2pathname(parsed.path)) return netloc, path def parse_data_uri(uri: str) -> Tuple[str | None, Dict[str, str], bytes]: if not uri.startswith("data:"): raise ValueError("Not a data URI") header, _, data = uri.partition(",") if not _: raise ValueError("Malformed data URI, missing ',' separator") meta = header[5:] # Strip 'data:' parts = meta.split(";") is_base64 = False # Ends with base64? if parts[-1] == "base64": parts.pop() is_base64 = True mime_type = None # Normally this would default to text/plain but we won't assume if len(parts) and len(parts[0]) > 0: # First part is the mime type mime_type = parts.pop(0) attributes: Dict[str, str] = {} for part in parts: # Handle key=value pairs in the middle if "=" in part: key, value = part.split("=", 1) attributes[key] = value elif len(part) > 0: attributes[part] = "" content = base64.b64decode(data) if is_base64 else unquote_to_bytes(data) return mime_type, attributes, content
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
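A small usage sketch for parse_data_uri above (importing the private module directly, for illustration only): the base64 suffix is stripped from the metadata, the mime type and key=value attributes are parsed, and the payload is decoded:

from markitdown._uri_utils import parse_data_uri

mime, attrs, payload = parse_data_uri("data:text/plain;charset=utf-8;base64,aGVsbG8=")
print(mime, attrs, payload)  # text/plain {'charset': 'utf-8'} b'hello'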
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/__main__.py
packages/markitdown/src/markitdown/__main__.py
# SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com> # # SPDX-License-Identifier: MIT import argparse import sys import codecs from textwrap import dedent from importlib.metadata import entry_points from .__about__ import __version__ from ._markitdown import MarkItDown, StreamInfo, DocumentConverterResult def main(): parser = argparse.ArgumentParser( description="Convert various file formats to markdown.", prog="markitdown", formatter_class=argparse.RawDescriptionHelpFormatter, usage=dedent( """ SYNTAX: markitdown <OPTIONAL: FILENAME> If FILENAME is empty, markitdown reads from stdin. EXAMPLE: markitdown example.pdf OR cat example.pdf | markitdown OR markitdown < example.pdf OR to save to a file use markitdown example.pdf -o example.md OR markitdown example.pdf > example.md """ ).strip(), ) parser.add_argument( "-v", "--version", action="version", version=f"%(prog)s {__version__}", help="show the version number and exit", ) parser.add_argument( "-o", "--output", help="Output file name. If not provided, output is written to stdout.", ) parser.add_argument( "-x", "--extension", help="Provide a hint about the file extension (e.g., when reading from stdin).", ) parser.add_argument( "-m", "--mime-type", help="Provide a hint about the file's MIME type.", ) parser.add_argument( "-c", "--charset", help="Provide a hint about the file's charset (e.g., UTF-8).", ) parser.add_argument( "-d", "--use-docintel", action="store_true", help="Use Document Intelligence to extract text instead of offline conversion. Requires a valid Document Intelligence Endpoint.", ) parser.add_argument( "-e", "--endpoint", type=str, help="Document Intelligence Endpoint. Required if using Document Intelligence.", ) parser.add_argument( "-p", "--use-plugins", action="store_true", help="Use 3rd-party plugins to convert files. Use --list-plugins to see installed plugins.", ) parser.add_argument( "--list-plugins", action="store_true", help="List installed 3rd-party plugins. Plugins are loaded when using the -p or --use-plugins option.", ) parser.add_argument( "--keep-data-uris", action="store_true", help="Keep data URIs (like base64-encoded images) in the output. By default, data URIs are truncated.", ) parser.add_argument("filename", nargs="?") args = parser.parse_args() # Parse the extension hint extension_hint = args.extension if extension_hint is not None: extension_hint = extension_hint.strip().lower() if len(extension_hint) > 0: if not extension_hint.startswith("."): extension_hint = "." 
+ extension_hint else: extension_hint = None # Parse the mime type mime_type_hint = args.mime_type if mime_type_hint is not None: mime_type_hint = mime_type_hint.strip() if len(mime_type_hint) > 0: if mime_type_hint.count("/") != 1: _exit_with_error(f"Invalid MIME type: {mime_type_hint}") else: mime_type_hint = None # Parse the charset charset_hint = args.charset if charset_hint is not None: charset_hint = charset_hint.strip() if len(charset_hint) > 0: try: charset_hint = codecs.lookup(charset_hint).name except LookupError: _exit_with_error(f"Invalid charset: {charset_hint}") else: charset_hint = None stream_info = None if ( extension_hint is not None or mime_type_hint is not None or charset_hint is not None ): stream_info = StreamInfo( extension=extension_hint, mimetype=mime_type_hint, charset=charset_hint ) if args.list_plugins: # List installed plugins, then exit print("Installed MarkItDown 3rd-party Plugins:\n") plugin_entry_points = list(entry_points(group="markitdown.plugin")) if len(plugin_entry_points) == 0: print(" * No 3rd-party plugins installed.") print( "\nFind plugins by searching for the hashtag #markitdown-plugin on GitHub.\n" ) else: for entry_point in plugin_entry_points: print(f" * {entry_point.name:<16}\t(package: {entry_point.value})") print( "\nUse the -p (or --use-plugins) option to enable 3rd-party plugins.\n" ) sys.exit(0) if args.use_docintel: if args.endpoint is None: _exit_with_error( "Document Intelligence Endpoint is required when using Document Intelligence." ) elif args.filename is None: _exit_with_error("Filename is required when using Document Intelligence.") markitdown = MarkItDown( enable_plugins=args.use_plugins, docintel_endpoint=args.endpoint ) else: markitdown = MarkItDown(enable_plugins=args.use_plugins) if args.filename is None: result = markitdown.convert_stream( sys.stdin.buffer, stream_info=stream_info, keep_data_uris=args.keep_data_uris, ) else: result = markitdown.convert( args.filename, stream_info=stream_info, keep_data_uris=args.keep_data_uris ) _handle_output(args, result) def _handle_output(args, result: DocumentConverterResult): """Handle output to stdout or file""" if args.output: with open(args.output, "w", encoding="utf-8") as f: f.write(result.markdown) else: # Handle stdout encoding errors more gracefully print( result.markdown.encode(sys.stdout.encoding, errors="replace").decode( sys.stdout.encoding ) ) def _exit_with_error(message: str): print(message) sys.exit(1) if __name__ == "__main__": main()
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
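The stdout fallback in _handle_output above re-encodes with errors="replace" so characters the console cannot represent degrade to '?' instead of raising UnicodeEncodeError. The same idea in isolation; the or-"utf-8" fallback for a missing encoding is an addition here, not in the original:

import sys

# sys.stdout.encoding can be e.g. cp1252 on Windows consoles, or None when piped.
enc = sys.stdout.encoding or "utf-8"
print("naïve → café".encode(enc, errors="replace").decode(enc))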
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_stream_info.py
packages/markitdown/src/markitdown/_stream_info.py
from dataclasses import dataclass, asdict from typing import Optional @dataclass(kw_only=True, frozen=True) class StreamInfo: """The StreamInfo class is used to store information about a file stream. All fields can be None, and will depend on how the stream was opened. """ mimetype: Optional[str] = None extension: Optional[str] = None charset: Optional[str] = None filename: Optional[ str ] = None # From local path, url, or Content-Disposition header local_path: Optional[str] = None # If read from disk url: Optional[str] = None # If read from url def copy_and_update(self, *args, **kwargs): """Copy the StreamInfo object and update it with the given StreamInfo instance and/or other keyword arguments.""" new_info = asdict(self) for si in args: assert isinstance(si, StreamInfo) new_info.update({k: v for k, v in asdict(si).items() if v is not None}) if len(kwargs) > 0: new_info.update(kwargs) return StreamInfo(**new_info)
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
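A small usage sketch for StreamInfo.copy_and_update above: non-None fields from the positional StreamInfo arguments are merged first, then keyword overrides are applied, and a new frozen instance is returned:

from markitdown._stream_info import StreamInfo

base = StreamInfo(extension=".html", filename="page.html")
merged = base.copy_and_update(StreamInfo(mimetype="text/html"), charset="utf-8")
print(merged.extension, merged.mimetype, merged.charset)  # .html text/html utf-8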
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/__about__.py
packages/markitdown/src/markitdown/__about__.py
# SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com> # # SPDX-License-Identifier: MIT __version__ = "0.1.4"
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_markitdown.py
packages/markitdown/src/markitdown/_markitdown.py
import mimetypes import os import re import sys import shutil import traceback import io from dataclasses import dataclass from importlib.metadata import entry_points from typing import Any, List, Dict, Optional, Union, BinaryIO from pathlib import Path from urllib.parse import urlparse from warnings import warn import requests import magika import charset_normalizer import codecs from ._stream_info import StreamInfo from ._uri_utils import parse_data_uri, file_uri_to_path from .converters import ( PlainTextConverter, HtmlConverter, RssConverter, WikipediaConverter, YouTubeConverter, IpynbConverter, BingSerpConverter, PdfConverter, DocxConverter, XlsxConverter, XlsConverter, PptxConverter, ImageConverter, AudioConverter, OutlookMsgConverter, ZipConverter, EpubConverter, DocumentIntelligenceConverter, CsvConverter, ) from ._base_converter import DocumentConverter, DocumentConverterResult from ._exceptions import ( FileConversionException, UnsupportedFormatException, FailedConversionAttempt, ) # Lower priority values are tried first. PRIORITY_SPECIFIC_FILE_FORMAT = ( 0.0 # e.g., .docx, .pdf, .xlsx, Or specific pages, e.g., wikipedia ) PRIORITY_GENERIC_FILE_FORMAT = ( 10.0 # Near catch-all converters for mimetypes like text/*, etc. ) _plugins: Union[None, List[Any]] = None # If None, plugins have not been loaded yet. def _load_plugins() -> Union[None, List[Any]]: """Lazy load plugins, exiting early if already loaded.""" global _plugins # Skip if we've already loaded plugins if _plugins is not None: return _plugins # Load plugins _plugins = [] for entry_point in entry_points(group="markitdown.plugin"): try: _plugins.append(entry_point.load()) except Exception: tb = traceback.format_exc() warn(f"Plugin '{entry_point.name}' failed to load ... skipping:\n{tb}") return _plugins @dataclass(kw_only=True, frozen=True) class ConverterRegistration: """A registration of a converter with its priority and other metadata.""" converter: DocumentConverter priority: float class MarkItDown: """(In preview) An extremely simple text-based document reader, suitable for LLM use. This reader will convert common file-types or webpages to Markdown.""" def __init__( self, *, enable_builtins: Union[None, bool] = None, enable_plugins: Union[None, bool] = None, **kwargs, ): self._builtins_enabled = False self._plugins_enabled = False requests_session = kwargs.get("requests_session") if requests_session is None: self._requests_session = requests.Session() else: self._requests_session = requests_session self._magika = magika.Magika() # TODO - remove these (see enable_builtins) self._llm_client: Any = None self._llm_model: Union[str | None] = None self._llm_prompt: Union[str | None] = None self._exiftool_path: Union[str | None] = None self._style_map: Union[str | None] = None # Register the converters self._converters: List[ConverterRegistration] = [] if ( enable_builtins is None or enable_builtins ): # Default to True when not specified self.enable_builtins(**kwargs) if enable_plugins: self.enable_plugins(**kwargs) def enable_builtins(self, **kwargs) -> None: """ Enable and register built-in converters. Built-in converters are enabled by default. This method should only be called once, if built-ins were initially disabled. 
""" if not self._builtins_enabled: # TODO: Move these into converter constructors self._llm_client = kwargs.get("llm_client") self._llm_model = kwargs.get("llm_model") self._llm_prompt = kwargs.get("llm_prompt") self._exiftool_path = kwargs.get("exiftool_path") self._style_map = kwargs.get("style_map") if self._exiftool_path is None: self._exiftool_path = os.getenv("EXIFTOOL_PATH") # Still none? Check well-known paths if self._exiftool_path is None: candidate = shutil.which("exiftool") if candidate: candidate = os.path.abspath(candidate) if any( d == os.path.dirname(candidate) for d in [ "/usr/bin", "/usr/local/bin", "/opt", "/opt/bin", "/opt/local/bin", "/opt/homebrew/bin", "C:\\Windows\\System32", "C:\\Program Files", "C:\\Program Files (x86)", ] ): self._exiftool_path = candidate # Register converters for successful browsing operations # Later registrations are tried first / take higher priority than earlier registrations # To this end, the most specific converters should appear below the most generic converters self.register_converter( PlainTextConverter(), priority=PRIORITY_GENERIC_FILE_FORMAT ) self.register_converter( ZipConverter(markitdown=self), priority=PRIORITY_GENERIC_FILE_FORMAT ) self.register_converter( HtmlConverter(), priority=PRIORITY_GENERIC_FILE_FORMAT ) self.register_converter(RssConverter()) self.register_converter(WikipediaConverter()) self.register_converter(YouTubeConverter()) self.register_converter(BingSerpConverter()) self.register_converter(DocxConverter()) self.register_converter(XlsxConverter()) self.register_converter(XlsConverter()) self.register_converter(PptxConverter()) self.register_converter(AudioConverter()) self.register_converter(ImageConverter()) self.register_converter(IpynbConverter()) self.register_converter(PdfConverter()) self.register_converter(OutlookMsgConverter()) self.register_converter(EpubConverter()) self.register_converter(CsvConverter()) # Register Document Intelligence converter at the top of the stack if endpoint is provided docintel_endpoint = kwargs.get("docintel_endpoint") if docintel_endpoint is not None: docintel_args: Dict[str, Any] = {} docintel_args["endpoint"] = docintel_endpoint docintel_credential = kwargs.get("docintel_credential") if docintel_credential is not None: docintel_args["credential"] = docintel_credential docintel_types = kwargs.get("docintel_file_types") if docintel_types is not None: docintel_args["file_types"] = docintel_types docintel_version = kwargs.get("docintel_api_version") if docintel_version is not None: docintel_args["api_version"] = docintel_version self.register_converter( DocumentIntelligenceConverter(**docintel_args), ) self._builtins_enabled = True else: warn("Built-in converters are already enabled.", RuntimeWarning) def enable_plugins(self, **kwargs) -> None: """ Enable and register converters provided by plugins. Plugins are disabled by default. This method should only be called once, if plugins were initially disabled. 
""" if not self._plugins_enabled: # Load plugins plugins = _load_plugins() assert plugins is not None for plugin in plugins: try: plugin.register_converters(self, **kwargs) except Exception: tb = traceback.format_exc() warn(f"Plugin '{plugin}' failed to register converters:\n{tb}") self._plugins_enabled = True else: warn("Plugins converters are already enabled.", RuntimeWarning) def convert( self, source: Union[str, requests.Response, Path, BinaryIO], *, stream_info: Optional[StreamInfo] = None, **kwargs: Any, ) -> DocumentConverterResult: # TODO: deal with kwargs """ Args: - source: can be a path (str or Path), url, or a requests.response object - stream_info: optional stream info to use for the conversion. If None, infer from source - kwargs: additional arguments to pass to the converter """ # Local path or url if isinstance(source, str): if ( source.startswith("http:") or source.startswith("https:") or source.startswith("file:") or source.startswith("data:") ): # Rename the url argument to mock_url # (Deprecated -- use stream_info) _kwargs = {k: v for k, v in kwargs.items()} if "url" in _kwargs: _kwargs["mock_url"] = _kwargs["url"] del _kwargs["url"] return self.convert_uri(source, stream_info=stream_info, **_kwargs) else: return self.convert_local(source, stream_info=stream_info, **kwargs) # Path object elif isinstance(source, Path): return self.convert_local(source, stream_info=stream_info, **kwargs) # Request response elif isinstance(source, requests.Response): return self.convert_response(source, stream_info=stream_info, **kwargs) # Binary stream elif ( hasattr(source, "read") and callable(source.read) and not isinstance(source, io.TextIOBase) ): return self.convert_stream(source, stream_info=stream_info, **kwargs) else: raise TypeError( f"Invalid source type: {type(source)}. Expected str, requests.Response, BinaryIO." ) def convert_local( self, path: Union[str, Path], *, stream_info: Optional[StreamInfo] = None, file_extension: Optional[str] = None, # Deprecated -- use stream_info url: Optional[str] = None, # Deprecated -- use stream_info **kwargs: Any, ) -> DocumentConverterResult: if isinstance(path, Path): path = str(path) # Build a base StreamInfo object from which to start guesses base_guess = StreamInfo( local_path=path, extension=os.path.splitext(path)[1], filename=os.path.basename(path), ) # Extend the base_guess with any additional info from the arguments if stream_info is not None: base_guess = base_guess.copy_and_update(stream_info) if file_extension is not None: # Deprecated -- use stream_info base_guess = base_guess.copy_and_update(extension=file_extension) if url is not None: # Deprecated -- use stream_info base_guess = base_guess.copy_and_update(url=url) with open(path, "rb") as fh: guesses = self._get_stream_info_guesses( file_stream=fh, base_guess=base_guess ) return self._convert(file_stream=fh, stream_info_guesses=guesses, **kwargs) def convert_stream( self, stream: BinaryIO, *, stream_info: Optional[StreamInfo] = None, file_extension: Optional[str] = None, # Deprecated -- use stream_info url: Optional[str] = None, # Deprecated -- use stream_info **kwargs: Any, ) -> DocumentConverterResult: guesses: List[StreamInfo] = [] # Do we have anything on which to base a guess? 
base_guess = None if stream_info is not None or file_extension is not None or url is not None: # Start with a non-Null base guess if stream_info is None: base_guess = StreamInfo() else: base_guess = stream_info if file_extension is not None: # Deprecated -- use stream_info assert base_guess is not None # for mypy base_guess = base_guess.copy_and_update(extension=file_extension) if url is not None: # Deprecated -- use stream_info assert base_guess is not None # for mypy base_guess = base_guess.copy_and_update(url=url) # Check if we have a seekable stream. If not, load the entire stream into memory. if not stream.seekable(): buffer = io.BytesIO() while True: chunk = stream.read(4096) if not chunk: break buffer.write(chunk) buffer.seek(0) stream = buffer # Add guesses based on stream content guesses = self._get_stream_info_guesses( file_stream=stream, base_guess=base_guess or StreamInfo() ) return self._convert(file_stream=stream, stream_info_guesses=guesses, **kwargs) def convert_url( self, url: str, *, stream_info: Optional[StreamInfo] = None, file_extension: Optional[str] = None, mock_url: Optional[str] = None, **kwargs: Any, ) -> DocumentConverterResult: """Alias for convert_uri()""" # convert_url will likely be deprecated in the future in favor of convert_uri return self.convert_uri( url, stream_info=stream_info, file_extension=file_extension, mock_url=mock_url, **kwargs, ) def convert_uri( self, uri: str, *, stream_info: Optional[StreamInfo] = None, file_extension: Optional[str] = None, # Deprecated -- use stream_info mock_url: Optional[ str ] = None, # Mock the request as if it came from a different URL **kwargs: Any, ) -> DocumentConverterResult: uri = uri.strip() # File URIs if uri.startswith("file:"): netloc, path = file_uri_to_path(uri) if netloc and netloc != "localhost": raise ValueError( f"Unsupported file URI: {uri}. Netloc must be empty or localhost." ) return self.convert_local( path, stream_info=stream_info, file_extension=file_extension, url=mock_url, **kwargs, ) # Data URIs elif uri.startswith("data:"): mimetype, attributes, data = parse_data_uri(uri) base_guess = StreamInfo( mimetype=mimetype, charset=attributes.get("charset"), ) if stream_info is not None: base_guess = base_guess.copy_and_update(stream_info) return self.convert_stream( io.BytesIO(data), stream_info=base_guess, file_extension=file_extension, url=mock_url, **kwargs, ) # HTTP/HTTPS URIs elif uri.startswith("http:") or uri.startswith("https:"): response = self._requests_session.get(uri, stream=True) response.raise_for_status() return self.convert_response( response, stream_info=stream_info, file_extension=file_extension, url=mock_url, **kwargs, ) else: raise ValueError( f"Unsupported URI scheme: {uri.split(':')[0]}. 
Supported schemes are: file:, data:, http:, https:" ) def convert_response( self, response: requests.Response, *, stream_info: Optional[StreamInfo] = None, file_extension: Optional[str] = None, # Deprecated -- use stream_info url: Optional[str] = None, # Deprecated -- use stream_info **kwargs: Any, ) -> DocumentConverterResult: # If there is a content-type header, get the mimetype and charset (if present) mimetype: Optional[str] = None charset: Optional[str] = None if "content-type" in response.headers: parts = response.headers["content-type"].split(";") mimetype = parts.pop(0).strip() for part in parts: if part.strip().startswith("charset="): _charset = part.split("=")[1].strip() if len(_charset) > 0: charset = _charset # If there is a content-disposition header, get the filename and possibly the extension filename: Optional[str] = None extension: Optional[str] = None if "content-disposition" in response.headers: m = re.search(r"filename=([^;]+)", response.headers["content-disposition"]) if m: filename = m.group(1).strip("\"'") _, _extension = os.path.splitext(filename) if len(_extension) > 0: extension = _extension # If there is still no filename, try to read it from the url if filename is None: parsed_url = urlparse(response.url) _, _extension = os.path.splitext(parsed_url.path) if len(_extension) > 0: # Looks like this might be a file! filename = os.path.basename(parsed_url.path) extension = _extension # Create an initial guess from all this information base_guess = StreamInfo( mimetype=mimetype, charset=charset, filename=filename, extension=extension, url=response.url, ) # Update with any additional info from the arguments if stream_info is not None: base_guess = base_guess.copy_and_update(stream_info) if file_extension is not None: # Deprecated -- use stream_info base_guess = base_guess.copy_and_update(extension=file_extension) if url is not None: # Deprecated -- use stream_info base_guess = base_guess.copy_and_update(url=url) # Read into BytesIO buffer = io.BytesIO() for chunk in response.iter_content(chunk_size=512): buffer.write(chunk) buffer.seek(0) # Convert guesses = self._get_stream_info_guesses( file_stream=buffer, base_guess=base_guess ) return self._convert(file_stream=buffer, stream_info_guesses=guesses, **kwargs) def _convert( self, *, file_stream: BinaryIO, stream_info_guesses: List[StreamInfo], **kwargs ) -> DocumentConverterResult: res: Union[None, DocumentConverterResult] = None # Keep track of which converters throw exceptions failed_attempts: List[FailedConversionAttempt] = [] # Create a copy of the page_converters list, sorted by priority. # We do this with each call to _convert because the priority of converters may change between calls. # The sort is guaranteed to be stable, so converters with the same priority will remain in the same order. 
sorted_registrations = sorted(self._converters, key=lambda x: x.priority) # Remember the initial stream position so that we can return to it cur_pos = file_stream.tell() for stream_info in stream_info_guesses + [StreamInfo()]: for converter_registration in sorted_registrations: converter = converter_registration.converter # Sanity check -- make sure the cur_pos is still the same assert ( cur_pos == file_stream.tell() ), "File stream position should NOT change between guess iterations" _kwargs = {k: v for k, v in kwargs.items()} # Copy any additional global options if "llm_client" not in _kwargs and self._llm_client is not None: _kwargs["llm_client"] = self._llm_client if "llm_model" not in _kwargs and self._llm_model is not None: _kwargs["llm_model"] = self._llm_model if "llm_prompt" not in _kwargs and self._llm_prompt is not None: _kwargs["llm_prompt"] = self._llm_prompt if "style_map" not in _kwargs and self._style_map is not None: _kwargs["style_map"] = self._style_map if "exiftool_path" not in _kwargs and self._exiftool_path is not None: _kwargs["exiftool_path"] = self._exiftool_path # Add the list of converters for nested processing _kwargs["_parent_converters"] = self._converters # Add legacy kwargs if stream_info is not None: if stream_info.extension is not None: _kwargs["file_extension"] = stream_info.extension if stream_info.url is not None: _kwargs["url"] = stream_info.url # Check if the converter will accept the file, and if so, try to convert it _accepts = False try: _accepts = converter.accepts(file_stream, stream_info, **_kwargs) except NotImplementedError: pass # accepts() should not have changed the file stream position assert ( cur_pos == file_stream.tell() ), f"{type(converter).__name__}.accepts() should NOT change the file_stream position" # Attempt the conversion if _accepts: try: res = converter.convert(file_stream, stream_info, **_kwargs) except Exception: failed_attempts.append( FailedConversionAttempt( converter=converter, exc_info=sys.exc_info() ) ) finally: file_stream.seek(cur_pos) if res is not None: # Normalize the content res.text_content = "\n".join( [line.rstrip() for line in re.split(r"\r?\n", res.text_content)] ) res.text_content = re.sub(r"\n{3,}", "\n\n", res.text_content) return res # If we got this far without success, report any exceptions if len(failed_attempts) > 0: raise FileConversionException(attempts=failed_attempts) # Nothing can handle it! raise UnsupportedFormatException( "Could not convert stream to Markdown. No converter attempted a conversion, suggesting that the filetype is simply not supported." ) def register_page_converter(self, converter: DocumentConverter) -> None: """DEPRECATED: Use register_converter instead.""" warn( "register_page_converter is deprecated. Use register_converter instead.", DeprecationWarning, ) self.register_converter(converter) def register_converter( self, converter: DocumentConverter, *, priority: float = PRIORITY_SPECIFIC_FILE_FORMAT, ) -> None: """ Register a DocumentConverter with a given priority. Priorities work as follows: By default, most converters get priority DocumentConverter.PRIORITY_SPECIFIC_FILE_FORMAT (== 0). The exceptions are the PlainTextConverter, HtmlConverter, and ZipConverter, which get priority PRIORITY_GENERIC_FILE_FORMAT (== 10), with lower values being tried first (i.e., higher priority). Just prior to conversion, the converters are sorted by priority, using a stable sort. 
This means that converters with the same priority will remain in the same order, with the most recently registered converters appearing first. We have tight control over the order of built-in converters, but plugins can register converters in any order. The registration's priority field reasserts some control over the order of converters. Plugins can register converters with any priority, to appear before or after the built-ins. For example, a plugin with priority 9 will run before the PlainTextConverter, but after the built-in converters. """ self._converters.insert( 0, ConverterRegistration(converter=converter, priority=priority) ) def _get_stream_info_guesses( self, file_stream: BinaryIO, base_guess: StreamInfo ) -> List[StreamInfo]: """ Given a base guess, attempt to guess or expand on the stream info using the stream content (via magika). """ guesses: List[StreamInfo] = [] # Enhance the base guess with information based on the extension or mimetype enhanced_guess = base_guess.copy_and_update() # If there's an extension and no mimetype, try to guess the mimetype if base_guess.mimetype is None and base_guess.extension is not None: _m, _ = mimetypes.guess_type( "placeholder" + base_guess.extension, strict=False ) if _m is not None: enhanced_guess = enhanced_guess.copy_and_update(mimetype=_m) # If there's a mimetype and no extension, try to guess the extension if base_guess.mimetype is not None and base_guess.extension is None: _e = mimetypes.guess_all_extensions(base_guess.mimetype, strict=False) if len(_e) > 0: enhanced_guess = enhanced_guess.copy_and_update(extension=_e[0]) # Call magika to guess from the stream cur_pos = file_stream.tell() try: result = self._magika.identify_stream(file_stream) if result.status == "ok" and result.prediction.output.label != "unknown": # If it's text, also guess the charset charset = None if result.prediction.output.is_text: # Read the first 4k to guess the charset file_stream.seek(cur_pos) stream_page = file_stream.read(4096) charset_result = charset_normalizer.from_bytes(stream_page).best() if charset_result is not None: charset = self._normalize_charset(charset_result.encoding) # Normalize the first extension listed guessed_extension = None if len(result.prediction.output.extensions) > 0: guessed_extension = "." 
+ result.prediction.output.extensions[0] # Determine if the guess is compatible with the base guess compatible = True if ( base_guess.mimetype is not None and base_guess.mimetype != result.prediction.output.mime_type ): compatible = False if ( base_guess.extension is not None and base_guess.extension.lstrip(".") not in result.prediction.output.extensions ): compatible = False if ( base_guess.charset is not None and self._normalize_charset(base_guess.charset) != charset ): compatible = False if compatible: # Add the compatible base guess guesses.append( StreamInfo( mimetype=base_guess.mimetype or result.prediction.output.mime_type, extension=base_guess.extension or guessed_extension, charset=base_guess.charset or charset, filename=base_guess.filename, local_path=base_guess.local_path, url=base_guess.url, ) ) else: # The magika guess was incompatible with the base guess, so add both guesses guesses.append(enhanced_guess) guesses.append( StreamInfo( mimetype=result.prediction.output.mime_type, extension=guessed_extension, charset=charset, filename=base_guess.filename, local_path=base_guess.local_path, url=base_guess.url, ) ) else: # There were no other guesses, so just add the base guess guesses.append(enhanced_guess) finally: file_stream.seek(cur_pos) return guesses def _normalize_charset(self, charset: str | None) -> str | None: """ Normalize a charset string to a canonical form. """ if charset is None: return None try: return codecs.lookup(charset).name except LookupError: return charset
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
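A minimal usage sketch for the dispatch logic in the row above: `convert_uri()` decodes `data:` URIs and hands the bytes to `convert_stream()`. It assumes only that the `markitdown` package is installed; the payload is an arbitrary example.

```python
from markitdown import MarkItDown

md = MarkItDown()
# "SGVsbG8=" is base64 for "Hello"; the data: branch of convert_uri()
# wraps the decoded bytes in io.BytesIO and calls convert_stream()
result = md.convert_uri("data:text/plain;base64,SGVsbG8=")
print(result.markdown)  # -> Hello
```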
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/__init__.py
packages/markitdown/src/markitdown/__init__.py
# SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com> # # SPDX-License-Identifier: MIT from .__about__ import __version__ from ._markitdown import ( MarkItDown, PRIORITY_SPECIFIC_FILE_FORMAT, PRIORITY_GENERIC_FILE_FORMAT, ) from ._base_converter import DocumentConverterResult, DocumentConverter from ._stream_info import StreamInfo from ._exceptions import ( MarkItDownException, MissingDependencyException, FailedConversionAttempt, FileConversionException, UnsupportedFormatException, ) __all__ = [ "__version__", "MarkItDown", "DocumentConverter", "DocumentConverterResult", "MarkItDownException", "MissingDependencyException", "FailedConversionAttempt", "FileConversionException", "UnsupportedFormatException", "StreamInfo", "PRIORITY_SPECIFIC_FILE_FORMAT", "PRIORITY_GENERIC_FILE_FORMAT", ]
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
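The exports above include the package's exception hierarchy. A hedged sketch of handling it; the byte payload is arbitrary, and which exception fires depends on the converters registered at runtime:

```python
from markitdown import (
    FileConversionException,
    MarkItDown,
    UnsupportedFormatException,
)

md = MarkItDown()
try:
    # four arbitrary bytes that no converter is expected to accept
    md.convert_uri("data:application/octet-stream;base64,AAECAw==")
except UnsupportedFormatException:
    print("no converter accepted the stream")
except FileConversionException as exc:
    print("a converter accepted the stream but failed:", exc)
```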
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_base_converter.py
packages/markitdown/src/markitdown/_base_converter.py
from typing import Any, BinaryIO, Optional from ._stream_info import StreamInfo class DocumentConverterResult: """The result of converting a document to Markdown.""" def __init__( self, markdown: str, *, title: Optional[str] = None, ): """ Initialize the DocumentConverterResult. The only required parameter is the converted Markdown text. The title, and any other metadata that may be added in the future, are optional. Parameters: - markdown: The converted Markdown text. - title: Optional title of the document. """ self.markdown = markdown self.title = title @property def text_content(self) -> str: """Soft-deprecated alias for `markdown`. New code should migrate to using `markdown` or __str__.""" return self.markdown @text_content.setter def text_content(self, markdown: str): """Soft-deprecated alias for `markdown`. New code should migrate to using `markdown` or __str__.""" self.markdown = markdown def __str__(self) -> str: """Return the converted Markdown text.""" return self.markdown class DocumentConverter: """Abstract superclass of all DocumentConverters.""" def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: """ Return a quick determination on whether the converter should attempt converting the document. This is primarily based on `stream_info` (typically, `stream_info.mimetype`, `stream_info.extension`). In cases where the data is retrieved via HTTP, the `stream_info.url` might also be referenced to make a determination (e.g., special converters for Wikipedia, YouTube, etc.). Finally, it is conceivable that the `stream_info.filename` might be used in cases where the filename is well-known (e.g., `Dockerfile`, `Makefile`, etc.) NOTE: The method signature is designed to match that of the convert() method. This provides some assurance that, if accepts() returns True, the convert() method will also be able to handle the document. IMPORTANT: In rare cases (e.g., OutlookMsgConverter), we need to read more from the stream to make a final determination. Read operations inevitably advance the position in file_stream. In these cases, the position MUST be reset before returning. This is because the convert() method may be called immediately after accepts(), and will expect the file_stream to be at the original position. E.g., cur_pos = file_stream.tell() # Save the current position data = file_stream.read(100) # ... peek at the first 100 bytes, etc. file_stream.seek(cur_pos) # Reset the position to the original position Parameters: - file_stream: The file-like object to convert. Must support seek(), tell(), and read() methods. - stream_info: The StreamInfo object containing metadata about the file (mimetype, extension, charset, etc.) - kwargs: Additional keyword arguments for the converter. Returns: - bool: True if the converter can handle the document, False otherwise. """ raise NotImplementedError( f"The subclass, {type(self).__name__}, must implement the accepts() method to determine if it can handle the document." ) def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: """ Convert a document to Markdown text. Parameters: - file_stream: The file-like object to convert. Must support seek(), tell(), and read() methods. - stream_info: The StreamInfo object containing metadata about the file (mimetype, extension, charset, etc.) - kwargs: Additional keyword arguments for the converter. 
Returns: - DocumentConverterResult: The result of the conversion, which includes the title and markdown content. Raises: - FileConversionException: If the mimetype is recognized, but the conversion fails for some other reason. - MissingDependencyException: If the converter requires a dependency that is not installed. """ raise NotImplementedError("Subclasses must implement this method")
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
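A toy subclass showing the accepts()/convert() contract documented above, wired in through register_converter() from the first row; the `.uc` extension is invented for the example:

```python
import io
from typing import Any, BinaryIO

from markitdown import (
    DocumentConverter,
    DocumentConverterResult,
    MarkItDown,
    StreamInfo,
)


class UppercaseTextConverter(DocumentConverter):
    """Toy converter for the invented .uc extension."""

    def accepts(
        self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any
    ) -> bool:
        # Decide from metadata alone, so the stream position never moves
        return (stream_info.extension or "").lower() == ".uc"

    def convert(
        self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any
    ) -> DocumentConverterResult:
        text = file_stream.read().decode(stream_info.charset or "utf-8")
        return DocumentConverterResult(markdown=text.upper())


md = MarkItDown()
md.register_converter(UppercaseTextConverter())
result = md.convert_stream(
    io.BytesIO(b"hello"), stream_info=StreamInfo(extension=".uc")
)
print(result.markdown)  # -> HELLO
```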
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_bing_serp_converter.py
packages/markitdown/src/markitdown/converters/_bing_serp_converter.py
import re import base64 import binascii from urllib.parse import parse_qs, urlparse from typing import Any, BinaryIO from bs4 import BeautifulSoup from .._base_converter import DocumentConverter, DocumentConverterResult from .._stream_info import StreamInfo from ._markdownify import _CustomMarkdownify ACCEPTED_MIME_TYPE_PREFIXES = [ "text/html", "application/xhtml", ] ACCEPTED_FILE_EXTENSIONS = [ ".html", ".htm", ] class BingSerpConverter(DocumentConverter): """ Handle Bing results pages (only the organic search results). NOTE: It is better to use the Bing API """ def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: """ Make sure we're dealing with HTML content *from* Bing. """ url = stream_info.url or "" mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if not re.search(r"^https://www\.bing\.com/search\?q=", url): # Not a Bing SERP URL return False if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True # Not HTML content return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: assert stream_info.url is not None # Parse the query parameters parsed_params = parse_qs(urlparse(stream_info.url).query) query = parsed_params.get("q", [""])[0] # Parse the stream encoding = "utf-8" if stream_info.charset is None else stream_info.charset soup = BeautifulSoup(file_stream, "html.parser", from_encoding=encoding) # Clean up some formatting for tptt in soup.find_all(class_="tptt"): if hasattr(tptt, "string") and tptt.string: tptt.string += " " for slug in soup.find_all(class_="algoSlug_icon"): slug.extract() # Parse the algorithmic results _markdownify = _CustomMarkdownify(**kwargs) results = list() for result in soup.find_all(class_="b_algo"): if not hasattr(result, "find_all"): continue # Rewrite redirect urls for a in result.find_all("a", href=True): parsed_href = urlparse(a["href"]) qs = parse_qs(parsed_href.query) # The destination is contained in the u parameter, # but appears to be base64 encoded, with some prefix if "u" in qs: u = ( qs["u"][0][2:].strip() + "==" ) # Python 3 doesn't care about extra padding try: # RFC 4648 "Base64URL" variant, which uses "-" and "_" a["href"] = base64.b64decode(u, altchars="-_").decode("utf-8") except UnicodeDecodeError: pass except binascii.Error: pass # Convert to markdown md_result = _markdownify.convert_soup(result).strip() lines = [line.strip() for line in re.split(r"\n+", md_result)] results.append("\n".join([line for line in lines if len(line) > 0])) webpage_text = ( f"## A Bing search for '{query}' found the following results:\n\n" + "\n\n".join(results) ) return DocumentConverterResult( markdown=webpage_text, title=None if soup.title is None else soup.title.string, )
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
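The redirect rewriting above can be reproduced standalone. A sketch; the `u` value is constructed for the example (a two-character prefix plus unpadded Base64URL), not a captured Bing parameter:

```python
import base64

u_param = "a1" + "aHR0cHM6Ly9leGFtcGxlLmNvbQ"  # prefix + unpadded base64 of a URL
u = u_param[2:].strip() + "=="  # drop the prefix and re-pad, as the converter does
print(base64.b64decode(u, altchars="-_").decode("utf-8"))  # -> https://example.com
```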
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_llm_caption.py
packages/markitdown/src/markitdown/converters/_llm_caption.py
from typing import BinaryIO, Union import base64 import mimetypes from .._stream_info import StreamInfo def llm_caption( file_stream: BinaryIO, stream_info: StreamInfo, *, client, model, prompt=None ) -> Union[None, str]: if prompt is None or prompt.strip() == "": prompt = "Write a detailed caption for this image." # Get the content type content_type = stream_info.mimetype if not content_type: content_type, _ = mimetypes.guess_type("_dummy" + (stream_info.extension or "")) if not content_type: content_type = "application/octet-stream" # Convert to base64 cur_pos = file_stream.tell() try: base64_image = base64.b64encode(file_stream.read()).decode("utf-8") except Exception as e: return None finally: file_stream.seek(cur_pos) # Prepare the data-uri data_uri = f"data:{content_type};base64,{base64_image}" # Prepare the OpenAI API request messages = [ { "role": "user", "content": [ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": { "url": data_uri, }, }, ], } ] # Call the OpenAI API response = client.chat.completions.create(model=model, messages=messages) return response.choices[0].message.content
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
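Image converters call `llm_caption()` when an LLM client is configured. A hedged wiring sketch: it assumes the `openai` package, an `OPENAI_API_KEY` in the environment, a hypothetical `photo.jpg`, and that the constructor accepts `llm_prompt` (it is forwarded as `self._llm_prompt` in `_convert()`):

```python
from openai import OpenAI

from markitdown import MarkItDown

client = OpenAI()  # reads OPENAI_API_KEY from the environment
md = MarkItDown(
    llm_client=client,  # injected into converter kwargs by _convert()
    llm_model="gpt-4o",
    llm_prompt="Write a detailed caption for this image.",
)
print(md.convert_local("photo.jpg").markdown)  # hypothetical image path
```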
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_wikipedia_converter.py
packages/markitdown/src/markitdown/converters/_wikipedia_converter.py
import re import bs4 from typing import Any, BinaryIO from .._base_converter import DocumentConverter, DocumentConverterResult from .._stream_info import StreamInfo from ._markdownify import _CustomMarkdownify ACCEPTED_MIME_TYPE_PREFIXES = [ "text/html", "application/xhtml", ] ACCEPTED_FILE_EXTENSIONS = [ ".html", ".htm", ] class WikipediaConverter(DocumentConverter): """Handle Wikipedia pages separately, focusing only on the main document content.""" def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: """ Make sure we're dealing with HTML content *from* Wikipedia. """ url = stream_info.url or "" mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if not re.search(r"^https?:\/\/[a-zA-Z]{2,3}\.wikipedia\.org\/", url): # Not a Wikipedia URL return False if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True # Not HTML content return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: # Parse the stream encoding = "utf-8" if stream_info.charset is None else stream_info.charset soup = bs4.BeautifulSoup(file_stream, "html.parser", from_encoding=encoding) # Remove javascript and style blocks for script in soup(["script", "style"]): script.extract() # Keep only the main content body_elm = soup.find("div", {"id": "mw-content-text"}) title_elm = soup.find("span", {"class": "mw-page-title-main"}) webpage_text = "" main_title = None if soup.title is None else soup.title.string if body_elm: # What's the title if title_elm and isinstance(title_elm, bs4.Tag): main_title = title_elm.string # Convert the page webpage_text = f"# {main_title}\n\n" + _CustomMarkdownify( **kwargs ).convert_soup(body_elm) else: webpage_text = _CustomMarkdownify(**kwargs).convert_soup(soup) return DocumentConverterResult( markdown=webpage_text, title=main_title, )
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
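A usage sketch for the Wikipedia-specific path; it needs network access, and the article is just an example:

```python
from markitdown import MarkItDown

md = MarkItDown()
# The URL matches the accepts() pattern above, so WikipediaConverter
# handles the page instead of the generic HTML converter
result = md.convert_uri("https://en.wikipedia.org/wiki/Markdown")
print(result.title)
print(result.markdown[:200])
```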
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_plain_text_converter.py
packages/markitdown/src/markitdown/converters/_plain_text_converter.py
import sys from typing import BinaryIO, Any from charset_normalizer import from_bytes from .._base_converter import DocumentConverter, DocumentConverterResult from .._stream_info import StreamInfo # Try loading optional (but in this case, required) dependencies # Save reporting of any exceptions for later _dependency_exc_info = None try: import mammoth # noqa: F401 except ImportError: # Preserve the error and stack trace for later _dependency_exc_info = sys.exc_info() ACCEPTED_MIME_TYPE_PREFIXES = [ "text/", "application/json", "application/markdown", ] ACCEPTED_FILE_EXTENSIONS = [ ".txt", ".text", ".md", ".markdown", ".json", ".jsonl", ] class PlainTextConverter(DocumentConverter): """Anything with content type text/plain""" def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() # If we have a charset, we can safely assume it's text # With Magika in the earlier stages, this handles most cases if stream_info.charset is not None: return True # Otherwise, check the mimetype and extension if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: if stream_info.charset: text_content = file_stream.read().decode(stream_info.charset) else: text_content = str(from_bytes(file_stream.read()).best()) return DocumentConverterResult(markdown=text_content)
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
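Because `accepts()` returns True whenever a charset is known, in-memory text can be converted directly. A minimal sketch:

```python
import io

from markitdown import MarkItDown, StreamInfo

md = MarkItDown()
result = md.convert_stream(
    io.BytesIO("héllo, plain text".encode("utf-8")),
    stream_info=StreamInfo(mimetype="text/plain", charset="utf-8"),
)
print(result.markdown)
```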
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_transcribe_audio.py
packages/markitdown/src/markitdown/converters/_transcribe_audio.py
import io import sys from typing import BinaryIO from .._exceptions import MissingDependencyException # Try loading optional (but in this case, required) dependencies # Save reporting of any exceptions for later _dependency_exc_info = None try: # Suppress some warnings on library import import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=SyntaxWarning) import speech_recognition as sr import pydub except ImportError: # Preserve the error and stack trace for later _dependency_exc_info = sys.exc_info() def transcribe_audio(file_stream: BinaryIO, *, audio_format: str = "wav") -> str: # Check for installed dependencies if _dependency_exc_info is not None: raise MissingDependencyException( "Speech transcription requires installing MarkItDown with the [audio-transcription] optional dependencies. E.g., `pip install markitdown[audio-transcription]` or `pip install markitdown[all]`" ) from _dependency_exc_info[ 1 ].with_traceback( # type: ignore[union-attr] _dependency_exc_info[2] ) if audio_format in ["wav", "aiff", "flac"]: audio_source = file_stream elif audio_format in ["mp3", "mp4"]: audio_segment = pydub.AudioSegment.from_file(file_stream, format=audio_format) audio_source = io.BytesIO() audio_segment.export(audio_source, format="wav") audio_source.seek(0) else: raise ValueError(f"Unsupported audio format: {audio_format}") recognizer = sr.Recognizer() with sr.AudioFile(audio_source) as source: audio = recognizer.record(source) transcript = recognizer.recognize_google(audio).strip() return "[No speech detected]" if transcript == "" else transcript
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
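A sketch of the mp3-to-wav branch above in isolation, using only the pydub calls the function itself makes; `song.mp3` is a hypothetical file and the extras named in the error message must be installed:

```python
import io

import pydub

# Mirrors the mp3/mp4 branch of transcribe_audio()
segment = pydub.AudioSegment.from_file("song.mp3", format="mp3")
wav_stream = io.BytesIO()
segment.export(wav_stream, format="wav")
wav_stream.seek(0)  # ready for speech_recognition's sr.AudioFile
```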
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_epub_converter.py
packages/markitdown/src/markitdown/converters/_epub_converter.py
import os import zipfile from defusedxml import minidom from xml.dom.minidom import Document from typing import BinaryIO, Any, Dict, List from ._html_converter import HtmlConverter from .._base_converter import DocumentConverterResult from .._stream_info import StreamInfo ACCEPTED_MIME_TYPE_PREFIXES = [ "application/epub", "application/epub+zip", "application/x-epub+zip", ] ACCEPTED_FILE_EXTENSIONS = [".epub"] MIME_TYPE_MAPPING = { ".html": "text/html", ".xhtml": "application/xhtml+xml", } class EpubConverter(HtmlConverter): """ Converts EPUB files to Markdown. Style information (e.g., headings) and tables are preserved where possible. """ def __init__(self): super().__init__() self._html_converter = HtmlConverter() def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: with zipfile.ZipFile(file_stream, "r") as z: # Extract metadata (title, authors, language, publisher, date, description, identifier) from the EPUB file # Locate content.opf container_dom = minidom.parse(z.open("META-INF/container.xml")) opf_path = container_dom.getElementsByTagName("rootfile")[0].getAttribute( "full-path" ) # Parse content.opf opf_dom = minidom.parse(z.open(opf_path)) metadata: Dict[str, Any] = { "title": self._get_text_from_node(opf_dom, "dc:title"), "authors": self._get_all_texts_from_nodes(opf_dom, "dc:creator"), "language": self._get_text_from_node(opf_dom, "dc:language"), "publisher": self._get_text_from_node(opf_dom, "dc:publisher"), "date": self._get_text_from_node(opf_dom, "dc:date"), "description": self._get_text_from_node(opf_dom, "dc:description"), "identifier": self._get_text_from_node(opf_dom, "dc:identifier"), } # Extract manifest items (ID → href mapping) manifest = { item.getAttribute("id"): item.getAttribute("href") for item in opf_dom.getElementsByTagName("item") } # Extract spine order (ID refs) spine_items = opf_dom.getElementsByTagName("itemref") spine_order = [item.getAttribute("idref") for item in spine_items] # Convert spine order to actual file paths base_path = "/".join( opf_path.split("/")[:-1] ) # Get base directory of content.opf spine = [ f"{base_path}/{manifest[item_id]}" if base_path else manifest[item_id] for item_id in spine_order if item_id in manifest ] # Extract and convert the content markdown_content: List[str] = [] for file in spine: if file in z.namelist(): with z.open(file) as f: filename = os.path.basename(file) extension = os.path.splitext(filename)[1].lower() mimetype = MIME_TYPE_MAPPING.get(extension) converted_content = self._html_converter.convert( f, StreamInfo( mimetype=mimetype, extension=extension, filename=filename, ), ) markdown_content.append(converted_content.markdown.strip()) # Format and add the metadata metadata_markdown = [] for key, value in metadata.items(): if isinstance(value, list): value = ", ".join(value) if value: metadata_markdown.append(f"**{key.capitalize()}:** {value}") markdown_content.insert(0, "\n".join(metadata_markdown)) return DocumentConverterResult( markdown="\n\n".join(markdown_content), title=metadata["title"] ) def _get_text_from_node(self, dom: Document, 
tag_name: str) -> str | None: """Convenience function to extract a single occurrence of a tag (e.g., title).""" texts = self._get_all_texts_from_nodes(dom, tag_name) if len(texts) > 0: return texts[0] else: return None def _get_all_texts_from_nodes(self, dom: Document, tag_name: str) -> List[str]: """Helper function to extract all occurrences of a tag (e.g., multiple authors).""" texts: List[str] = [] for node in dom.getElementsByTagName(tag_name): if node.firstChild and hasattr(node.firstChild, "nodeValue"): texts.append(node.firstChild.nodeValue.strip()) return texts
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
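The Dublin Core extraction above works on any OPF metadata block. A toy sketch using `defusedxml` the same way `_get_all_texts_from_nodes()` does; the XML snippet is invented:

```python
from defusedxml import minidom

opf = (
    '<metadata xmlns:dc="http://purl.org/dc/elements/1.1/">'
    "<dc:title>Example Book</dc:title>"
    "<dc:creator>A. Author</dc:creator>"
    "</metadata>"
)
dom = minidom.parseString(opf)
# minidom matches the qualified tag name literally, prefix included
titles = [n.firstChild.nodeValue.strip() for n in dom.getElementsByTagName("dc:title")]
print(titles)  # -> ['Example Book']
```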
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_csv_converter.py
packages/markitdown/src/markitdown/converters/_csv_converter.py
import csv import io from typing import BinaryIO, Any from charset_normalizer import from_bytes from .._base_converter import DocumentConverter, DocumentConverterResult from .._stream_info import StreamInfo ACCEPTED_MIME_TYPE_PREFIXES = [ "text/csv", "application/csv", ] ACCEPTED_FILE_EXTENSIONS = [".csv"] class CsvConverter(DocumentConverter): """ Converts CSV files to Markdown tables. """ def __init__(self): super().__init__() def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: # Read the file content if stream_info.charset: content = file_stream.read().decode(stream_info.charset) else: content = str(from_bytes(file_stream.read()).best()) # Parse CSV content reader = csv.reader(io.StringIO(content)) rows = list(reader) if not rows: return DocumentConverterResult(markdown="") # Create markdown table markdown_table = [] # Add header row markdown_table.append("| " + " | ".join(rows[0]) + " |") # Add separator row markdown_table.append("| " + " | ".join(["---"] * len(rows[0])) + " |") # Add data rows for row in rows[1:]: # Make sure row has the same number of columns as header while len(row) < len(rows[0]): row.append("") # Truncate if row has more columns than header row = row[: len(rows[0])] markdown_table.append("| " + " | ".join(row) + " |") result = "\n".join(markdown_table) return DocumentConverterResult(markdown=result)
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
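The pad/truncate logic above normalizes ragged CSV rows to the header width. Reproduced on in-memory rows:

```python
rows = [["name", "qty"], ["apple", "3"], ["pear"], ["fig", "7", "extra"]]

table = [
    "| " + " | ".join(rows[0]) + " |",
    "| " + " | ".join(["---"] * len(rows[0])) + " |",
]
for row in rows[1:]:
    row = (row + [""] * len(rows[0]))[: len(rows[0])]  # pad short rows, drop extras
    table.append("| " + " | ".join(row) + " |")
print("\n".join(table))
```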
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_html_converter.py
packages/markitdown/src/markitdown/converters/_html_converter.py
import io from typing import Any, BinaryIO, Optional from bs4 import BeautifulSoup from .._base_converter import DocumentConverter, DocumentConverterResult from .._stream_info import StreamInfo from ._markdownify import _CustomMarkdownify ACCEPTED_MIME_TYPE_PREFIXES = [ "text/html", "application/xhtml", ] ACCEPTED_FILE_EXTENSIONS = [ ".html", ".htm", ] class HtmlConverter(DocumentConverter): """Anything with content type text/html""" def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: # Parse the stream encoding = "utf-8" if stream_info.charset is None else stream_info.charset soup = BeautifulSoup(file_stream, "html.parser", from_encoding=encoding) # Remove javascript and style blocks for script in soup(["script", "style"]): script.extract() # Print only the main content body_elm = soup.find("body") webpage_text = "" if body_elm: webpage_text = _CustomMarkdownify(**kwargs).convert_soup(body_elm) else: webpage_text = _CustomMarkdownify(**kwargs).convert_soup(soup) assert isinstance(webpage_text, str) # remove leading and trailing \n webpage_text = webpage_text.strip() return DocumentConverterResult( markdown=webpage_text, title=None if soup.title is None else soup.title.string, ) def convert_string( self, html_content: str, *, url: Optional[str] = None, **kwargs ) -> DocumentConverterResult: """ Non-standard convenience method to convert a string to markdown. Given that many converters produce HTML as intermediate output, this allows for easy conversion of HTML to markdown. """ return self.convert( file_stream=io.BytesIO(html_content.encode("utf-8")), stream_info=StreamInfo( mimetype="text/html", extension=".html", charset="utf-8", url=url, ), **kwargs, )
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
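A sketch of the non-standard `convert_string()` convenience above. The import path is an assumption — the class may need to be imported from the private `_html_converter` module if the `converters` subpackage does not re-export it:

```python
from markitdown.converters import HtmlConverter  # assumed re-export

html = "<html><head><title>Hi</title></head><body><p>Hello <b>world</b>.</p></body></html>"
result = HtmlConverter().convert_string(html)
print(result.title)     # -> Hi
print(result.markdown)  # -> Hello **world**.
```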
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_pdf_converter.py
packages/markitdown/src/markitdown/converters/_pdf_converter.py
import sys import io from typing import BinaryIO, Any from .._base_converter import DocumentConverter, DocumentConverterResult from .._stream_info import StreamInfo from .._exceptions import MissingDependencyException, MISSING_DEPENDENCY_MESSAGE # Try loading optional (but in this case, required) dependencies # Save reporting of any exceptions for later _dependency_exc_info = None try: import pdfminer import pdfminer.high_level except ImportError: # Preserve the error and stack trace for later _dependency_exc_info = sys.exc_info() ACCEPTED_MIME_TYPE_PREFIXES = [ "application/pdf", "application/x-pdf", ] ACCEPTED_FILE_EXTENSIONS = [".pdf"] class PdfConverter(DocumentConverter): """ Converts PDFs to Markdown. Most style information is ignored, so the results are essentially plain-text. """ def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in ACCEPTED_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): return True return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: # Check the dependencies if _dependency_exc_info is not None: raise MissingDependencyException( MISSING_DEPENDENCY_MESSAGE.format( converter=type(self).__name__, extension=".pdf", feature="pdf", ) ) from _dependency_exc_info[ 1 ].with_traceback( # type: ignore[union-attr] _dependency_exc_info[2] ) assert isinstance(file_stream, io.IOBase) # for mypy return DocumentConverterResult( markdown=pdfminer.high_level.extract_text(file_stream), )
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
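Usage sketch; the converter raises MissingDependencyException unless pdfminer is available (e.g., via `pip install markitdown[pdf]` — the extra's name is an assumption), and `paper.pdf` is a hypothetical path:

```python
from markitdown import MarkItDown

md = MarkItDown()
result = md.convert_local("paper.pdf")  # hypothetical path
print(result.markdown[:500])  # pdfminer output is essentially plain text
```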
microsoft/markitdown
https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_ipynb_converter.py
packages/markitdown/src/markitdown/converters/_ipynb_converter.py
from typing import BinaryIO, Any import json from .._base_converter import DocumentConverter, DocumentConverterResult from .._exceptions import FileConversionException from .._stream_info import StreamInfo CANDIDATE_MIME_TYPE_PREFIXES = [ "application/json", ] ACCEPTED_FILE_EXTENSIONS = [".ipynb"] class IpynbConverter(DocumentConverter): """Converts Jupyter Notebook (.ipynb) files to Markdown.""" def accepts( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> bool: mimetype = (stream_info.mimetype or "").lower() extension = (stream_info.extension or "").lower() if extension in ACCEPTED_FILE_EXTENSIONS: return True for prefix in CANDIDATE_MIME_TYPE_PREFIXES: if mimetype.startswith(prefix): # Read further to see if it's a notebook cur_pos = file_stream.tell() try: encoding = stream_info.charset or "utf-8" notebook_content = file_stream.read().decode(encoding) return ( "nbformat" in notebook_content and "nbformat_minor" in notebook_content ) finally: file_stream.seek(cur_pos) return False def convert( self, file_stream: BinaryIO, stream_info: StreamInfo, **kwargs: Any, # Options to pass to the converter ) -> DocumentConverterResult: # Parse and convert the notebook encoding = stream_info.charset or "utf-8" notebook_content = file_stream.read().decode(encoding=encoding) return self._convert(json.loads(notebook_content)) def _convert(self, notebook_content: dict) -> DocumentConverterResult: """Helper function that converts notebook JSON content to Markdown.""" try: md_output = [] title = None for cell in notebook_content.get("cells", []): cell_type = cell.get("cell_type", "") source_lines = cell.get("source", []) if cell_type == "markdown": md_output.append("".join(source_lines)) # Extract the first # heading as title if not already found if title is None: for line in source_lines: if line.startswith("# "): title = line.lstrip("# ").strip() break elif cell_type == "code": # Code cells are wrapped in Markdown code blocks md_output.append(f"```python\n{''.join(source_lines)}\n```") elif cell_type == "raw": md_output.append(f"```\n{''.join(source_lines)}\n```") md_text = "\n\n".join(md_output) # Check for title in notebook metadata title = notebook_content.get("metadata", {}).get("title", title) return DocumentConverterResult( markdown=md_text, title=title, ) except Exception as e: raise FileConversionException( f"Error converting .ipynb file: {str(e)}" ) from e
python
MIT
dde250a456d178fe344fce17ef10d00fe929f680
2026-01-04T14:38:15.496810Z
false
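A self-contained sketch: build a minimal notebook in memory and push it through the stream API; the `nbformat` keys are exactly what `accepts()` sniffs for:

```python
import io
import json

from markitdown import MarkItDown, StreamInfo

nb = {
    "nbformat": 4,
    "nbformat_minor": 5,
    "metadata": {},
    "cells": [
        {"cell_type": "markdown", "metadata": {}, "source": ["# Demo\n"]},
        {"cell_type": "code", "metadata": {}, "execution_count": None,
         "outputs": [], "source": ["print('hi')\n"]},
    ],
}
md = MarkItDown()
result = md.convert_stream(
    io.BytesIO(json.dumps(nb).encode("utf-8")),
    stream_info=StreamInfo(extension=".ipynb", mimetype="application/json"),
)
print(result.title)  # -> Demo
print(result.markdown)
```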