repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
public-apis/public-apis | https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/tests/__init__.py | scripts/tests/__init__.py | # -*- coding: utf-8 -*-
| python | MIT | a58c76cd32ef345da3e4b7252c7b47275e866ae7 | 2026-01-04T14:38:15.124778Z | false |
public-apis/public-apis | https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/tests/test_validate_format.py | scripts/tests/test_validate_format.py | # -*- coding: utf-8 -*-
import unittest
from validate.format import error_message
from validate.format import get_categories_content
from validate.format import check_alphabetical_order
from validate.format import check_title
from validate.format import check_description, max_description_length
from validate.format import check_auth, auth_keys
from validate.format import check_https, https_keys
from validate.format import check_cors, cors_keys
from validate.format import check_entry
from validate.format import check_file_format, min_entries_per_category, num_segments
class TestValidateFormat(unittest.TestCase):
def test_error_message_return_and_return_type(self):
line_num_unity = 1
line_num_ten = 10
line_num_hundred = 100
line_num_thousand = 1000
msg = 'This is a unit test'
err_msg_unity = error_message(line_num_unity, msg)
err_msg_ten = error_message(line_num_ten, msg)
err_msg_hundred = error_message(line_num_hundred, msg)
err_msg_thousand = error_message(line_num_thousand, msg)
self.assertIsInstance(err_msg_unity, str)
self.assertIsInstance(err_msg_ten, str)
self.assertIsInstance(err_msg_hundred, str)
self.assertIsInstance(err_msg_thousand, str)
self.assertEqual(err_msg_unity, '(L002) This is a unit test')
self.assertEqual(err_msg_ten, '(L011) This is a unit test')
self.assertEqual(err_msg_hundred, '(L101) This is a unit test')
self.assertEqual(err_msg_thousand, '(L1001) This is a unit test')
def test_if_get_categories_content_return_correct_data_of_categories(self):
fake_contents = [
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
result = get_categories_content(fake_contents)
self.assertIsInstance(result, tuple)
categories, category_line_num = result
self.assertIsInstance(categories, dict)
self.assertIsInstance(category_line_num, dict)
expected_result = ({'A': ['AA', 'AB'], 'B': ['BA', 'BB']}, {'A': 0, 'B': 6})
for res, ex_res in zip(result, expected_result):
with self.subTest():
self.assertEqual(res, ex_res)
def test_if_check_alphabetical_order_return_correct_msg_error(self):
correct_lines = [
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
incorrect_lines = [
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
err_msgs_1 = check_alphabetical_order(correct_lines)
err_msgs_2 = check_alphabetical_order(incorrect_lines)
self.assertIsInstance(err_msgs_1, list)
self.assertIsInstance(err_msgs_2, list)
self.assertEqual(len(err_msgs_1), 0)
self.assertEqual(len(err_msgs_2), 2)
expected_err_msgs = [
'(L001) A category is not in alphabetical order',
'(L007) B category is not in alphabetical order'
]
for err_msg, ex_err_msg in zip(err_msgs_2, expected_err_msgs):
with self.subTest():
self.assertEqual(err_msg, ex_err_msg)
def test_check_title_with_correct_title(self):
raw_title = '[A](https://www.ex.com)'
err_msgs = check_title(0, raw_title)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_title_with_markdown_syntax_incorrect(self):
raw_title = '[A(https://www.ex.com)'
err_msgs = check_title(0, raw_title)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) Title syntax should be "[TITLE](LINK)"'
self.assertEqual(err_msg, expected_err_msg)
def test_check_title_with_api_at_the_end_of_the_title(self):
raw_title = '[A API](https://www.ex.com)'
err_msgs = check_title(0, raw_title)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) Title should not end with "... API". Every entry is an API here!'
self.assertEqual(err_msg, expected_err_msg)
def test_check_description_with_correct_description(self):
desc = 'This is a fake description'
err_msgs = check_description(0, desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_description_with_first_char_is_not_capitalized(self):
desc = 'this is a fake description'
err_msgs = check_description(0, desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) first character of description is not capitalized'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_description_with_punctuation_in_the_end(self):
base_desc = 'This is a fake description'
punctuation = r"""!"#$%&'*+,-./:;<=>?@[\]^_`{|}~"""
desc_with_punc = [base_desc + punc for punc in punctuation]
for desc in desc_with_punc:
with self.subTest():
err_msgs = check_description(0, desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) description should not end with {desc[-1]}'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_description_that_exceeds_the_character_limit(self):
long_desc = 'Desc' * max_description_length
long_desc_length = len(long_desc)
err_msgs = check_description(0, long_desc)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) description should not exceed {max_description_length} characters (currently {long_desc_length})'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_auth_with_valid_auth(self):
auth_valid = [f'`{auth}`' for auth in auth_keys if auth != 'No']
auth_valid.append('No')
for auth in auth_valid:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_auth_without_backtick(self):
auth_without_backtick = [auth for auth in auth_keys if auth != 'No']
for auth in auth_without_backtick:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = '(L001) auth value is not enclosed with `backticks`'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_auth_with_invalid_auth(self):
auth_invalid_without_backtick = ['Yes', 'yes', 'no', 'random', 'Unknown']
auth_invalid_with_backtick = ['`Yes`', '`yes`', '`no`', '`random`', '`Unknown`']
for auth in auth_invalid_without_backtick:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 2)
err_msg_1 = err_msgs[0]
err_msg_2 = err_msgs[1]
expected_err_msg_1 = '(L001) auth value is not enclosed with `backticks`'
expected_err_msg_2 = f'(L001) {auth} is not a valid Auth option'
self.assertIsInstance(err_msg_1, str)
self.assertIsInstance(err_msg_2, str)
self.assertEqual(err_msg_1, expected_err_msg_1)
self.assertEqual(err_msg_2, expected_err_msg_2)
for auth in auth_invalid_with_backtick:
with self.subTest():
err_msgs = check_auth(0, auth)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) {auth} is not a valid Auth option'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_https_with_valid_https(self):
for https in https_keys:
with self.subTest():
err_msgs = check_https(0, https)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_https_with_invalid_https(self):
invalid_https_keys = ['yes', 'no', 'Unknown', 'https', 'http']
for https in invalid_https_keys:
with self.subTest():
err_msgs = check_https(0, https)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) {https} is not a valid HTTPS option'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_cors_with_valid_cors(self):
for cors in cors_keys:
with self.subTest():
err_msgs = check_cors(0, cors)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_cors_with_invalid_cors(self):
invalid_cors_keys = ['yes', 'no', 'unknown', 'cors']
for cors in invalid_cors_keys:
with self.subTest():
err_msgs = check_cors(0, cors)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
expected_err_msg = f'(L001) {cors} is not a valid CORS option'
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msg, expected_err_msg)
def test_check_entry_with_correct_segments(self):
correct_segments = ['[A](https://www.ex.com)', 'Desc', '`apiKey`', 'Yes', 'Yes']
err_msgs = check_entry(0, correct_segments)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_entry_with_incorrect_segments(self):
incorrect_segments = ['[A API](https://www.ex.com)', 'desc.', 'yes', 'yes', 'yes']
err_msgs = check_entry(0, incorrect_segments)
expected_err_msgs = [
'(L001) Title should not end with "... API". Every entry is an API here!',
'(L001) first character of description is not capitalized',
'(L001) description should not end with .',
'(L001) auth value is not enclosed with `backticks`',
'(L001) yes is not a valid Auth option',
'(L001) yes is not a valid HTTPS option',
'(L001) yes is not a valid CORS option'
]
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 7)
for err_msg in err_msgs:
with self.subTest():
self.assertIsInstance(err_msg, str)
self.assertEqual(err_msgs, expected_err_msgs)
def test_check_file_format_with_correct_format(self):
correct_format = [
'## Index',
'* [A](#a)',
'* [B](#b)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
err_msgs = check_file_format(lines=correct_format)
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 0)
self.assertEqual(err_msgs, [])
def test_check_file_format_with_category_header_not_added_to_index(self):
incorrect_format = [
'## Index',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
]
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = '(L003) category header (A) not added to Index section'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
def test_check_file_format_with_category_without_min_entries(self):
incorrect_format = [
'## Index',
'* [A](#a)',
'* [B](#b)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'',
'### B',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [BA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [BC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |'
]
category_with_err = 'A'
num_in_category = 1
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = f'(L005) {category_with_err} category does not have the minimum {min_entries_per_category} entries (only has {num_in_category})'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
def test_check_file_format_entry_without_all_necessary_columns(self):
incorrect_format = [
'## Index',
'* [A](#a)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AB](https://www.ex.com) | Desc | `apiKey` |', # missing https and cors
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
]
current_segments_num = 3
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = f'(L008) entry does not have all the required columns (have {current_segments_num}, need {num_segments})'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
def test_check_file_format_without_1_space_between_the_segments(self):
incorrect_format = [
'## Index',
'* [A](#a)',
'',
'### A',
'API | Description | Auth | HTTPS | CORS |',
'|---|---|---|---|---|',
'| [AA](https://www.ex.com) | Desc |`apiKey`| Yes | Yes |', # space between segment of auth column missing
'| [AB](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
'| [AC](https://www.ex.com) | Desc | `apiKey` | Yes | Yes |',
]
err_msgs = check_file_format(lines=incorrect_format)
expected_err_msg = '(L007) each segment must start and end with exactly 1 space'
self.assertIsInstance(err_msgs, list)
self.assertEqual(len(err_msgs), 1)
err_msg = err_msgs[0]
self.assertEqual(err_msg, expected_err_msg)
| python | MIT | a58c76cd32ef345da3e4b7252c7b47275e866ae7 | 2026-01-04T14:38:15.124778Z | false |
public-apis/public-apis | https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/tests/test_validate_links.py | scripts/tests/test_validate_links.py | # -*- coding: utf-8 -*-
import unittest
from validate.links import find_links_in_text
from validate.links import check_duplicate_links
from validate.links import fake_user_agent
from validate.links import get_host_from_link
from validate.links import has_cloudflare_protection
class FakeResponse():
def __init__(self, code: int, headers: dict, text: str) -> None:
self.status_code = code
self.headers = headers
self.text = text
class TestValidateLinks(unittest.TestCase):
def setUp(self):
self.duplicate_links = [
'https://www.example.com',
'https://www.example.com',
'https://www.example.com',
'https://www.anotherexample.com',
]
self.no_duplicate_links = [
'https://www.firstexample.com',
'https://www.secondexample.com',
'https://www.anotherexample.com',
]
self.code_200 = 200
self.code_403 = 403
self.code_503 = 503
self.cloudflare_headers = {'Server': 'cloudflare'}
self.no_cloudflare_headers = {'Server': 'google'}
self.text_with_cloudflare_flags = '403 Forbidden Cloudflare We are checking your browser...'
self.text_without_cloudflare_flags = 'Lorem Ipsum'
def test_find_link_in_text(self):
text = """
# this is valid
http://example.com?param1=1&param2=2#anchor
https://www.example.com?param1=1&param2=2#anchor
https://www.example.com.br
https://www.example.com.gov.br
[Example](https://www.example.com?param1=1&param2=2#anchor)
lorem ipsum https://www.example.com?param1=1&param2=2#anchor
https://www.example.com?param1=1&param2=2#anchor lorem ipsum
# this is not valid
example.com
https:example.com
https:/example.com
https//example.com
https//.com
"""
links = find_links_in_text(text)
self.assertIsInstance(links, list)
self.assertEqual(len(links), 7)
for link in links:
with self.subTest():
self.assertIsInstance(link, str)
def test_find_link_in_text_with_invalid_argument(self):
with self.assertRaises(TypeError):
find_links_in_text()
find_links_in_text(1)
find_links_in_text(True)
def test_if_check_duplicate_links_has_the_correct_return(self):
result_1 = check_duplicate_links(self.duplicate_links)
result_2 = check_duplicate_links(self.no_duplicate_links)
self.assertIsInstance(result_1, tuple)
self.assertIsInstance(result_2, tuple)
has_duplicate_links, links = result_1
no_duplicate_links, no_links = result_2
self.assertTrue(has_duplicate_links)
self.assertFalse(no_duplicate_links)
self.assertIsInstance(links, list)
self.assertIsInstance(no_links, list)
self.assertEqual(len(links), 2)
self.assertEqual(len(no_links), 0)
def test_if_fake_user_agent_has_a_str_as_return(self):
user_agent = fake_user_agent()
self.assertIsInstance(user_agent, str)
def test_get_host_from_link(self):
links = [
'example.com',
'https://example.com',
'https://www.example.com',
'https://www.example.com.br',
'https://www.example.com/route',
'https://www.example.com?p=1&q=2',
'https://www.example.com#anchor'
]
for link in links:
host = get_host_from_link(link)
with self.subTest():
self.assertIsInstance(host, str)
self.assertNotIn('://', host)
self.assertNotIn('/', host)
self.assertNotIn('?', host)
self.assertNotIn('#', host)
with self.assertRaises(TypeError):
get_host_from_link()
def test_has_cloudflare_protection_with_code_403_and_503_in_response(self):
resp_with_cloudflare_protection_code_403 = FakeResponse(
code=self.code_403,
headers=self.cloudflare_headers,
text=self.text_with_cloudflare_flags
)
resp_with_cloudflare_protection_code_503 = FakeResponse(
code=self.code_503,
headers=self.cloudflare_headers,
text=self.text_with_cloudflare_flags
)
result1 = has_cloudflare_protection(resp_with_cloudflare_protection_code_403)
result2 = has_cloudflare_protection(resp_with_cloudflare_protection_code_503)
self.assertTrue(result1)
self.assertTrue(result2)
def test_has_cloudflare_protection_when_there_is_no_protection(self):
resp_without_cloudflare_protection1 = FakeResponse(
code=self.code_200,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
resp_without_cloudflare_protection2 = FakeResponse(
code=self.code_403,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
resp_without_cloudflare_protection3 = FakeResponse(
code=self.code_503,
headers=self.no_cloudflare_headers,
text=self.text_without_cloudflare_flags
)
result1 = has_cloudflare_protection(resp_without_cloudflare_protection1)
result2 = has_cloudflare_protection(resp_without_cloudflare_protection2)
result3 = has_cloudflare_protection(resp_without_cloudflare_protection3)
self.assertFalse(result1)
self.assertFalse(result2)
self.assertFalse(result3)
| python | MIT | a58c76cd32ef345da3e4b7252c7b47275e866ae7 | 2026-01-04T14:38:15.124778Z | false |
public-apis/public-apis | https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/validate/format.py | scripts/validate/format.py | # -*- coding: utf-8 -*-
import re
import sys
from string import punctuation
from typing import List, Tuple, Dict
# Temporary replacement
# The descriptions that contain () at the end must adapt to the new policy later
punctuation = punctuation.replace('()', '')
anchor = '###'
auth_keys = ['apiKey', 'OAuth', 'X-Mashape-Key', 'User-Agent', 'No']
https_keys = ['Yes', 'No']
cors_keys = ['Yes', 'No', 'Unknown']
index_title = 0
index_desc = 1
index_auth = 2
index_https = 3
index_cors = 4
num_segments = 5
min_entries_per_category = 3
max_description_length = 100
anchor_re = re.compile(anchor + r'\s(.+)')
category_title_in_index_re = re.compile(r'\*\s\[(.*)\]')
link_re = re.compile(r'\[(.+)\]\((http.*)\)')
# Type aliases
APIList = List[str]
Categories = Dict[str, APIList]
CategoriesLineNumber = Dict[str, int]
def error_message(line_number: int, message: str) -> str:
line = line_number + 1
return f'(L{line:03d}) {message}'
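# Illustrative usage (added for clarity; not part of the original module):
# `line_number` is a 0-based index into the file contents, while the emitted
# label is 1-based and zero-padded to at least three digits.
def _demo_error_message() -> None:
    assert error_message(0, 'msg') == '(L001) msg'
    assert error_message(99, 'msg') == '(L100) msg'
    assert error_message(999, 'msg') == '(L1000) msg'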
def get_categories_content(contents: List[str]) -> Tuple[Categories, CategoriesLineNumber]:
categories = {}
category_line_num = {}
for line_num, line_content in enumerate(contents):
if line_content.startswith(anchor):
category = line_content.split(anchor)[1].strip()
categories[category] = []
category_line_num[category] = line_num
continue
if not line_content.startswith('|') or line_content.startswith('|---'):
continue
raw_title = [
raw_content.strip() for raw_content in line_content.split('|')[1:-1]
][0]
title_match = link_re.match(raw_title)
if title_match:
title = title_match.group(1).upper()
categories[category].append(title)
return (categories, category_line_num)
def check_alphabetical_order(lines: List[str]) -> List[str]:
err_msgs = []
categories, category_line_num = get_categories_content(contents=lines)
for category, api_list in categories.items():
if sorted(api_list) != api_list:
err_msg = error_message(
category_line_num[category],
f'{category} category is not in alphabetical order'
)
err_msgs.append(err_msg)
return err_msgs
def check_title(line_num: int, raw_title: str) -> List[str]:
err_msgs = []
title_match = link_re.match(raw_title)
# url should be wrapped in "[TITLE](LINK)" Markdown syntax
if not title_match:
err_msg = error_message(line_num, 'Title syntax should be "[TITLE](LINK)"')
err_msgs.append(err_msg)
else:
# do not allow "... API" in the entry title
title = title_match.group(1)
if title.upper().endswith(' API'):
err_msg = error_message(line_num, 'Title should not end with "... API". Every entry is an API here!')
err_msgs.append(err_msg)
return err_msgs
def check_description(line_num: int, description: str) -> List[str]:
err_msgs = []
first_char = description[0]
if first_char.upper() != first_char:
err_msg = error_message(line_num, 'first character of description is not capitalized')
err_msgs.append(err_msg)
last_char = description[-1]
if last_char in punctuation:
err_msg = error_message(line_num, f'description should not end with {last_char}')
err_msgs.append(err_msg)
desc_length = len(description)
if desc_length > max_description_length:
err_msg = error_message(line_num, f'description should not exceed {max_description_length} characters (currently {desc_length})')
err_msgs.append(err_msg)
return err_msgs
def check_auth(line_num: int, auth: str) -> List[str]:
err_msgs = []
backtick = '`'
if auth != 'No' and (not auth.startswith(backtick) or not auth.endswith(backtick)):
err_msg = error_message(line_num, 'auth value is not enclosed with `backticks`')
err_msgs.append(err_msg)
if auth.replace(backtick, '') not in auth_keys:
err_msg = error_message(line_num, f'{auth} is not a valid Auth option')
err_msgs.append(err_msg)
return err_msgs
def check_https(line_num: int, https: str) -> List[str]:
err_msgs = []
if https not in https_keys:
err_msg = error_message(line_num, f'{https} is not a valid HTTPS option')
err_msgs.append(err_msg)
return err_msgs
def check_cors(line_num: int, cors: str) -> List[str]:
err_msgs = []
if cors not in cors_keys:
err_msg = error_message(line_num, f'{cors} is not a valid CORS option')
err_msgs.append(err_msg)
return err_msgs
def check_entry(line_num: int, segments: List[str]) -> List[str]:
raw_title = segments[index_title]
description = segments[index_desc]
auth = segments[index_auth]
https = segments[index_https]
cors = segments[index_cors]
title_err_msgs = check_title(line_num, raw_title)
desc_err_msgs = check_description(line_num, description)
auth_err_msgs = check_auth(line_num, auth)
https_err_msgs = check_https(line_num, https)
cors_err_msgs = check_cors(line_num, cors)
err_msgs = [
*title_err_msgs,
*desc_err_msgs,
*auth_err_msgs,
*https_err_msgs,
*cors_err_msgs
]
return err_msgs
def check_file_format(lines: List[str]) -> List[str]:
err_msgs = []
category_title_in_index = []
alphabetical_err_msgs = check_alphabetical_order(lines)
err_msgs.extend(alphabetical_err_msgs)
num_in_category = min_entries_per_category + 1
category = ''
category_line = 0
for line_num, line_content in enumerate(lines):
category_title_match = category_title_in_index_re.match(line_content)
if category_title_match:
category_title_in_index.append(category_title_match.group(1))
# check each category for the minimum number of entries
if line_content.startswith(anchor):
category_match = anchor_re.match(line_content)
if category_match:
if category_match.group(1) not in category_title_in_index:
err_msg = error_message(line_num, f'category header ({category_match.group(1)}) not added to Index section')
err_msgs.append(err_msg)
else:
err_msg = error_message(line_num, 'category header is not formatted correctly')
err_msgs.append(err_msg)
if num_in_category < min_entries_per_category:
err_msg = error_message(category_line, f'{category} category does not have the minimum {min_entries_per_category} entries (only has {num_in_category})')
err_msgs.append(err_msg)
category = line_content.split(' ')[1]
category_line = line_num
num_in_category = 0
continue
# skips lines that we do not care about
if not line_content.startswith('|') or line_content.startswith('|---'):
continue
num_in_category += 1
segments = line_content.split('|')[1:-1]
if len(segments) < num_segments:
err_msg = error_message(line_num, f'entry does not have all the required columns (have {len(segments)}, need {num_segments})')
err_msgs.append(err_msg)
continue
for segment in segments:
# every line segment should start and end with exactly 1 space
if len(segment) - len(segment.lstrip()) != 1 or len(segment) - len(segment.rstrip()) != 1:
err_msg = error_message(line_num, 'each segment must start and end with exactly 1 space')
err_msgs.append(err_msg)
segments = [segment.strip() for segment in segments]
entry_err_msgs = check_entry(line_num, segments)
err_msgs.extend(entry_err_msgs)
return err_msgs
def main(filename: str) -> None:
with open(filename, mode='r', encoding='utf-8') as file:
lines = [line.rstrip() for line in file]
file_format_err_msgs = check_file_format(lines)
if file_format_err_msgs:
for err_msg in file_format_err_msgs:
print(err_msg)
sys.exit(1)
if __name__ == '__main__':
num_args = len(sys.argv)
if num_args < 2:
print('No .md file passed (file should contain Markdown table syntax)')
sys.exit(1)
filename = sys.argv[1]
main(filename)
| python | MIT | a58c76cd32ef345da3e4b7252c7b47275e866ae7 | 2026-01-04T14:38:15.124778Z | false |
public-apis/public-apis | https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/validate/links.py | scripts/validate/links.py | # -*- coding: utf-8 -*-
import re
import sys
import random
from typing import List, Tuple
import requests
from requests.models import Response
def find_links_in_text(text: str) -> List[str]:
"""Find links in a text and return a list of URLs."""
link_pattern = re.compile(r'((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'\".,<>?«»""'']))')
raw_links = re.findall(link_pattern, text)
links = [
str(raw_link[0]) for raw_link in raw_links
]
return links
def find_links_in_file(filename: str) -> List[str]:
"""Find links in a file and return a list of URLs from text file."""
with open(filename, mode='r', encoding='utf-8') as file:
readme = file.read()
index_section = readme.find('## Index')
if index_section == -1:
index_section = 0
content = readme[index_section:]
links = find_links_in_text(content)
return links
def check_duplicate_links(links: List[str]) -> Tuple[bool, List]:
"""Check for duplicated links.
Returns a tuple with True or False and duplicate list.
"""
seen = {}
duplicates = []
has_duplicate = False
for link in links:
link = link.rstrip('/')
if link not in seen:
seen[link] = 1
else:
if seen[link] == 1:
duplicates.append(link)
if duplicates:
has_duplicate = True
return (has_duplicate, duplicates)
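# Illustrative sketch (added for clarity; not part of the original module):
# links are normalized by stripping a trailing slash before comparison, so the
# same URL with and without the slash counts as a duplicate.
def _demo_check_duplicate_links() -> None:
    has_duplicate, duplicates = check_duplicate_links([
        'https://example.com',
        'https://example.com/',  # same link after rstrip('/')
    ])
    assert has_duplicate and duplicates == ['https://example.com']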
def fake_user_agent() -> str:
"""Faking user agent as some hosting services block not-whitelisted UA."""
user_agents = [
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko)',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
]
return random.choice(user_agents)
def get_host_from_link(link: str) -> str:
host = link.split('://', 1)[1] if '://' in link else link
# Remove routes, arguments and anchors
if '/' in host:
host = host.split('/', 1)[0]
elif '?' in host:
host = host.split('?', 1)[0]
elif '#' in host:
host = host.split('#', 1)[0]
return host
def has_cloudflare_protection(resp: Response) -> bool:
"""Checks if there is any cloudflare protection in the response.
Cloudflare implements multiple network protections on a given link,
this script tries to detect if any of them exist in the response from request.
Common protections have the following HTTP code as a response:
- 403: When host header is missing or incorrect (and more)
- 503: When DDOS protection exists
See more about it at:
- https://support.cloudflare.com/hc/en-us/articles/115003014512-4xx-Client-Error
- https://support.cloudflare.com/hc/en-us/articles/115003011431-Troubleshooting-Cloudflare-5XX-errors
- https://www.cloudflare.com/ddos/
- https://superuser.com/a/888526
Discussions in issues and pull requests:
- https://github.com/public-apis/public-apis/pull/2409
- https://github.com/public-apis/public-apis/issues/2960
"""
code = resp.status_code
server = resp.headers.get('Server') or resp.headers.get('server')
cloudflare_flags = [
'403 Forbidden',
'cloudflare',
'Cloudflare',
'Security check',
'Please Wait... | Cloudflare',
'We are checking your browser...',
'Please stand by, while we are checking your browser...',
'Checking your browser before accessing',
'This process is automatic.',
'Your browser will redirect to your requested content shortly.',
'Please allow up to 5 seconds',
'DDoS protection by',
'Ray ID:',
'Cloudflare Ray ID:',
'_cf_chl',
'_cf_chl_opt',
'__cf_chl_rt_tk',
'cf-spinner-please-wait',
'cf-spinner-redirecting'
]
if code in [403, 503] and server == 'cloudflare':
html = resp.text
flags_found = [flag in html for flag in cloudflare_flags]
any_flag_found = any(flags_found)
if any_flag_found:
return True
return False
def check_if_link_is_working(link: str) -> Tuple[bool, str]:
"""Checks if a link is working.
If an error is identified when the request for the link occurs,
the return will be a tuple with the first value True and the second
value a string containing the error message.
If no errors are identified, the return will be a tuple with the
first value False and the second an empty string.
"""
has_error = False
error_message = ''
try:
resp = requests.get(link, timeout=25, headers={
'User-Agent': fake_user_agent(),
'host': get_host_from_link(link)
})
code = resp.status_code
if code >= 400 and not has_cloudflare_protection(resp):
has_error = True
error_message = f'ERR:CLT: {code} : {link}'
except requests.exceptions.SSLError as error:
has_error = True
error_message = f'ERR:SSL: {error} : {link}'
except requests.exceptions.ConnectionError as error:
has_error = True
error_message = f'ERR:CNT: {error} : {link}'
except (TimeoutError, requests.exceptions.ConnectTimeout):
has_error = True
error_message = f'ERR:TMO: {link}'
except requests.exceptions.TooManyRedirects as error:
has_error = True
error_message = f'ERR:TMR: {error} : {link}'
except (Exception, requests.exceptions.RequestException) as error:
has_error = True
error_message = f'ERR:UKN: {error} : {link}'
return (has_error, error_message)
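# Illustrative usage (added for clarity; not part of the original module;
# calling it issues a real HTTP request): the return value is
# (has_error, error_message), with an empty message for a healthy link.
def _demo_check_if_link_is_working() -> None:
    has_error, message = check_if_link_is_working('https://www.example.com')
    if has_error:
        print(message)  # e.g. 'ERR:CLT: 404 : https://www.example.com'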
def check_if_list_of_links_are_working(list_of_links: List[str]) -> List[str]:
error_messages = []
for link in list_of_links:
has_error, error_message = check_if_link_is_working(link)
if has_error:
error_messages.append(error_message)
return error_messages
def start_duplicate_links_checker(links: List[str]) -> None:
print('Checking for duplicate links...')
has_duplicate_link, duplicates_links = check_duplicate_links(links)
if has_duplicate_link:
print('Found duplicate links:')
for duplicate_link in duplicates_links:
print(duplicate_link)
sys.exit(1)
else:
print('No duplicate links.')
def start_links_working_checker(links: List[str]) -> None:
print(f'Checking if {len(links)} links are working...')
errors = check_if_list_of_links_are_working(links)
if errors:
num_errors = len(errors)
print(f'Apparently {num_errors} links are not working properly. See below:')
for error_message in errors:
print(error_message)
sys.exit(1)
def main(filename: str, only_duplicate_links_checker: bool) -> None:
links = find_links_in_file(filename)
start_duplicate_links_checker(links)
if not only_duplicate_links_checker:
start_links_working_checker(links)
if __name__ == '__main__':
num_args = len(sys.argv)
only_duplicate_links_checker = False
if num_args < 2:
print('No .md file passed')
sys.exit(1)
elif num_args == 3:
third_arg = sys.argv[2].lower()
if third_arg == '-odlc' or third_arg == '--only_duplicate_links_checker':
only_duplicate_links_checker = True
else:
print(f'Invalid third argument. Usage: python {__file__} <file.md> [-odlc | --only_duplicate_links_checker]')
sys.exit(1)
filename = sys.argv[1]
main(filename, only_duplicate_links_checker)
| python | MIT | a58c76cd32ef345da3e4b7252c7b47275e866ae7 | 2026-01-04T14:38:15.124778Z | false |
public-apis/public-apis | https://github.com/public-apis/public-apis/blob/a58c76cd32ef345da3e4b7252c7b47275e866ae7/scripts/validate/__init__.py | scripts/validate/__init__.py | # -*- coding: utf-8 -*-
from validate import format
from validate import links
| python | MIT | a58c76cd32ef345da3e4b7252c7b47275e866ae7 | 2026-01-04T14:38:15.124778Z | false |
deepseek-ai/DeepSeek-V3 | https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/convert.py | inference/convert.py | import os
import shutil
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm, trange
import torch
from safetensors.torch import safe_open, save_file
mapping = {
"embed_tokens": ("embed", 0),
"input_layernorm": ("attn_norm", None),
"post_attention_layernorm": ("ffn_norm", None),
"q_proj": ("wq", 0),
"q_a_proj": ("wq_a", None),
"q_a_layernorm": ("q_norm", None),
"q_b_proj": ("wq_b", 0),
"kv_a_proj_with_mqa": ("wkv_a", None),
"kv_a_layernorm": ("kv_norm", None),
"kv_b_proj": ("wkv_b", 0),
"o_proj": ("wo", 1),
"gate": ("gate", None),
"gate_proj": ("w1", 0),
"down_proj": ("w2", 1),
"up_proj": ("w3", 0),
"norm": ("norm", None),
"lm_head": ("head", 0),
"scale": ("scale", None),
}
def main(hf_ckpt_path, save_path, n_experts, mp):
"""
Converts and saves model checkpoint files into a specified format.
Args:
hf_ckpt_path (str): Path to the directory containing the input checkpoint files.
save_path (str): Path to the directory where the converted checkpoint files will be saved.
n_experts (int): Total number of experts in the model.
mp (int): Model parallelism factor.
Returns:
None
"""
torch.set_num_threads(8)
n_local_experts = n_experts // mp
state_dicts = [{} for _ in range(mp)]
for file_path in tqdm(glob(os.path.join(hf_ckpt_path, "*.safetensors"))):
with safe_open(file_path, framework="pt", device="cpu") as f:
for name in f.keys():
if "model.layers.61" in name:
continue
param: torch.Tensor = f.get_tensor(name)
if name.startswith("model."):
name = name[len("model."):]
name = name.replace("self_attn", "attn")
name = name.replace("mlp", "ffn")
name = name.replace("weight_scale_inv", "scale")
name = name.replace("e_score_correction_bias", "bias")
key = name.split(".")[-2]
assert key in mapping, f"Key {key} not found in mapping"
new_key, dim = mapping[key]
name = name.replace(key, new_key)
for i in range(mp):
new_param = param
if "experts" in name and "shared_experts" not in name:
idx = int(name.split(".")[-3])
if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:
continue
elif dim is not None:
assert param.size(dim) % mp == 0, f"Dimension {dim} must be divisible by {mp}"
shard_size = param.size(dim) // mp
new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()
state_dicts[i][name] = new_param
os.makedirs(save_path, exist_ok=True)
for i in trange(mp):
save_file(state_dicts[i], os.path.join(save_path, f"model{i}-mp{mp}.safetensors"))
for file_path in glob(os.path.join(hf_ckpt_path, "*token*")):
new_file_path = os.path.join(save_path, os.path.basename(file_path))
shutil.copyfile(file_path, new_file_path)
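# Illustrative sketch (added for clarity; not part of the original script):
# how a weight tensor is split across `mp` ranks along dimension `dim`,
# mirroring the narrow() call in main() above.
def _demo_shard(param: torch.Tensor, mp: int, dim: int) -> list:
    shard_size = param.size(dim) // mp
    return [param.narrow(dim, i * shard_size, shard_size).contiguous() for i in range(mp)]

# e.g. _demo_shard(torch.arange(8).reshape(4, 2), mp=2, dim=0) yields two (2, 2) shards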
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--hf-ckpt-path", type=str, required=True)
parser.add_argument("--save-path", type=str, required=True)
parser.add_argument("--n-experts", type=int, required=True)
parser.add_argument("--model-parallel", type=int, required=True)
args = parser.parse_args()
assert args.n_experts % args.model_parallel == 0, "Number of experts must be divisible by model parallelism"
main(args.hf_ckpt_path, args.save_path, args.n_experts, args.model_parallel)
| python | MIT | 9b4e9788e4a3a731f7567338ed15d3ec549ce03b | 2026-01-04T14:38:15.450976Z | false |
deepseek-ai/DeepSeek-V3 | https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/model.py | inference/model.py | import math
from dataclasses import dataclass
from typing import Tuple, Optional, Literal
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
from kernel import act_quant, weight_dequant, fp8_gemm
world_size = 1
rank = 0
block_size = 128
gemm_impl: Literal["bf16", "fp8"] = "bf16"
attn_impl: Literal["naive", "absorb"] = "absorb"
@dataclass
class ModelArgs:
"""
Data class for defining model arguments and hyperparameters.
Attributes:
max_batch_size (int): Maximum batch size.
max_seq_len (int): Maximum sequence length.
dtype (Literal["bf16", "fp8"]): Data type for computations.
scale_fmt (Optional[str]): Format for quantization scale.
vocab_size (int): Vocabulary size.
dim (int): Model dimension.
inter_dim (int): Intermediate dimension for MLP layers.
moe_inter_dim (int): Intermediate dimension for MoE layers.
n_layers (int): Number of transformer layers.
n_dense_layers (int): Number of dense layers in the model.
n_heads (int): Number of attention heads.
n_routed_experts (int): Number of routed experts for MoE layers.
n_shared_experts (int): Number of shared experts for MoE layers.
n_activated_experts (int): Number of activated experts in MoE layers.
n_expert_groups (int): Number of expert groups.
n_limited_groups (int): Number of limited groups for MoE routing.
score_func (Literal["softmax", "sigmoid"]): Scoring function for MoE routing.
route_scale (float): Scaling factor for routing scores.
q_lora_rank (int): LoRA rank for query projections.
kv_lora_rank (int): LoRA rank for key-value projections.
qk_nope_head_dim (int): Dimension for query-key projections without positional embeddings.
qk_rope_head_dim (int): Dimension for query-key projections with rotary embeddings.
v_head_dim (int): Dimension for value projections.
original_seq_len (int): Original sequence length.
rope_theta (float): Base for rotary positional encoding.
rope_factor (float): Scaling factor for extended sequence lengths.
beta_fast (int): Fast beta correction factor.
beta_slow (int): Slow beta correction factor.
mscale (float): Scaling factor for extended attention.
"""
max_batch_size: int = 8
max_seq_len: int = 4096 * 4
dtype: Literal["bf16", "fp8"] = "bf16"
scale_fmt: Optional[str] = None
vocab_size: int = 102400
dim: int = 2048
inter_dim: int = 10944
moe_inter_dim: int = 1408
n_layers: int = 27
n_dense_layers: int = 1
n_heads: int = 16
# moe
n_routed_experts: int = 64
n_shared_experts: int = 2
n_activated_experts: int = 6
n_expert_groups: int = 1
n_limited_groups: int = 1
score_func: Literal["softmax", "sigmoid"] = "softmax"
route_scale: float = 1.
# mla
q_lora_rank: int = 0
kv_lora_rank: int = 512
qk_nope_head_dim: int = 128
qk_rope_head_dim: int = 64
v_head_dim: int = 128
# yarn
original_seq_len: int = 4096
rope_theta: float = 10000.0
rope_factor: float = 40
beta_fast: int = 32
beta_slow: int = 1
mscale: float = 1.
class ParallelEmbedding(nn.Module):
"""
Embedding layer with parallelism support across distributed processes.
Args:
vocab_size (int): Vocabulary size.
dim (int): Embedding dimension.
"""
def __init__(self, vocab_size: int, dim: int):
super().__init__()
self.vocab_size = vocab_size
self.dim = dim
assert vocab_size % world_size == 0, f"Vocabulary size must be divisible by world size (world_size={world_size})"
self.part_vocab_size = (vocab_size // world_size)
self.vocab_start_idx = rank * self.part_vocab_size
self.vocab_end_idx = self.vocab_start_idx + self.part_vocab_size
self.weight = nn.Parameter(torch.empty(self.part_vocab_size, self.dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for parallel embedding layer.
Args:
x (torch.Tensor): Input tensor containing token indices.
Returns:
torch.Tensor: Embedded representations.
"""
if world_size > 1:
mask = (x < self.vocab_start_idx) | (x >= self.vocab_end_idx)
x = x - self.vocab_start_idx
x[mask] = 0
y = F.embedding(x, self.weight)
if world_size > 1:
y[mask] = 0
dist.all_reduce(y)
return y
def linear(x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, scale_fmt: Optional[str] = None) -> torch.Tensor:
"""
Applies a linear transformation to the incoming data: y = xA^T + b.
This function supports specialized implementations based on quantization
and tensor formats.
Args:
x (torch.Tensor): The input tensor.
weight (torch.Tensor): The weight tensor. It may be quantized and
requires dequantization for certain cases.
bias (Optional[torch.Tensor]): The bias tensor to be added. Default is None.
scale_fmt (Optional[str]): Format for the activation quantization scale. Default is None.
Returns:
torch.Tensor: The result of the linear transformation, which may involve
quantization-aware computations depending on the input parameters.
Notes:
- If `weight` is quantized (e.g., `element_size() == 1`), a dequantized version
is used for computation.
- If `gemm_impl == "bf16"`, dequantization and a `bf16` GEMM operation are applied.
- For other cases, the function applies quantization to `x` and uses `fp8_gemm` for computation.
"""
if weight.element_size() > 1:
return F.linear(x, weight, bias)
elif gemm_impl == "bf16":
weight = weight_dequant(weight, weight.scale)
return F.linear(x, weight, bias)
else:
x, scale = act_quant(x, block_size, scale_fmt)
y = fp8_gemm(x, scale, weight, weight.scale)
if bias is not None:
y += bias
return y
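# Illustrative note (added for clarity; not part of the original module): with
# unquantized bf16 weights, element_size() is 2, so linear() reduces to a plain
# F.linear call.
def _demo_linear_bf16() -> None:
    x = torch.randn(2, 4, dtype=torch.bfloat16)
    w = torch.randn(3, 4, dtype=torch.bfloat16)
    assert torch.equal(linear(x, w), F.linear(x, w))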
class Linear(nn.Module):
"""
Custom linear layer with support for quantized weights and optional bias.
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
bias (bool): Whether to include a bias term. Defaults to False.
dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
"""
dtype = torch.bfloat16
scale_fmt: Optional[str] = None
def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.empty(out_features, in_features, dtype=dtype or Linear.dtype))
if self.weight.element_size() == 1:
scale_out_features = (out_features + block_size - 1) // block_size
scale_in_features = (in_features + block_size - 1) // block_size
self.weight.scale = self.scale = nn.Parameter(torch.empty(scale_out_features, scale_in_features, dtype=torch.float32))
else:
self.register_parameter("scale", None)
if bias:
self.bias = nn.Parameter(torch.empty(out_features))
else:
self.register_parameter("bias", None)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for the custom linear layer.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Transformed tensor after linear computation.
"""
return linear(x, self.weight, self.bias, self.scale_fmt)
class ColumnParallelLinear(Linear):
"""
Linear layer with column parallelism, splitting output features across distributed processes.
Args:
in_features (int): Number of input features.
out_features (int): Total number of output features.
bias (bool): Whether to include a bias term. Defaults to False.
dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
"""
def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
assert out_features % world_size == 0, f"Output features must be divisible by world size (world_size={world_size})"
self.part_out_features = out_features // world_size
super().__init__(in_features, self.part_out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for column parallel linear layer.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Transformed tensor with column-parallel computation.
"""
y = linear(x, self.weight, self.bias)
return y
class RowParallelLinear(Linear):
"""
Linear layer with row parallelism, splitting input features across distributed processes.
Args:
in_features (int): Total number of input features.
out_features (int): Number of output features.
bias (bool): Whether to include a bias term. Defaults to False.
dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
"""
def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
assert in_features % world_size == 0, f"Input features must be divisible by world size (world_size={world_size})"
self.part_in_features = in_features // world_size
super().__init__(self.part_in_features, out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for row parallel linear layer.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Transformed tensor with row-parallel computation.
"""
y = linear(x, self.weight)
if world_size > 1:
dist.all_reduce(y)
if self.bias is not None:
y += self.bias
return y
class RMSNorm(nn.Module):
"""
Root Mean Square Layer Normalization (RMSNorm).
Args:
dim (int): Dimension of the input tensor.
eps (float): Epsilon value for numerical stability. Defaults to 1e-6.
"""
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.dim = dim
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def forward(self, x: torch.Tensor):
"""
Forward pass for RMSNorm.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Normalized tensor with the same shape as input.
"""
return F.rms_norm(x, (self.dim,), self.weight, self.eps)
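# Reference sketch (added for clarity; not part of the original module): what
# F.rms_norm computes, written out explicitly.
def _rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    rms = torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + eps)
    return x / rms * weight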
def precompute_freqs_cis(args: ModelArgs) -> torch.Tensor:
"""
Precomputes frequency-based complex exponential values for rotary positional embeddings.
Args:
args (ModelArgs): Model arguments containing positional embedding parameters.
Returns:
torch.Tensor: Precomputed complex exponential values for positional embeddings.
"""
dim = args.qk_rope_head_dim
seqlen = args.max_seq_len
beta_fast = args.beta_fast
beta_slow = args.beta_slow
base = args.rope_theta
factor = args.rope_factor
def find_correction_dim(num_rotations, dim, base, max_seq_len):
"""
Computes the correction dimension for a given number of rotations in the rotary positional embedding.
Args:
num_rotations (float): Number of rotations to compute the correction for.
dim (int): Dimensionality of the embedding space.
base (float): Base value for the exponential computation.
max_seq_len (int): Maximum sequence length.
Returns:
float: The correction dimension based on the input parameters.
"""
return dim * math.log(max_seq_len / (num_rotations * 2 * math.pi)) / (2 * math.log(base))
def find_correction_range(low_rot, high_rot, dim, base, max_seq_len):
"""
Computes the range of correction dimensions for rotary positional embeddings.
Args:
low_rot (float): Lower bound for the number of rotations.
high_rot (float): Upper bound for the number of rotations.
dim (int): Dimensionality of the embedding space.
base (float): Base value for the exponential computation.
max_seq_len (int): Maximum sequence length.
Returns:
Tuple[int, int]: The range of correction dimensions (low, high), clamped to valid indices.
"""
low = math.floor(find_correction_dim(low_rot, dim, base, max_seq_len))
high = math.ceil(find_correction_dim(high_rot, dim, base, max_seq_len))
return max(low, 0), min(high, dim-1)
def linear_ramp_factor(min, max, dim):
"""
Computes a linear ramp function used to smooth values between a minimum and maximum range.
Args:
min (float): Minimum value for the ramp function.
max (float): Maximum value for the ramp function.
dim (int): Dimensionality of the ramp tensor.
Returns:
torch.Tensor: A tensor of shape (dim,) with values linearly interpolated between 0 and 1,
clamped to the range [0, 1].
"""
if min == max:
max += 0.001
linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
ramp_func = torch.clamp(linear_func, 0, 1)
return ramp_func
freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
if seqlen > args.original_seq_len:
low, high = find_correction_range(beta_fast, beta_slow, dim, base, args.original_seq_len)
smooth = 1 - linear_ramp_factor(low, high, dim // 2)
freqs = freqs / factor * (1 - smooth) + freqs * smooth
t = torch.arange(seqlen)
freqs = torch.outer(t, freqs)
freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
return freqs_cis
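# Sanity-check sketch (added for clarity; not part of the original module): the
# returned tensor holds one unit-magnitude complex rotation per (position,
# frequency-pair); the small argument values below are hypothetical.
def _demo_freqs_cis() -> None:
    args = ModelArgs(max_seq_len=16, qk_rope_head_dim=8)
    freqs_cis = precompute_freqs_cis(args)
    assert freqs_cis.shape == (16, 4)  # (seqlen, qk_rope_head_dim // 2)
    assert torch.allclose(freqs_cis.abs(), torch.ones(16, 4))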
def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
"""
Applies rotary positional embeddings to the input tensor.
Args:
x (torch.Tensor): Input tensor with positional embeddings to be applied.
freqs_cis (torch.Tensor): Precomputed complex exponential values for positional embeddings.
Returns:
torch.Tensor: Tensor with rotary embeddings applied.
"""
dtype = x.dtype
x = torch.view_as_complex(x.float().view(*x.shape[:-1], -1, 2))
freqs_cis = freqs_cis.view(1, x.size(1), 1, x.size(-1))
y = torch.view_as_real(x * freqs_cis).flatten(3)
return y.to(dtype)
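# Illustrative check (added for clarity; not part of the original module):
# rotary embedding is a pure rotation in each 2D frequency plane, so it
# preserves vector norms; the shapes below are hypothetical.
def _demo_rotary_preserves_norm() -> None:
    args = ModelArgs(max_seq_len=16, qk_rope_head_dim=8)
    freqs_cis = precompute_freqs_cis(args)
    x = torch.randn(1, 16, 2, 8)  # (batch, seqlen, heads, qk_rope_head_dim)
    y = apply_rotary_emb(x, freqs_cis)
    assert torch.allclose(x.norm(dim=-1), y.norm(dim=-1), atol=1e-5)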
class MLA(nn.Module):
"""
Multi-Head Latent Attention (MLA) Layer.
Attributes:
dim (int): Dimensionality of the input features.
n_heads (int): Number of attention heads.
n_local_heads (int): Number of local attention heads for distributed systems.
q_lora_rank (int): Rank for low-rank query projection.
kv_lora_rank (int): Rank for low-rank key/value projection.
qk_nope_head_dim (int): Dimensionality of non-positional query/key projections.
qk_rope_head_dim (int): Dimensionality of rotary-positional query/key projections.
qk_head_dim (int): Total dimensionality of query/key projections.
v_head_dim (int): Dimensionality of value projections.
softmax_scale (float): Scaling factor for softmax in attention computation.
"""
def __init__(self, args: ModelArgs):
super().__init__()
self.dim = args.dim
self.n_heads = args.n_heads
self.n_local_heads = args.n_heads // world_size
self.q_lora_rank = args.q_lora_rank
self.kv_lora_rank = args.kv_lora_rank
self.qk_nope_head_dim = args.qk_nope_head_dim
self.qk_rope_head_dim = args.qk_rope_head_dim
self.qk_head_dim = args.qk_nope_head_dim + args.qk_rope_head_dim
self.v_head_dim = args.v_head_dim
if self.q_lora_rank == 0:
self.wq = ColumnParallelLinear(self.dim, self.n_heads * self.qk_head_dim)
else:
self.wq_a = Linear(self.dim, self.q_lora_rank)
self.q_norm = RMSNorm(self.q_lora_rank)
self.wq_b = ColumnParallelLinear(self.q_lora_rank, self.n_heads * self.qk_head_dim)
self.wkv_a = Linear(self.dim, self.kv_lora_rank + self.qk_rope_head_dim)
self.kv_norm = RMSNorm(self.kv_lora_rank)
self.wkv_b = ColumnParallelLinear(self.kv_lora_rank, self.n_heads * (self.qk_nope_head_dim + self.v_head_dim))
self.wo = RowParallelLinear(self.n_heads * self.v_head_dim, self.dim)
self.softmax_scale = self.qk_head_dim ** -0.5
if args.max_seq_len > args.original_seq_len:
mscale = 0.1 * args.mscale * math.log(args.rope_factor) + 1.0
self.softmax_scale = self.softmax_scale * mscale * mscale
if attn_impl == "naive":
self.register_buffer("k_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.qk_head_dim), persistent=False)
self.register_buffer("v_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.v_head_dim), persistent=False)
else:
self.register_buffer("kv_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.kv_lora_rank), persistent=False)
self.register_buffer("pe_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.qk_rope_head_dim), persistent=False)
def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
"""
Forward pass for the Multi-Head Latent Attention (MLA) Layer.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, seq_len, dim).
start_pos (int): Starting position in the sequence for caching.
freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
mask (Optional[torch.Tensor]): Mask tensor to exclude certain positions from attention.
Returns:
torch.Tensor: Output tensor with the same shape as the input.
"""
bsz, seqlen, _ = x.size()
end_pos = start_pos + seqlen
if self.q_lora_rank == 0:
q = self.wq(x)
else:
q = self.wq_b(self.q_norm(self.wq_a(x)))
q = q.view(bsz, seqlen, self.n_local_heads, self.qk_head_dim)
q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
q_pe = apply_rotary_emb(q_pe, freqs_cis)
kv = self.wkv_a(x)
kv, k_pe = torch.split(kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
k_pe = apply_rotary_emb(k_pe.unsqueeze(2), freqs_cis)
if attn_impl == "naive":
q = torch.cat([q_nope, q_pe], dim=-1)
kv = self.wkv_b(self.kv_norm(kv))
kv = kv.view(bsz, seqlen, self.n_local_heads, self.qk_nope_head_dim + self.v_head_dim)
k_nope, v = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
k = torch.cat([k_nope, k_pe.expand(-1, -1, self.n_local_heads, -1)], dim=-1)
self.k_cache[:bsz, start_pos:end_pos] = k
self.v_cache[:bsz, start_pos:end_pos] = v
scores = torch.einsum("bshd,bthd->bsht", q, self.k_cache[:bsz, :end_pos]) * self.softmax_scale
else:
wkv_b = self.wkv_b.weight if self.wkv_b.scale is None else weight_dequant(self.wkv_b.weight, self.wkv_b.scale, block_size)
wkv_b = wkv_b.view(self.n_local_heads, -1, self.kv_lora_rank)
q_nope = torch.einsum("bshd,hdc->bshc", q_nope, wkv_b[:, :self.qk_nope_head_dim])
self.kv_cache[:bsz, start_pos:end_pos] = self.kv_norm(kv)
self.pe_cache[:bsz, start_pos:end_pos] = k_pe.squeeze(2)
scores = (torch.einsum("bshc,btc->bsht", q_nope, self.kv_cache[:bsz, :end_pos]) +
torch.einsum("bshr,btr->bsht", q_pe, self.pe_cache[:bsz, :end_pos])) * self.softmax_scale
if mask is not None:
scores += mask.unsqueeze(1)
scores = scores.softmax(dim=-1, dtype=torch.float32).type_as(x)
if attn_impl == "naive":
x = torch.einsum("bsht,bthd->bshd", scores, self.v_cache[:bsz, :end_pos])
else:
x = torch.einsum("bsht,btc->bshc", scores, self.kv_cache[:bsz, :end_pos])
x = torch.einsum("bshc,hdc->bshd", x, wkv_b[:, -self.v_head_dim:])
x = self.wo(x.flatten(2))
return x
class MLP(nn.Module):
"""
Multi-Layer Perceptron (MLP) used as a feed-forward layer.
Attributes:
w1 (nn.Module): Linear layer for input-to-hidden transformation.
w2 (nn.Module): Linear layer for hidden-to-output transformation.
w3 (nn.Module): Additional linear layer for feature transformation.
"""
def __init__(self, dim: int, inter_dim: int):
"""
Initializes the MLP layer.
Args:
dim (int): Input and output dimensionality.
inter_dim (int): Hidden layer dimensionality.
"""
super().__init__()
self.w1 = ColumnParallelLinear(dim, inter_dim)
self.w2 = RowParallelLinear(inter_dim, dim)
self.w3 = ColumnParallelLinear(dim, inter_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for the MLP layer.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after MLP computation.
"""
return self.w2(F.silu(self.w1(x)) * self.w3(x))
class Gate(nn.Module):
"""
Gating mechanism for routing inputs in a mixture-of-experts (MoE) model.
Attributes:
dim (int): Dimensionality of input features.
topk (int): Number of top experts activated for each input.
n_groups (int): Number of groups for routing.
topk_groups (int): Number of groups to route inputs to.
score_func (str): Scoring function ('softmax' or 'sigmoid').
route_scale (float): Scaling factor for routing weights.
weight (torch.nn.Parameter): Learnable weights for the gate.
bias (Optional[torch.nn.Parameter]): Optional bias term for the gate.
"""
def __init__(self, args: ModelArgs):
"""
Initializes the Gate module.
Args:
args (ModelArgs): Model arguments containing gating parameters.
"""
super().__init__()
self.dim = args.dim
self.topk = args.n_activated_experts
self.n_groups = args.n_expert_groups
self.topk_groups = args.n_limited_groups
self.score_func = args.score_func
self.route_scale = args.route_scale
self.weight = nn.Parameter(torch.empty(args.n_routed_experts, args.dim))
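        # The correction bias below is created only when dim == 7168 (the released
        # DeepSeek-V3 scale); this appears to be a checkpoint-compatibility check
        # rather than a general rule.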
self.bias = nn.Parameter(torch.empty(args.n_routed_experts, dtype=torch.float32)) if self.dim == 7168 else None
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Forward pass for the gating mechanism.
Args:
x (torch.Tensor): Input tensor.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Routing weights and selected expert indices.
"""
scores = linear(x, self.weight)
if self.score_func == "softmax":
scores = scores.softmax(dim=-1, dtype=torch.float32)
else:
scores = scores.sigmoid()
original_scores = scores
if self.bias is not None:
scores = scores + self.bias
if self.n_groups > 1:
scores = scores.view(x.size(0), self.n_groups, -1)
if self.bias is None:
group_scores = scores.amax(dim=-1)
else:
group_scores = scores.topk(2, dim=-1)[0].sum(dim=-1)
indices = group_scores.topk(self.topk_groups, dim=-1)[1]
mask = scores.new_ones(x.size(0), self.n_groups, dtype=bool).scatter_(1, indices, False)
scores = scores.masked_fill_(mask.unsqueeze(-1), float("-inf")).flatten(1)
indices = torch.topk(scores, self.topk, dim=-1)[1]
weights = original_scores.gather(1, indices)
if self.score_func == "sigmoid":
weights /= weights.sum(dim=-1, keepdim=True)
weights *= self.route_scale
return weights.type_as(x), indices
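# A minimal numeric sketch (not from the original file) of the group-limited
# routing above: scores are grouped, only the top `topk_groups` groups remain
# eligible, and the final top-k experts are drawn from the surviving groups.
# All sizes here are illustrative assumptions.
def _group_limited_topk_sketch():
    import torch
    n_groups, experts_per_group, topk_groups, topk = 4, 2, 2, 2
    scores = torch.rand(1, n_groups * experts_per_group)
    grouped = scores.view(1, n_groups, experts_per_group)
    keep = grouped.amax(dim=-1).topk(topk_groups, dim=-1)[1]
    mask = torch.ones(1, n_groups, dtype=torch.bool).scatter_(1, keep, False)
    masked = grouped.masked_fill(mask.unsqueeze(-1), float("-inf")).flatten(1)
    indices = masked.topk(topk, dim=-1)[1]
    # Every selected expert falls inside one of the surviving groups.
    assert ((indices // experts_per_group).unsqueeze(-1) == keep.unsqueeze(1)).any(-1).all()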
class Expert(nn.Module):
"""
Expert layer for Mixture-of-Experts (MoE) models.
Attributes:
w1 (nn.Module): Linear layer for input-to-hidden transformation.
w2 (nn.Module): Linear layer for hidden-to-output transformation.
w3 (nn.Module): Additional linear layer for feature transformation.
"""
def __init__(self, dim: int, inter_dim: int):
"""
Initializes the Expert layer.
Args:
dim (int): Input and output dimensionality.
inter_dim (int): Hidden layer dimensionality.
"""
super().__init__()
self.w1 = Linear(dim, inter_dim)
self.w2 = Linear(inter_dim, dim)
self.w3 = Linear(dim, inter_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for the Expert layer.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after expert computation.
"""
return self.w2(F.silu(self.w1(x)) * self.w3(x))
class MoE(nn.Module):
"""
Mixture-of-Experts (MoE) module.
Attributes:
dim (int): Dimensionality of input features.
n_routed_experts (int): Total number of experts in the model.
n_local_experts (int): Number of experts handled locally in distributed systems.
n_activated_experts (int): Number of experts activated for each input.
gate (nn.Module): Gating mechanism to route inputs to experts.
experts (nn.ModuleList): List of expert modules.
shared_experts (nn.Module): Shared experts applied to all inputs.
"""
def __init__(self, args: ModelArgs):
"""
Initializes the MoE module.
Args:
args (ModelArgs): Model arguments containing MoE parameters.
"""
super().__init__()
self.dim = args.dim
assert args.n_routed_experts % world_size == 0, f"Number of experts must be divisible by world size (world_size={world_size})"
self.n_routed_experts = args.n_routed_experts
self.n_local_experts = args.n_routed_experts // world_size
self.n_activated_experts = args.n_activated_experts
self.experts_start_idx = rank * self.n_local_experts
self.experts_end_idx = self.experts_start_idx + self.n_local_experts
self.gate = Gate(args)
self.experts = nn.ModuleList([Expert(args.dim, args.moe_inter_dim) if self.experts_start_idx <= i < self.experts_end_idx else None
for i in range(self.n_routed_experts)])
self.shared_experts = MLP(args.dim, args.n_shared_experts * args.moe_inter_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for the MoE module.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after expert routing and computation.
"""
shape = x.size()
x = x.view(-1, self.dim)
weights, indices = self.gate(x)
y = torch.zeros_like(x)
counts = torch.bincount(indices.flatten(), minlength=self.n_routed_experts).tolist()
for i in range(self.experts_start_idx, self.experts_end_idx):
if counts[i] == 0:
continue
expert = self.experts[i]
idx, top = torch.where(indices == i)
y[idx] += expert(x[idx]) * weights[idx, top, None]
z = self.shared_experts(x)
if world_size > 1:
dist.all_reduce(y)
return (y + z).view(shape)
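# A minimal dispatch sketch (illustrative, not from the original file): for
# expert i, torch.where(indices == i) yields both the token rows and the
# top-k slot that chose it, so outputs can be weighted and scattered back.
# Identity "experts" make the expected result easy to check.
def _moe_dispatch_sketch():
    import torch
    tokens, topk, n_experts, dim = 5, 2, 4, 3
    x = torch.randn(tokens, dim)
    indices = torch.rand(tokens, n_experts).topk(topk, dim=-1)[1]  # distinct per row
    weights = torch.rand(tokens, topk)
    y = torch.zeros_like(x)
    for i in range(n_experts):
        idx, top = torch.where(indices == i)
        if idx.numel() == 0:
            continue
        y[idx] += x[idx] * weights[idx, top, None]  # expert(x) := x here
    # With identity experts, each token accumulates its total routing weight.
    assert torch.allclose(y, x * weights.sum(dim=-1, keepdim=True))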
class Block(nn.Module):
"""
Transformer block combining attention and feed-forward layers.
Attributes:
attn (nn.Module): Attention layer (MLA).
ffn (nn.Module): Feed-forward network (MLP or MoE).
attn_norm (nn.Module): Layer normalization for attention.
ffn_norm (nn.Module): Layer normalization for feed-forward network.
"""
def __init__(self, layer_id: int, args: ModelArgs):
"""
Initializes the Transformer block.
Args:
layer_id (int): Layer index in the transformer.
args (ModelArgs): Model arguments containing block parameters.
"""
super().__init__()
self.attn = MLA(args)
self.ffn = MLP(args.dim, args.inter_dim) if layer_id < args.n_dense_layers else MoE(args)
self.attn_norm = RMSNorm(args.dim)
self.ffn_norm = RMSNorm(args.dim)
def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]) -> torch.Tensor:
"""
Forward pass for the Transformer block.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position in the sequence.
freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
mask (Optional[torch.Tensor]): Mask tensor to exclude certain positions from attention.
Returns:
torch.Tensor: Output tensor after block computation.
"""
x = x + self.attn(self.attn_norm(x), start_pos, freqs_cis, mask)
x = x + self.ffn(self.ffn_norm(x))
return x
class Transformer(nn.Module):
"""
Transformer model with positional embeddings, multiple layers, and output projection.
Attributes:
max_seq_len (int): Maximum sequence length for the transformer.
embed (nn.Module): Embedding layer for input tokens.
layers (torch.nn.ModuleList): List of transformer blocks.
norm (nn.Module): Layer normalization applied after all blocks.
head (nn.Module): Output projection layer mapping to vocabulary size.
freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
"""
def __init__(self, args: ModelArgs):
"""
Initializes the Transformer model.
Args:
args (ModelArgs): Model arguments containing transformer parameters.
"""
global world_size, rank
world_size = dist.get_world_size() if dist.is_initialized() else 1
rank = dist.get_rank() if dist.is_initialized() else 0
Linear.dtype = torch.float8_e4m3fn if args.dtype == "fp8" else torch.bfloat16
Linear.scale_fmt = args.scale_fmt
super().__init__()
self.max_seq_len = args.max_seq_len
self.embed = ParallelEmbedding(args.vocab_size, args.dim)
self.layers = torch.nn.ModuleList()
for layer_id in range(args.n_layers):
self.layers.append(Block(layer_id, args))
self.norm = RMSNorm(args.dim)
self.head = ColumnParallelLinear(args.dim, args.vocab_size, dtype=torch.get_default_dtype())
self.register_buffer("freqs_cis", precompute_freqs_cis(args), persistent=False)
@torch.inference_mode()
def forward(self, tokens: torch.Tensor, start_pos: int = 0):
"""
Forward pass for the Transformer model.
Args:
tokens (torch.Tensor): Input tensor of token IDs with shape (batch_size, seq_len).
start_pos (int, optional): Starting position in the sequence for rotary embeddings. Defaults to 0.
Returns:
torch.Tensor: Logits tensor of shape (batch_size, vocab_size).
"""
seqlen = tokens.size(1)
h = self.embed(tokens)
freqs_cis = self.freqs_cis[start_pos:start_pos+seqlen]
mask = None
if seqlen > 1:
mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device).triu_(1)
for layer in self.layers:
h = layer(h, start_pos, freqs_cis, mask)
h = self.norm(h)[:, -1]
logits = self.head(h)
if world_size > 1:
all_logits = [torch.empty_like(logits) for _ in range(world_size)]
dist.all_gather(all_logits, logits)
logits = torch.cat(all_logits, dim=-1)
return logits
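# A small illustration (not from the original file) of the mask construction
# above: strictly upper-triangular -inf added to the scores enforces causality
# during prefill, while single-token decode steps (seqlen == 1) need no mask
# because the new token may attend to the entire cache.
def _causal_mask_sketch():
    import torch
    mask = torch.full((3, 3), float("-inf")).triu_(1)
    attn = (torch.zeros(3, 3) + mask).softmax(dim=-1)
    # Row s spreads attention uniformly over positions 0..s only.
    assert torch.allclose(attn[1], torch.tensor([0.5, 0.5, 0.0]))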
if __name__ == "__main__":
torch.set_default_dtype(torch.bfloat16)
torch.set_default_device("cuda")
torch.manual_seed(0)
args = ModelArgs()
| python | MIT | 9b4e9788e4a3a731f7567338ed15d3ec549ce03b | 2026-01-04T14:38:15.450976Z | true |
deepseek-ai/DeepSeek-V3 | https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/generate.py | inference/generate.py | import os
import json
from argparse import ArgumentParser
from typing import List
import torch
import torch.distributed as dist
from transformers import AutoTokenizer
from safetensors.torch import load_model
from model import Transformer, ModelArgs
def sample(logits, temperature: float = 1.0):
"""
Samples a token from the logits using temperature scaling.
Args:
logits (torch.Tensor): The logits tensor for token predictions.
temperature (float, optional): Temperature for scaling logits. Defaults to 1.0.
Returns:
torch.Tensor: The sampled token.
"""
logits = logits / max(temperature, 1e-5)
probs = torch.softmax(logits, dim=-1)
return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)
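# A minimal sketch (not part of the original file) of the trick above: dividing
# probabilities by i.i.d. Exponential(1) noise and taking argmax is the
# exponential-clock form of Gumbel-max sampling, so tokens are drawn in
# proportion to their (temperature-scaled) probabilities.
def _exponential_race_sketch():
    probs = torch.tensor([0.7, 0.2, 0.1])
    noise = torch.empty(20000, 3).exponential_(1)
    draws = (probs / noise).argmax(dim=-1)
    freqs = torch.bincount(draws, minlength=3).float() / draws.numel()
    assert torch.allclose(freqs, probs, atol=0.03)  # loose statistical check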
@torch.inference_mode()
def generate(
model: Transformer,
prompt_tokens: List[List[int]],
max_new_tokens: int,
eos_id: int,
temperature: float = 1.0
) -> List[List[int]]:
"""
Generates new tokens based on the given prompt tokens using the specified model.
Args:
model (Transformer): The transformer model used for token generation.
prompt_tokens (List[List[int]]): A list of lists containing the prompt tokens for each sequence.
max_new_tokens (int): The maximum number of new tokens to generate.
eos_id (int): The end-of-sequence token ID.
temperature (float, optional): The temperature value for sampling. Defaults to 1.0.
Returns:
List[List[int]]: A list of lists containing the generated tokens for each sequence.
"""
prompt_lens = [len(t) for t in prompt_tokens]
assert max(prompt_lens) <= model.max_seq_len, f"Prompt length exceeds model maximum sequence length (max_seq_len={model.max_seq_len})"
total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))
tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda")
for i, t in enumerate(prompt_tokens):
tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
prev_pos = 0
finished = torch.tensor([False] * len(prompt_tokens), device="cuda")
prompt_mask = tokens != -1
for cur_pos in range(min(prompt_lens), total_len):
logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
if temperature > 0:
next_token = sample(logits, temperature)
else:
next_token = logits.argmax(dim=-1)
next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)
tokens[:, cur_pos] = next_token
finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)
prev_pos = cur_pos
if finished.all():
break
completion_tokens = []
for i, toks in enumerate(tokens.tolist()):
toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]
if eos_id in toks:
toks = toks[:toks.index(eos_id)]
completion_tokens.append(toks)
return completion_tokens
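# A small sketch (illustrative) of the ragged-prompt handling above: where a
# row still holds prompt tokens at cur_pos, the sampled token is discarded in
# favor of the prompt token, so shorter prompts start generating earlier while
# longer prompts are still being teacher-forced.
def _prompt_mask_sketch():
    tokens = torch.tensor([[7, 8, -1, -1], [5, -1, -1, -1]])
    prompt_mask = tokens != -1
    sampled = torch.tensor([9, 9])  # hypothetical samples at position 1
    next_token = torch.where(prompt_mask[:, 1], tokens[:, 1], sampled)
    assert next_token.tolist() == [8, 9]  # row 0 forced, row 1 free to generate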
def main(
ckpt_path: str,
config: str,
input_file: str = "",
interactive: bool = True,
max_new_tokens: int = 100,
temperature: float = 1.0,
) -> None:
"""
Main function to load the model and perform interactive or batch text generation.
Args:
ckpt_path (str): Path to the model checkpoint directory.
config (str): Path to the model configuration file.
input_file (str, optional): Path to a file containing input prompts. Defaults to "".
interactive (bool, optional): Whether to run in interactive mode. Defaults to True.
max_new_tokens (int, optional): Maximum number of new tokens to generate. Defaults to 100.
temperature (float, optional): Temperature for sampling. Defaults to 1.0.
"""
world_size = int(os.getenv("WORLD_SIZE", "1"))
rank = int(os.getenv("RANK", "0"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
if world_size > 1:
dist.init_process_group("nccl")
global print
if rank != 0:
print = lambda *_, **__: None
torch.cuda.set_device(local_rank)
torch.set_default_dtype(torch.bfloat16)
torch.set_num_threads(8)
torch.manual_seed(965)
with open(config) as f:
args = ModelArgs(**json.load(f))
print(args)
with torch.device("cuda"):
model = Transformer(args)
tokenizer = AutoTokenizer.from_pretrained(ckpt_path)
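    # The short throwaway generation below appears to serve as a warm-up
    # (materializing caches) before the real checkpoint weights are loaded.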
tokenizer.decode(generate(model, [tokenizer.encode("DeepSeek")], 2, -1, 1.)[0])
load_model(model, os.path.join(ckpt_path, f"model{rank}-mp{world_size}.safetensors"))
if interactive:
messages = []
while True:
if world_size == 1:
prompt = input(">>> ")
elif rank == 0:
prompt = input(">>> ")
objects = [prompt]
dist.broadcast_object_list(objects, 0)
else:
objects = [None]
dist.broadcast_object_list(objects, 0)
prompt = objects[0]
if prompt == "/exit":
break
elif prompt == "/clear":
messages.clear()
continue
messages.append({"role": "user", "content": prompt})
prompt_tokens = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
completion_tokens = generate(model, [prompt_tokens], max_new_tokens, tokenizer.eos_token_id, temperature)
completion = tokenizer.decode(completion_tokens[0], skip_special_tokens=True)
print(completion)
messages.append({"role": "assistant", "content": completion})
else:
with open(input_file) as f:
prompts = [line.strip() for line in f.readlines()]
assert len(prompts) <= args.max_batch_size, f"Number of prompts exceeds maximum batch size ({args.max_batch_size})"
prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts]
completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)
completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
for prompt, completion in zip(prompts, completions):
print("Prompt:", prompt)
print("Completion:", completion)
print()
if world_size > 1:
dist.destroy_process_group()
if __name__ == "__main__":
"""
Command-line interface for distributed text generation.
Arguments:
--ckpt-path (str): Path to the model checkpoint directory.
--config (str): Path to the model configuration file.
--input-file (str, optional): File containing prompts for batch processing.
--interactive (bool, optional): Enable interactive mode for generating text.
--max-new-tokens (int, optional): Maximum number of new tokens to generate. Defaults to 200.
--temperature (float, optional): Temperature for sampling. Defaults to 0.2.
Raises:
AssertionError: If neither input-file nor interactive mode is specified.
"""
parser = ArgumentParser()
parser.add_argument("--ckpt-path", type=str, required=True)
parser.add_argument("--config", type=str, required=True)
parser.add_argument("--input-file", type=str, default="")
parser.add_argument("--interactive", action="store_true")
parser.add_argument("--max-new-tokens", type=int, default=200)
parser.add_argument("--temperature", type=float, default=0.2)
args = parser.parse_args()
assert args.input_file or args.interactive, "Either input-file or interactive mode must be specified"
main(args.ckpt_path, args.config, args.input_file, args.interactive, args.max_new_tokens, args.temperature)
| python | MIT | 9b4e9788e4a3a731f7567338ed15d3ec549ce03b | 2026-01-04T14:38:15.450976Z | false |
deepseek-ai/DeepSeek-V3 | https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/kernel.py | inference/kernel.py | from typing import Tuple, Optional
import torch
import triton
import triton.language as tl
from triton import Config
@triton.jit
def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr, scale_fmt: tl.constexpr):
"""
Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and the scaling factor in `s_ptr`.
Args:
x_ptr (triton.Pointer): Pointer to the input tensor.
y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored.
s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored.
BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance.
Returns:
None
"""
pid = tl.program_id(axis=0)
offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
x = tl.load(x_ptr + offs).to(tl.float32)
    amax = tl.max(tl.abs(x))  # per-block max-abs reduction
    amax = tl.maximum(amax, 1e-4)  # floor at 1e-4 to avoid division by zero
s = amax / 448.
if scale_fmt == "ue8m0":
exp = tl.math.ceil(tl.math.log2(s))
s = tl.math.exp2(exp)
y = x / s
y = y.to(y_ptr.dtype.element_ty)
tl.store(y_ptr + offs, y)
tl.store(s_ptr + pid, s)
def act_quant(x: torch.Tensor, block_size: int = 128, scale_fmt: Optional[str] = None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Quantizes the input tensor `x` using block-wise quantization.
Args:
x (torch.Tensor): The input tensor to be quantized. Must be contiguous and its last dimension size must be divisible by `block_size`.
block_size (int, optional): The size of the blocks to be used for quantization. Default is 128.
scale_fmt (Optional[str], optional): The format of the scale. Default is None.
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing:
- The quantized tensor with dtype `torch.float8_e4m3fn`.
- A tensor of scaling factors with dtype `torch.float32`.
"""
assert x.is_contiguous(), 'Input tensor must be contiguous'
assert x.size(-1) % block_size == 0, f'Last dimension size must be divisible by block_size (block_size={block_size})'
y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size, scale_fmt=scale_fmt)
return y, s
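# A pure-PyTorch reference sketch (an illustrative assumption, not part of the
# original file) mirroring the default (non-ue8m0) path of act_quant_kernel:
# each 1D block is scaled by amax / 448 so values fit the fp8 e4m3 range.
def _act_quant_reference(x: torch.Tensor, block_size: int = 128):
    blocks = x.contiguous().view(-1, block_size).float()
    s = blocks.abs().amax(dim=-1, keepdim=True).clamp_min(1e-4) / 448.0
    y = (blocks / s).to(torch.float8_e4m3fn)
    return y.view(x.size()), s.view(*x.size()[:-1], -1)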
@triton.jit
def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
"""
Dequantizes weights using the provided scaling factors and stores the result.
Args:
x_ptr (tl.pointer): Pointer to the quantized weights.
s_ptr (tl.pointer): Pointer to the scaling factors.
y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights.
M (int): Number of rows in the weight matrix.
N (int): Number of columns in the weight matrix.
BLOCK_SIZE (tl.constexpr): Size of the block for tiling.
Returns:
None
"""
pid_m = tl.program_id(axis=0)
pid_n = tl.program_id(axis=1)
n = tl.cdiv(N, BLOCK_SIZE)
offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
offs = offs_m[:, None] * N + offs_n[None, :]
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
s = tl.load(s_ptr + pid_m * n + pid_n)
y = x * s
tl.store(y_ptr + offs, y, mask=mask)
def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
"""
Dequantizes the given weight tensor using the provided scale tensor.
Args:
x (torch.Tensor): The quantized weight tensor of shape (M, N).
s (torch.Tensor): The scale tensor of shape (M//block_size, N//block_size).
block_size (int, optional): The block size to use for dequantization. Defaults to 128.
Returns:
torch.Tensor: The dequantized weight tensor of the same shape as `x`.
Raises:
AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2.
"""
assert x.is_contiguous() and s.is_contiguous(), 'Input tensors must be contiguous'
assert x.dim() == 2 and s.dim() == 2, 'Input tensors must have 2 dimensions'
M, N = x.size()
y = torch.empty_like(x, dtype=torch.get_default_dtype())
grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
return y
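# A compact reference sketch (illustrative, not from the original file) of the
# 2D block-wise dequantization above: expand each scalar in s over its
# (block_size x block_size) tile and multiply.
def _weight_dequant_reference(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
    scale = s.repeat_interleave(block_size, 0).repeat_interleave(block_size, 1)
    return x.float() * scale[: x.size(0), : x.size(1)]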
fp8_gemm_configs = [
Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
]
@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
@triton.jit
def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
a_s_ptr, b_s_ptr,
M, N: tl.constexpr, K: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr):
"""
Performs a matrix multiplication operation on FP8 matrices with scaling factors.
Args:
a_ptr (tl.tensor): Pointer to the first input matrix A.
b_ptr (tl.tensor): Pointer to the second input matrix B.
c_ptr (tl.tensor): Pointer to the output matrix C.
a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
M (int): Number of rows in matrix A and C.
N (tl.constexpr): Number of columns in matrix B and C.
K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.
Returns:
None
"""
pid_m = tl.program_id(axis=0)
pid_n = tl.program_id(axis=1)
k = tl.cdiv(K, BLOCK_SIZE_K)
offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
a_s_ptrs = a_s_ptr + offs_m * k
b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(k):
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
a_s = tl.load(a_s_ptrs)
b_s = tl.load(b_s_ptrs)
accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
a_ptrs += BLOCK_SIZE_K
b_ptrs += BLOCK_SIZE_K
a_s_ptrs += 1
b_s_ptrs += 1
c = accumulator.to(c_ptr.dtype.element_ty)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
tl.store(c_ptrs, c, mask=mask)
def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
"""
Perform a matrix multiplication using FP8 precision.
Args:
a (torch.Tensor): The first input matrix, must be contiguous.
a_s (torch.Tensor): The scaling factor for the first input matrix, must be contiguous.
b (torch.Tensor): The second input matrix, must be contiguous.
b_s (torch.Tensor): The scaling factor for the second input matrix, must be contiguous.
Returns:
torch.Tensor: The result of the matrix multiplication.
"""
assert a.is_contiguous() and b.is_contiguous(), 'Input tensors must be contiguous'
assert a_s.is_contiguous() and b_s.is_contiguous(), 'Scaling factor tensors must be contiguous'
K = a.size(-1)
M = a.numel() // K
N = b.size(0)
c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
return c
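# A CUDA-only usage sketch (illustrative, not part of the original file):
# quantize an activation with act_quant, build (N//128, K//128) weight scales
# to match the kernel's b_s layout, and sanity-check fp8_gemm against a
# full-precision matmul. Sizes are hypothetical; the tolerance is loose to
# absorb fp8 rounding error.
def _fp8_gemm_sketch():
    N, K = 128, 256  # multiples of the 128 block size
    a = torch.randn(4, K, dtype=torch.bfloat16, device="cuda")
    w = torch.randn(N, K, dtype=torch.bfloat16, device="cuda")
    a_q, a_s = act_quant(a)
    w_s = w.float().view(N // 128, 128, K // 128, 128).abs().amax(dim=(1, 3)) / 448.0
    w_q = (w.float() / w_s.repeat_interleave(128, 0).repeat_interleave(128, 1)).to(torch.float8_e4m3fn)
    c = fp8_gemm(a_q, a_s, w_q.contiguous(), w_s.contiguous())
    ref = (a.float() @ w.float().t()).to(c.dtype)
    assert (c - ref).abs().mean() / ref.abs().mean() < 0.1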
| python | MIT | 9b4e9788e4a3a731f7567338ed15d3ec549ce03b | 2026-01-04T14:38:15.450976Z | false |
deepseek-ai/DeepSeek-V3 | https://github.com/deepseek-ai/DeepSeek-V3/blob/9b4e9788e4a3a731f7567338ed15d3ec549ce03b/inference/fp8_cast_bf16.py | inference/fp8_cast_bf16.py | import os
import json
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm
import torch
from safetensors.torch import load_file, save_file
from kernel import weight_dequant
def main(fp8_path, bf16_path):
"""
Converts FP8 weights to BF16 and saves the converted weights.
This function reads FP8 weights from the specified directory, converts them to BF16,
and saves the converted weights to another specified directory. It also updates the
model index file to reflect the changes.
Args:
fp8_path (str): The path to the directory containing the FP8 weights and model index file.
bf16_path (str): The path to the directory where the converted BF16 weights will be saved.
Raises:
KeyError: If a required scale_inv tensor is missing for a weight.
Notes:
- The function assumes that the FP8 weights are stored in safetensor files.
- The function caches loaded safetensor files to optimize memory usage.
- The function updates the model index file to remove references to scale_inv tensors.
"""
torch.set_default_dtype(torch.bfloat16)
os.makedirs(bf16_path, exist_ok=True)
model_index_file = os.path.join(fp8_path, "model.safetensors.index.json")
with open(model_index_file, "r") as f:
model_index = json.load(f)
weight_map = model_index["weight_map"]
# Cache for loaded safetensor files
loaded_files = {}
fp8_weight_names = []
# Helper function to get tensor from the correct file
def get_tensor(tensor_name):
"""
Retrieves a tensor from the cached safetensor files or loads it from disk if not cached.
Args:
tensor_name (str): The name of the tensor to retrieve.
Returns:
torch.Tensor: The retrieved tensor.
Raises:
KeyError: If the tensor does not exist in the safetensor file.
"""
file_name = weight_map[tensor_name]
if file_name not in loaded_files:
file_path = os.path.join(fp8_path, file_name)
loaded_files[file_name] = load_file(file_path, device="cuda")
return loaded_files[file_name][tensor_name]
safetensor_files = list(glob(os.path.join(fp8_path, "*.safetensors")))
safetensor_files.sort()
for safetensor_file in tqdm(safetensor_files):
file_name = os.path.basename(safetensor_file)
current_state_dict = load_file(safetensor_file, device="cuda")
loaded_files[file_name] = current_state_dict
new_state_dict = {}
for weight_name, weight in current_state_dict.items():
if weight_name.endswith("_scale_inv"):
continue
elif weight.element_size() == 1: # FP8 weight
scale_inv_name = f"{weight_name}_scale_inv"
try:
# Get scale_inv from the correct file
scale_inv = get_tensor(scale_inv_name)
fp8_weight_names.append(weight_name)
new_state_dict[weight_name] = weight_dequant(weight, scale_inv)
except KeyError:
print(f"Warning: Missing scale_inv tensor for {weight_name}, skipping conversion")
new_state_dict[weight_name] = weight
else:
new_state_dict[weight_name] = weight
new_safetensor_file = os.path.join(bf16_path, file_name)
save_file(new_state_dict, new_safetensor_file)
        # Memory management: keep only the 2 most recently loaded files (insertion-order eviction)
if len(loaded_files) > 2:
oldest_file = next(iter(loaded_files))
del loaded_files[oldest_file]
torch.cuda.empty_cache()
# Update model index
new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json")
for weight_name in fp8_weight_names:
scale_inv_name = f"{weight_name}_scale_inv"
if scale_inv_name in weight_map:
weight_map.pop(scale_inv_name)
with open(new_model_index_file, "w") as f:
json.dump({"metadata": {}, "weight_map": weight_map}, f, indent=2)
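# A tiny sketch (illustrative, not part of the original file) of the eviction
# policy used above: Python dicts preserve insertion order, so deleting
# next(iter(...)) drops the oldest loaded shard once more than two are resident.
def _eviction_sketch():
    cache = {"shard-0": 0, "shard-1": 1, "shard-2": 2}
    while len(cache) > 2:
        del cache[next(iter(cache))]
    assert list(cache) == ["shard-1", "shard-2"]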
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--input-fp8-hf-path", type=str, required=True)
parser.add_argument("--output-bf16-hf-path", type=str, required=True)
args = parser.parse_args()
main(args.input_fp8_hf_path, args.output_bf16_hf_path)
| python | MIT | 9b4e9788e4a3a731f7567338ed15d3ec549ce03b | 2026-01-04T14:38:15.450976Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/src/markitdown_mcp/__main__.py | packages/markitdown-mcp/src/markitdown_mcp/__main__.py | import contextlib
import sys
import os
from collections.abc import AsyncIterator
from mcp.server.fastmcp import FastMCP
from starlette.applications import Starlette
from mcp.server.sse import SseServerTransport
from starlette.requests import Request
from starlette.routing import Mount, Route
from starlette.types import Receive, Scope, Send
from mcp.server import Server
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from markitdown import MarkItDown
import uvicorn
# Initialize FastMCP server for MarkItDown (SSE)
mcp = FastMCP("markitdown")
@mcp.tool()
async def convert_to_markdown(uri: str) -> str:
"""Convert a resource described by an http:, https:, file: or data: URI to markdown"""
return MarkItDown(enable_plugins=check_plugins_enabled()).convert_uri(uri).markdown
def check_plugins_enabled() -> bool:
return os.getenv("MARKITDOWN_ENABLE_PLUGINS", "false").strip().lower() in (
"true",
"1",
"yes",
)
def create_starlette_app(mcp_server: Server, *, debug: bool = False) -> Starlette:
sse = SseServerTransport("/messages/")
session_manager = StreamableHTTPSessionManager(
app=mcp_server,
event_store=None,
json_response=True,
stateless=True,
)
async def handle_sse(request: Request) -> None:
async with sse.connect_sse(
request.scope,
request.receive,
request._send,
) as (read_stream, write_stream):
await mcp_server.run(
read_stream,
write_stream,
mcp_server.create_initialization_options(),
)
async def handle_streamable_http(
scope: Scope, receive: Receive, send: Send
) -> None:
await session_manager.handle_request(scope, receive, send)
@contextlib.asynccontextmanager
async def lifespan(app: Starlette) -> AsyncIterator[None]:
"""Context manager for session manager."""
async with session_manager.run():
print("Application started with StreamableHTTP session manager!")
try:
yield
finally:
print("Application shutting down...")
return Starlette(
debug=debug,
routes=[
Route("/sse", endpoint=handle_sse),
Mount("/mcp", app=handle_streamable_http),
Mount("/messages/", app=sse.handle_post_message),
],
lifespan=lifespan,
)
# Main entry point
def main():
import argparse
mcp_server = mcp._mcp_server
parser = argparse.ArgumentParser(description="Run a MarkItDown MCP server")
parser.add_argument(
"--http",
action="store_true",
help="Run the server with Streamable HTTP and SSE transport rather than STDIO (default: False)",
)
parser.add_argument(
"--sse",
action="store_true",
help="(Deprecated) An alias for --http (default: False)",
)
parser.add_argument(
"--host", default=None, help="Host to bind to (default: 127.0.0.1)"
)
parser.add_argument(
"--port", type=int, default=None, help="Port to listen on (default: 3001)"
)
args = parser.parse_args()
use_http = args.http or args.sse
if not use_http and (args.host or args.port):
        parser.error(
            "Host and port arguments are only valid when using streamable HTTP or SSE transport (see: --http)."
        )  # parser.error() exits with status 2, so no explicit sys.exit is needed
if use_http:
starlette_app = create_starlette_app(mcp_server, debug=True)
uvicorn.run(
starlette_app,
host=args.host if args.host else "127.0.0.1",
port=args.port if args.port else 3001,
)
else:
mcp.run()
if __name__ == "__main__":
main()
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/src/markitdown_mcp/__about__.py | packages/markitdown-mcp/src/markitdown_mcp/__about__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
__version__ = "0.0.1a4"
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/src/markitdown_mcp/__init__.py | packages/markitdown-mcp/src/markitdown_mcp/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
from .__about__ import __version__
__all__ = [
"__version__",
]
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-mcp/tests/__init__.py | packages/markitdown-mcp/tests/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_exceptions.py | packages/markitdown/src/markitdown/_exceptions.py | from typing import Optional, List, Any
MISSING_DEPENDENCY_MESSAGE = """{converter} recognized the input as a potential {extension} file, but the dependencies needed to read {extension} files have not been installed. To resolve this error, include the optional dependency [{feature}] or [all] when installing MarkItDown. For example:
* pip install markitdown[{feature}]
* pip install markitdown[all]
* pip install markitdown[{feature}, ...]
* etc."""
class MarkItDownException(Exception):
"""
Base exception class for MarkItDown.
"""
pass
class MissingDependencyException(MarkItDownException):
"""
Converters shipped with MarkItDown may depend on optional
dependencies. This exception is thrown when a converter's
convert() method is called, but the required dependency is not
installed. This is not necessarily a fatal error, as the converter
will simply be skipped (an error will bubble up only if no other
suitable converter is found).
Error messages should clearly indicate which dependency is missing.
"""
pass
class UnsupportedFormatException(MarkItDownException):
"""
Thrown when no suitable converter was found for the given file.
"""
pass
class FailedConversionAttempt(object):
"""
    Represents a single attempt to convert a file.
"""
def __init__(self, converter: Any, exc_info: Optional[tuple] = None):
self.converter = converter
self.exc_info = exc_info
class FileConversionException(MarkItDownException):
"""
Thrown when a suitable converter was found, but the conversion
process fails for any reason.
"""
def __init__(
self,
message: Optional[str] = None,
attempts: Optional[List[FailedConversionAttempt]] = None,
):
self.attempts = attempts
if message is None:
if attempts is None:
message = "File conversion failed."
else:
message = f"File conversion failed after {len(attempts)} attempts:\n"
for attempt in attempts:
if attempt.exc_info is None:
message += f" - {type(attempt.converter).__name__} provided no execution info."
else:
message += f" - {type(attempt.converter).__name__} threw {attempt.exc_info[0].__name__} with message: {attempt.exc_info[1]}\n"
super().__init__(message)
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_uri_utils.py | packages/markitdown/src/markitdown/_uri_utils.py | import base64
import os
from typing import Tuple, Dict
from urllib.request import url2pathname
from urllib.parse import urlparse, unquote_to_bytes
def file_uri_to_path(file_uri: str) -> Tuple[str | None, str]:
"""Convert a file URI to a local file path"""
parsed = urlparse(file_uri)
if parsed.scheme != "file":
raise ValueError(f"Not a file URL: {file_uri}")
netloc = parsed.netloc if parsed.netloc else None
path = os.path.abspath(url2pathname(parsed.path))
return netloc, path
def parse_data_uri(uri: str) -> Tuple[str | None, Dict[str, str], bytes]:
if not uri.startswith("data:"):
raise ValueError("Not a data URI")
header, _, data = uri.partition(",")
if not _:
raise ValueError("Malformed data URI, missing ',' separator")
meta = header[5:] # Strip 'data:'
parts = meta.split(";")
is_base64 = False
# Ends with base64?
if parts[-1] == "base64":
parts.pop()
is_base64 = True
mime_type = None # Normally this would default to text/plain but we won't assume
if len(parts) and len(parts[0]) > 0:
# First part is the mime type
mime_type = parts.pop(0)
attributes: Dict[str, str] = {}
for part in parts:
# Handle key=value pairs in the middle
if "=" in part:
key, value = part.split("=", 1)
attributes[key] = value
elif len(part) > 0:
attributes[part] = ""
content = base64.b64decode(data) if is_base64 else unquote_to_bytes(data)
return mime_type, attributes, content
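# A small usage sketch (illustrative, not part of the original module): a
# base64 data URI decodes into its mime type, attributes, and raw bytes.
def _parse_data_uri_demo():
    mime, attrs, data = parse_data_uri("data:text/plain;charset=utf-8;base64,SGVsbG8=")
    assert mime == "text/plain"
    assert attrs == {"charset": "utf-8"}
    assert data == b"Hello"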
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/__main__.py | packages/markitdown/src/markitdown/__main__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
import argparse
import sys
import codecs
from textwrap import dedent
from importlib.metadata import entry_points
from .__about__ import __version__
from ._markitdown import MarkItDown, StreamInfo, DocumentConverterResult
def main():
parser = argparse.ArgumentParser(
description="Convert various file formats to markdown.",
prog="markitdown",
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=dedent(
"""
SYNTAX:
markitdown <OPTIONAL: FILENAME>
If FILENAME is empty, markitdown reads from stdin.
EXAMPLE:
markitdown example.pdf
OR
cat example.pdf | markitdown
OR
markitdown < example.pdf
OR to save to a file use
markitdown example.pdf -o example.md
OR
markitdown example.pdf > example.md
"""
).strip(),
)
parser.add_argument(
"-v",
"--version",
action="version",
version=f"%(prog)s {__version__}",
help="show the version number and exit",
)
parser.add_argument(
"-o",
"--output",
help="Output file name. If not provided, output is written to stdout.",
)
parser.add_argument(
"-x",
"--extension",
help="Provide a hint about the file extension (e.g., when reading from stdin).",
)
parser.add_argument(
"-m",
"--mime-type",
help="Provide a hint about the file's MIME type.",
)
parser.add_argument(
"-c",
"--charset",
help="Provide a hint about the file's charset (e.g, UTF-8).",
)
parser.add_argument(
"-d",
"--use-docintel",
action="store_true",
help="Use Document Intelligence to extract text instead of offline conversion. Requires a valid Document Intelligence Endpoint.",
)
parser.add_argument(
"-e",
"--endpoint",
type=str,
help="Document Intelligence Endpoint. Required if using Document Intelligence.",
)
parser.add_argument(
"-p",
"--use-plugins",
action="store_true",
help="Use 3rd-party plugins to convert files. Use --list-plugins to see installed plugins.",
)
parser.add_argument(
"--list-plugins",
action="store_true",
help="List installed 3rd-party plugins. Plugins are loaded when using the -p or --use-plugin option.",
)
parser.add_argument(
"--keep-data-uris",
action="store_true",
help="Keep data URIs (like base64-encoded images) in the output. By default, data URIs are truncated.",
)
parser.add_argument("filename", nargs="?")
args = parser.parse_args()
# Parse the extension hint
extension_hint = args.extension
if extension_hint is not None:
extension_hint = extension_hint.strip().lower()
if len(extension_hint) > 0:
if not extension_hint.startswith("."):
extension_hint = "." + extension_hint
else:
extension_hint = None
# Parse the mime type
mime_type_hint = args.mime_type
if mime_type_hint is not None:
mime_type_hint = mime_type_hint.strip()
if len(mime_type_hint) > 0:
if mime_type_hint.count("/") != 1:
_exit_with_error(f"Invalid MIME type: {mime_type_hint}")
else:
mime_type_hint = None
# Parse the charset
charset_hint = args.charset
if charset_hint is not None:
charset_hint = charset_hint.strip()
if len(charset_hint) > 0:
try:
charset_hint = codecs.lookup(charset_hint).name
except LookupError:
_exit_with_error(f"Invalid charset: {charset_hint}")
else:
charset_hint = None
stream_info = None
if (
extension_hint is not None
or mime_type_hint is not None
or charset_hint is not None
):
stream_info = StreamInfo(
extension=extension_hint, mimetype=mime_type_hint, charset=charset_hint
)
if args.list_plugins:
# List installed plugins, then exit
print("Installed MarkItDown 3rd-party Plugins:\n")
plugin_entry_points = list(entry_points(group="markitdown.plugin"))
if len(plugin_entry_points) == 0:
print(" * No 3rd-party plugins installed.")
print(
"\nFind plugins by searching for the hashtag #markitdown-plugin on GitHub.\n"
)
else:
for entry_point in plugin_entry_points:
print(f" * {entry_point.name:<16}\t(package: {entry_point.value})")
print(
"\nUse the -p (or --use-plugins) option to enable 3rd-party plugins.\n"
)
sys.exit(0)
if args.use_docintel:
if args.endpoint is None:
_exit_with_error(
"Document Intelligence Endpoint is required when using Document Intelligence."
)
elif args.filename is None:
_exit_with_error("Filename is required when using Document Intelligence.")
markitdown = MarkItDown(
enable_plugins=args.use_plugins, docintel_endpoint=args.endpoint
)
else:
markitdown = MarkItDown(enable_plugins=args.use_plugins)
if args.filename is None:
result = markitdown.convert_stream(
sys.stdin.buffer,
stream_info=stream_info,
keep_data_uris=args.keep_data_uris,
)
else:
result = markitdown.convert(
args.filename, stream_info=stream_info, keep_data_uris=args.keep_data_uris
)
_handle_output(args, result)
def _handle_output(args, result: DocumentConverterResult):
"""Handle output to stdout or file"""
if args.output:
with open(args.output, "w", encoding="utf-8") as f:
f.write(result.markdown)
else:
# Handle stdout encoding errors more gracefully
print(
result.markdown.encode(sys.stdout.encoding, errors="replace").decode(
sys.stdout.encoding
)
)
def _exit_with_error(message: str):
print(message)
sys.exit(1)
if __name__ == "__main__":
main()
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_stream_info.py | packages/markitdown/src/markitdown/_stream_info.py | from dataclasses import dataclass, asdict
from typing import Optional
@dataclass(kw_only=True, frozen=True)
class StreamInfo:
"""The StreamInfo class is used to store information about a file stream.
All fields can be None, and will depend on how the stream was opened.
"""
mimetype: Optional[str] = None
extension: Optional[str] = None
charset: Optional[str] = None
filename: Optional[
str
] = None # From local path, url, or Content-Disposition header
local_path: Optional[str] = None # If read from disk
url: Optional[str] = None # If read from url
def copy_and_update(self, *args, **kwargs):
"""Copy the StreamInfo object and update it with the given StreamInfo
instance and/or other keyword arguments."""
new_info = asdict(self)
for si in args:
assert isinstance(si, StreamInfo)
new_info.update({k: v for k, v in asdict(si).items() if v is not None})
if len(kwargs) > 0:
new_info.update(kwargs)
return StreamInfo(**new_info)
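# A brief usage sketch (illustrative, not part of the original module): fields
# from later StreamInfo arguments win only where they are set (None values are
# filtered out), while keyword overrides always apply.
def _copy_and_update_demo():
    base = StreamInfo(extension=".html", charset="utf-8")
    updated = base.copy_and_update(StreamInfo(mimetype="text/html"), extension=".htm")
    assert (updated.extension, updated.mimetype, updated.charset) == (".htm", "text/html", "utf-8")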
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/__about__.py | packages/markitdown/src/markitdown/__about__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
__version__ = "0.1.4"
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_markitdown.py | packages/markitdown/src/markitdown/_markitdown.py | import mimetypes
import os
import re
import sys
import shutil
import traceback
import io
from dataclasses import dataclass
from importlib.metadata import entry_points
from typing import Any, List, Dict, Optional, Union, BinaryIO
from pathlib import Path
from urllib.parse import urlparse
from warnings import warn
import requests
import magika
import charset_normalizer
import codecs
from ._stream_info import StreamInfo
from ._uri_utils import parse_data_uri, file_uri_to_path
from .converters import (
PlainTextConverter,
HtmlConverter,
RssConverter,
WikipediaConverter,
YouTubeConverter,
IpynbConverter,
BingSerpConverter,
PdfConverter,
DocxConverter,
XlsxConverter,
XlsConverter,
PptxConverter,
ImageConverter,
AudioConverter,
OutlookMsgConverter,
ZipConverter,
EpubConverter,
DocumentIntelligenceConverter,
CsvConverter,
)
from ._base_converter import DocumentConverter, DocumentConverterResult
from ._exceptions import (
FileConversionException,
UnsupportedFormatException,
FailedConversionAttempt,
)
# Lower priority values are tried first.
PRIORITY_SPECIFIC_FILE_FORMAT = (
0.0 # e.g., .docx, .pdf, .xlsx, Or specific pages, e.g., wikipedia
)
PRIORITY_GENERIC_FILE_FORMAT = (
10.0 # Near catch-all converters for mimetypes like text/*, etc.
)
_plugins: Union[None, List[Any]] = None # If None, plugins have not been loaded yet.
def _load_plugins() -> Union[None, List[Any]]:
"""Lazy load plugins, exiting early if already loaded."""
global _plugins
# Skip if we've already loaded plugins
if _plugins is not None:
return _plugins
# Load plugins
_plugins = []
for entry_point in entry_points(group="markitdown.plugin"):
try:
_plugins.append(entry_point.load())
except Exception:
tb = traceback.format_exc()
warn(f"Plugin '{entry_point.name}' failed to load ... skipping:\n{tb}")
return _plugins
@dataclass(kw_only=True, frozen=True)
class ConverterRegistration:
"""A registration of a converter with its priority and other metadata."""
converter: DocumentConverter
priority: float
class MarkItDown:
"""(In preview) An extremely simple text-based document reader, suitable for LLM use.
This reader will convert common file-types or webpages to Markdown."""
def __init__(
self,
*,
enable_builtins: Union[None, bool] = None,
enable_plugins: Union[None, bool] = None,
**kwargs,
):
self._builtins_enabled = False
self._plugins_enabled = False
requests_session = kwargs.get("requests_session")
if requests_session is None:
self._requests_session = requests.Session()
else:
self._requests_session = requests_session
self._magika = magika.Magika()
# TODO - remove these (see enable_builtins)
self._llm_client: Any = None
self._llm_model: Union[str | None] = None
self._llm_prompt: Union[str | None] = None
self._exiftool_path: Union[str | None] = None
self._style_map: Union[str | None] = None
# Register the converters
self._converters: List[ConverterRegistration] = []
if (
enable_builtins is None or enable_builtins
): # Default to True when not specified
self.enable_builtins(**kwargs)
if enable_plugins:
self.enable_plugins(**kwargs)
def enable_builtins(self, **kwargs) -> None:
"""
Enable and register built-in converters.
Built-in converters are enabled by default.
This method should only be called once, if built-ins were initially disabled.
"""
if not self._builtins_enabled:
# TODO: Move these into converter constructors
self._llm_client = kwargs.get("llm_client")
self._llm_model = kwargs.get("llm_model")
self._llm_prompt = kwargs.get("llm_prompt")
self._exiftool_path = kwargs.get("exiftool_path")
self._style_map = kwargs.get("style_map")
if self._exiftool_path is None:
self._exiftool_path = os.getenv("EXIFTOOL_PATH")
# Still none? Check well-known paths
if self._exiftool_path is None:
candidate = shutil.which("exiftool")
if candidate:
candidate = os.path.abspath(candidate)
if any(
d == os.path.dirname(candidate)
for d in [
"/usr/bin",
"/usr/local/bin",
"/opt",
"/opt/bin",
"/opt/local/bin",
"/opt/homebrew/bin",
"C:\\Windows\\System32",
"C:\\Program Files",
"C:\\Program Files (x86)",
]
):
self._exiftool_path = candidate
# Register converters for successful browsing operations
# Later registrations are tried first / take higher priority than earlier registrations
# To this end, the most specific converters should appear below the most generic converters
self.register_converter(
PlainTextConverter(), priority=PRIORITY_GENERIC_FILE_FORMAT
)
self.register_converter(
ZipConverter(markitdown=self), priority=PRIORITY_GENERIC_FILE_FORMAT
)
self.register_converter(
HtmlConverter(), priority=PRIORITY_GENERIC_FILE_FORMAT
)
self.register_converter(RssConverter())
self.register_converter(WikipediaConverter())
self.register_converter(YouTubeConverter())
self.register_converter(BingSerpConverter())
self.register_converter(DocxConverter())
self.register_converter(XlsxConverter())
self.register_converter(XlsConverter())
self.register_converter(PptxConverter())
self.register_converter(AudioConverter())
self.register_converter(ImageConverter())
self.register_converter(IpynbConverter())
self.register_converter(PdfConverter())
self.register_converter(OutlookMsgConverter())
self.register_converter(EpubConverter())
self.register_converter(CsvConverter())
# Register Document Intelligence converter at the top of the stack if endpoint is provided
docintel_endpoint = kwargs.get("docintel_endpoint")
if docintel_endpoint is not None:
docintel_args: Dict[str, Any] = {}
docintel_args["endpoint"] = docintel_endpoint
docintel_credential = kwargs.get("docintel_credential")
if docintel_credential is not None:
docintel_args["credential"] = docintel_credential
docintel_types = kwargs.get("docintel_file_types")
if docintel_types is not None:
docintel_args["file_types"] = docintel_types
docintel_version = kwargs.get("docintel_api_version")
if docintel_version is not None:
docintel_args["api_version"] = docintel_version
self.register_converter(
DocumentIntelligenceConverter(**docintel_args),
)
self._builtins_enabled = True
else:
warn("Built-in converters are already enabled.", RuntimeWarning)
def enable_plugins(self, **kwargs) -> None:
"""
Enable and register converters provided by plugins.
Plugins are disabled by default.
This method should only be called once, if plugins were initially disabled.
"""
if not self._plugins_enabled:
# Load plugins
plugins = _load_plugins()
assert plugins is not None
for plugin in plugins:
try:
plugin.register_converters(self, **kwargs)
except Exception:
tb = traceback.format_exc()
warn(f"Plugin '{plugin}' failed to register converters:\n{tb}")
self._plugins_enabled = True
else:
warn("Plugins converters are already enabled.", RuntimeWarning)
def convert(
self,
source: Union[str, requests.Response, Path, BinaryIO],
*,
stream_info: Optional[StreamInfo] = None,
**kwargs: Any,
) -> DocumentConverterResult: # TODO: deal with kwargs
"""
Args:
- source: can be a path (str or Path), url, or a requests.response object
- stream_info: optional stream info to use for the conversion. If None, infer from source
- kwargs: additional arguments to pass to the converter
"""
# Local path or url
if isinstance(source, str):
if (
source.startswith("http:")
or source.startswith("https:")
or source.startswith("file:")
or source.startswith("data:")
):
# Rename the url argument to mock_url
# (Deprecated -- use stream_info)
_kwargs = {k: v for k, v in kwargs.items()}
if "url" in _kwargs:
_kwargs["mock_url"] = _kwargs["url"]
del _kwargs["url"]
return self.convert_uri(source, stream_info=stream_info, **_kwargs)
else:
return self.convert_local(source, stream_info=stream_info, **kwargs)
# Path object
elif isinstance(source, Path):
return self.convert_local(source, stream_info=stream_info, **kwargs)
# Request response
elif isinstance(source, requests.Response):
return self.convert_response(source, stream_info=stream_info, **kwargs)
# Binary stream
elif (
hasattr(source, "read")
and callable(source.read)
and not isinstance(source, io.TextIOBase)
):
return self.convert_stream(source, stream_info=stream_info, **kwargs)
else:
raise TypeError(
f"Invalid source type: {type(source)}. Expected str, requests.Response, BinaryIO."
)
def convert_local(
self,
path: Union[str, Path],
*,
stream_info: Optional[StreamInfo] = None,
file_extension: Optional[str] = None, # Deprecated -- use stream_info
url: Optional[str] = None, # Deprecated -- use stream_info
**kwargs: Any,
) -> DocumentConverterResult:
if isinstance(path, Path):
path = str(path)
# Build a base StreamInfo object from which to start guesses
base_guess = StreamInfo(
local_path=path,
extension=os.path.splitext(path)[1],
filename=os.path.basename(path),
)
# Extend the base_guess with any additional info from the arguments
if stream_info is not None:
base_guess = base_guess.copy_and_update(stream_info)
if file_extension is not None:
# Deprecated -- use stream_info
base_guess = base_guess.copy_and_update(extension=file_extension)
if url is not None:
# Deprecated -- use stream_info
base_guess = base_guess.copy_and_update(url=url)
with open(path, "rb") as fh:
guesses = self._get_stream_info_guesses(
file_stream=fh, base_guess=base_guess
)
return self._convert(file_stream=fh, stream_info_guesses=guesses, **kwargs)
def convert_stream(
self,
stream: BinaryIO,
*,
stream_info: Optional[StreamInfo] = None,
file_extension: Optional[str] = None, # Deprecated -- use stream_info
url: Optional[str] = None, # Deprecated -- use stream_info
**kwargs: Any,
) -> DocumentConverterResult:
guesses: List[StreamInfo] = []
# Do we have anything on which to base a guess?
base_guess = None
if stream_info is not None or file_extension is not None or url is not None:
# Start with a non-Null base guess
if stream_info is None:
base_guess = StreamInfo()
else:
base_guess = stream_info
if file_extension is not None:
# Deprecated -- use stream_info
assert base_guess is not None # for mypy
base_guess = base_guess.copy_and_update(extension=file_extension)
if url is not None:
# Deprecated -- use stream_info
assert base_guess is not None # for mypy
base_guess = base_guess.copy_and_update(url=url)
# Check if we have a seekable stream. If not, load the entire stream into memory.
if not stream.seekable():
buffer = io.BytesIO()
while True:
chunk = stream.read(4096)
if not chunk:
break
buffer.write(chunk)
buffer.seek(0)
stream = buffer
# Add guesses based on stream content
guesses = self._get_stream_info_guesses(
file_stream=stream, base_guess=base_guess or StreamInfo()
)
return self._convert(file_stream=stream, stream_info_guesses=guesses, **kwargs)
def convert_url(
self,
url: str,
*,
stream_info: Optional[StreamInfo] = None,
file_extension: Optional[str] = None,
mock_url: Optional[str] = None,
**kwargs: Any,
) -> DocumentConverterResult:
"""Alias for convert_uri()"""
# convert_url will likely be deprecated in the future in favor of convert_uri
return self.convert_uri(
url,
stream_info=stream_info,
file_extension=file_extension,
mock_url=mock_url,
**kwargs,
)
def convert_uri(
self,
uri: str,
*,
stream_info: Optional[StreamInfo] = None,
file_extension: Optional[str] = None, # Deprecated -- use stream_info
mock_url: Optional[
str
] = None, # Mock the request as if it came from a different URL
**kwargs: Any,
) -> DocumentConverterResult:
uri = uri.strip()
# File URIs
if uri.startswith("file:"):
netloc, path = file_uri_to_path(uri)
if netloc and netloc != "localhost":
raise ValueError(
f"Unsupported file URI: {uri}. Netloc must be empty or localhost."
)
return self.convert_local(
path,
stream_info=stream_info,
file_extension=file_extension,
url=mock_url,
**kwargs,
)
# Data URIs
elif uri.startswith("data:"):
mimetype, attributes, data = parse_data_uri(uri)
base_guess = StreamInfo(
mimetype=mimetype,
charset=attributes.get("charset"),
)
if stream_info is not None:
base_guess = base_guess.copy_and_update(stream_info)
return self.convert_stream(
io.BytesIO(data),
stream_info=base_guess,
file_extension=file_extension,
url=mock_url,
**kwargs,
)
# HTTP/HTTPS URIs
elif uri.startswith("http:") or uri.startswith("https:"):
response = self._requests_session.get(uri, stream=True)
response.raise_for_status()
return self.convert_response(
response,
stream_info=stream_info,
file_extension=file_extension,
url=mock_url,
**kwargs,
)
else:
raise ValueError(
f"Unsupported URI scheme: {uri.split(':')[0]}. Supported schemes are: file:, data:, http:, https:"
)
def convert_response(
self,
response: requests.Response,
*,
stream_info: Optional[StreamInfo] = None,
file_extension: Optional[str] = None, # Deprecated -- use stream_info
url: Optional[str] = None, # Deprecated -- use stream_info
**kwargs: Any,
) -> DocumentConverterResult:
# If there is a content-type header, get the mimetype and charset (if present)
mimetype: Optional[str] = None
charset: Optional[str] = None
if "content-type" in response.headers:
parts = response.headers["content-type"].split(";")
mimetype = parts.pop(0).strip()
for part in parts:
if part.strip().startswith("charset="):
_charset = part.split("=")[1].strip()
if len(_charset) > 0:
charset = _charset
# If there is a content-disposition header, get the filename and possibly the extension
filename: Optional[str] = None
extension: Optional[str] = None
if "content-disposition" in response.headers:
m = re.search(r"filename=([^;]+)", response.headers["content-disposition"])
if m:
filename = m.group(1).strip("\"'")
_, _extension = os.path.splitext(filename)
if len(_extension) > 0:
extension = _extension
# If there is still no filename, try to read it from the url
if filename is None:
parsed_url = urlparse(response.url)
_, _extension = os.path.splitext(parsed_url.path)
if len(_extension) > 0: # Looks like this might be a file!
filename = os.path.basename(parsed_url.path)
extension = _extension
# Create an initial guess from all this information
base_guess = StreamInfo(
mimetype=mimetype,
charset=charset,
filename=filename,
extension=extension,
url=response.url,
)
# Update with any additional info from the arguments
if stream_info is not None:
base_guess = base_guess.copy_and_update(stream_info)
if file_extension is not None:
# Deprecated -- use stream_info
base_guess = base_guess.copy_and_update(extension=file_extension)
if url is not None:
# Deprecated -- use stream_info
base_guess = base_guess.copy_and_update(url=url)
# Read into BytesIO
buffer = io.BytesIO()
for chunk in response.iter_content(chunk_size=512):
buffer.write(chunk)
buffer.seek(0)
# Convert
guesses = self._get_stream_info_guesses(
file_stream=buffer, base_guess=base_guess
)
return self._convert(file_stream=buffer, stream_info_guesses=guesses, **kwargs)
def _convert(
self, *, file_stream: BinaryIO, stream_info_guesses: List[StreamInfo], **kwargs
) -> DocumentConverterResult:
res: Union[None, DocumentConverterResult] = None
# Keep track of which converters throw exceptions
failed_attempts: List[FailedConversionAttempt] = []
# Create a copy of the page_converters list, sorted by priority.
# We do this with each call to _convert because the priority of converters may change between calls.
# The sort is guaranteed to be stable, so converters with the same priority will remain in the same order.
sorted_registrations = sorted(self._converters, key=lambda x: x.priority)
# Remember the initial stream position so that we can return to it
cur_pos = file_stream.tell()
for stream_info in stream_info_guesses + [StreamInfo()]:
for converter_registration in sorted_registrations:
converter = converter_registration.converter
# Sanity check -- make sure the cur_pos is still the same
assert (
cur_pos == file_stream.tell()
), "File stream position should NOT change between guess iterations"
_kwargs = {k: v for k, v in kwargs.items()}
# Copy any additional global options
if "llm_client" not in _kwargs and self._llm_client is not None:
_kwargs["llm_client"] = self._llm_client
if "llm_model" not in _kwargs and self._llm_model is not None:
_kwargs["llm_model"] = self._llm_model
if "llm_prompt" not in _kwargs and self._llm_prompt is not None:
_kwargs["llm_prompt"] = self._llm_prompt
if "style_map" not in _kwargs and self._style_map is not None:
_kwargs["style_map"] = self._style_map
if "exiftool_path" not in _kwargs and self._exiftool_path is not None:
_kwargs["exiftool_path"] = self._exiftool_path
# Add the list of converters for nested processing
_kwargs["_parent_converters"] = self._converters
# Add legacy kwargs
if stream_info is not None:
if stream_info.extension is not None:
_kwargs["file_extension"] = stream_info.extension
if stream_info.url is not None:
_kwargs["url"] = stream_info.url
# Check if the converter will accept the file, and if so, try to convert it
_accepts = False
try:
_accepts = converter.accepts(file_stream, stream_info, **_kwargs)
except NotImplementedError:
pass
# accepts() should not have changed the file stream position
assert (
cur_pos == file_stream.tell()
), f"{type(converter).__name__}.accept() should NOT change the file_stream position"
# Attempt the conversion
if _accepts:
try:
res = converter.convert(file_stream, stream_info, **_kwargs)
except Exception:
failed_attempts.append(
FailedConversionAttempt(
converter=converter, exc_info=sys.exc_info()
)
)
finally:
file_stream.seek(cur_pos)
if res is not None:
# Normalize the content
res.text_content = "\n".join(
[line.rstrip() for line in re.split(r"\r?\n", res.text_content)]
)
res.text_content = re.sub(r"\n{3,}", "\n\n", res.text_content)
return res
# If we got this far without success, report any exceptions
if len(failed_attempts) > 0:
raise FileConversionException(attempts=failed_attempts)
# Nothing can handle it!
raise UnsupportedFormatException(
"Could not convert stream to Markdown. No converter attempted a conversion, suggesting that the filetype is simply not supported."
)
def register_page_converter(self, converter: DocumentConverter) -> None:
"""DEPRECATED: User register_converter instead."""
warn(
"register_page_converter is deprecated. Use register_converter instead.",
DeprecationWarning,
)
self.register_converter(converter)
def register_converter(
self,
converter: DocumentConverter,
*,
priority: float = PRIORITY_SPECIFIC_FILE_FORMAT,
) -> None:
"""
Register a DocumentConverter with a given priority.
Priorities work as follows: By default, most converters get priority
DocumentConverter.PRIORITY_SPECIFIC_FILE_FORMAT (== 0). The exceptions
are the PlainTextConverter, HtmlConverter, and ZipConverter, which get
priority PRIORITY_GENERIC_FILE_FORMAT (== 10), with lower values
being tried first (i.e., higher priority).
Just prior to conversion, the converters are sorted by priority, using
a stable sort. This means that converters with the same priority will
remain in the same order, with the most recently registered converters
appearing first.
We have tight control over the order of built-in converters, but
plugins can register converters in any order. The registration's priority
field reasserts some control over the order of converters.
Plugins can register converters with any priority, to appear before or
after the built-ins. For example, a plugin with priority 9 will run
before the PlainTextConverter, but after the built-in converters.
"""
self._converters.insert(
0, ConverterRegistration(converter=converter, priority=priority)
)
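# Illustrative sketch (not part of the original file): registering a plugin
# converter so it runs after the format-specific built-ins (priority 0) but
# before the generic PlainTextConverter (priority 10). MyConverter is a
# hypothetical DocumentConverter subclass.
#
#   md = MarkItDown()
#   md.register_converter(MyConverter(), priority=9.0)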
def _get_stream_info_guesses(
self, file_stream: BinaryIO, base_guess: StreamInfo
) -> List[StreamInfo]:
"""
Given a base guess, attempt to guess or expand on the stream info using the stream content (via magika).
"""
guesses: List[StreamInfo] = []
# Enhance the base guess with information based on the extension or mimetype
enhanced_guess = base_guess.copy_and_update()
# If there's an extension and no mimetype, try to guess the mimetype
if base_guess.mimetype is None and base_guess.extension is not None:
_m, _ = mimetypes.guess_type(
"placeholder" + base_guess.extension, strict=False
)
if _m is not None:
enhanced_guess = enhanced_guess.copy_and_update(mimetype=_m)
# If there's a mimetype and no extension, try to guess the extension
if base_guess.mimetype is not None and base_guess.extension is None:
_e = mimetypes.guess_all_extensions(base_guess.mimetype, strict=False)
if len(_e) > 0:
enhanced_guess = enhanced_guess.copy_and_update(extension=_e[0])
# Call magika to guess from the stream
cur_pos = file_stream.tell()
try:
result = self._magika.identify_stream(file_stream)
if result.status == "ok" and result.prediction.output.label != "unknown":
# If it's text, also guess the charset
charset = None
if result.prediction.output.is_text:
# Read the first 4k to guess the charset
file_stream.seek(cur_pos)
stream_page = file_stream.read(4096)
charset_result = charset_normalizer.from_bytes(stream_page).best()
if charset_result is not None:
charset = self._normalize_charset(charset_result.encoding)
# Normalize the first extension listed
guessed_extension = None
if len(result.prediction.output.extensions) > 0:
guessed_extension = "." + result.prediction.output.extensions[0]
# Determine if the guess is compatible with the base guess
compatible = True
if (
base_guess.mimetype is not None
and base_guess.mimetype != result.prediction.output.mime_type
):
compatible = False
if (
base_guess.extension is not None
and base_guess.extension.lstrip(".")
not in result.prediction.output.extensions
):
compatible = False
if (
base_guess.charset is not None
and self._normalize_charset(base_guess.charset) != charset
):
compatible = False
if compatible:
# Add the compatible base guess
guesses.append(
StreamInfo(
mimetype=base_guess.mimetype
or result.prediction.output.mime_type,
extension=base_guess.extension or guessed_extension,
charset=base_guess.charset or charset,
filename=base_guess.filename,
local_path=base_guess.local_path,
url=base_guess.url,
)
)
else:
# The magika guess was incompatible with the base guess, so add both guesses
guesses.append(enhanced_guess)
guesses.append(
StreamInfo(
mimetype=result.prediction.output.mime_type,
extension=guessed_extension,
charset=charset,
filename=base_guess.filename,
local_path=base_guess.local_path,
url=base_guess.url,
)
)
else:
# There were no other guesses, so just add the base guess
guesses.append(enhanced_guess)
finally:
file_stream.seek(cur_pos)
return guesses
def _normalize_charset(self, charset: str | None) -> str | None:
"""
Normalize a charset string to a canonical form.
"""
if charset is None:
return None
try:
return codecs.lookup(charset).name
except LookupError:
return charset
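# Illustrative note (not part of the original file): _normalize_charset leans
# on the standard library's codec registry, e.g.:
#
#   import codecs
#   codecs.lookup("UTF8").name     # -> "utf-8"
#   codecs.lookup("latin-1").name  # -> "iso8859-1"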
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/__init__.py | packages/markitdown/src/markitdown/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
from .__about__ import __version__
from ._markitdown import (
MarkItDown,
PRIORITY_SPECIFIC_FILE_FORMAT,
PRIORITY_GENERIC_FILE_FORMAT,
)
from ._base_converter import DocumentConverterResult, DocumentConverter
from ._stream_info import StreamInfo
from ._exceptions import (
MarkItDownException,
MissingDependencyException,
FailedConversionAttempt,
FileConversionException,
UnsupportedFormatException,
)
__all__ = [
"__version__",
"MarkItDown",
"DocumentConverter",
"DocumentConverterResult",
"MarkItDownException",
"MissingDependencyException",
"FailedConversionAttempt",
"FileConversionException",
"UnsupportedFormatException",
"StreamInfo",
"PRIORITY_SPECIFIC_FILE_FORMAT",
"PRIORITY_GENERIC_FILE_FORMAT",
]
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/_base_converter.py | packages/markitdown/src/markitdown/_base_converter.py | from typing import Any, BinaryIO, Optional
from ._stream_info import StreamInfo
class DocumentConverterResult:
"""The result of converting a document to Markdown."""
def __init__(
self,
markdown: str,
*,
title: Optional[str] = None,
):
"""
Initialize the DocumentConverterResult.
The only required parameter is the converted Markdown text.
The title, and any other metadata that may be added in the future, are optional.
Parameters:
- markdown: The converted Markdown text.
- title: Optional title of the document.
"""
self.markdown = markdown
self.title = title
@property
def text_content(self) -> str:
"""Soft-deprecated alias for `markdown`. New code should migrate to using `markdown` or __str__."""
return self.markdown
@text_content.setter
def text_content(self, markdown: str):
"""Soft-deprecated alias for `markdown`. New code should migrate to using `markdown` or __str__."""
self.markdown = markdown
def __str__(self) -> str:
"""Return the converted Markdown text."""
return self.markdown
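# Illustrative sketch (not part of the original file): markdown, the
# soft-deprecated text_content alias, and __str__ all expose the same text.
#
#   result = DocumentConverterResult(markdown="# Title\n\nBody", title="Title")
#   assert result.markdown == result.text_content == str(result)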
class DocumentConverter:
"""Abstract superclass of all DocumentConverters."""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
"""
Return a quick determination on if the converter should attempt converting the document.
This is primarily based on `stream_info` (typically, `stream_info.mimetype`, `stream_info.extension`).
In cases where the data is retrieved via HTTP, the `stream_info.url` might also be referenced to
make a determination (e.g., special converters for Wikipedia, YouTube, etc.).
Finally, it is conceivable that the `stream_info.filename` might be used in cases
where the filename is well-known (e.g., `Dockerfile`, `Makefile`, etc)
NOTE: The method signature is designed to match that of the convert() method. This provides some
assurance that, if accepts() returns True, the convert() method will also be able to handle the document.
IMPORTANT: In rare cases (e.g., OutlookMsgConverter) we need to read more from the stream to make a final
determination. Read operations inevitably advance the position in file_stream. In these cases, the position
MUST be reset before returning. This is because the convert() method may be called immediately
after accepts(), and will expect the file_stream to be at the original position.
E.g.,
cur_pos = file_stream.tell() # Save the current position
data = file_stream.read(100) # ... peek at the first 100 bytes, etc.
file_stream.seek(cur_pos) # Reset the position to the original position
Parameters:
- file_stream: The file-like object to convert. Must support seek(), tell(), and read() methods.
- stream_info: The StreamInfo object containing metadata about the file (mimetype, extension, charset, etc.)
- kwargs: Additional keyword arguments for the converter.
Returns:
- bool: True if the converter can handle the document, False otherwise.
"""
raise NotImplementedError(
f"The subclass, {type(self).__name__}, must implement the accepts() method to determine if they can handle the document."
)
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
"""
Convert a document to Markdown text.
Parameters:
- file_stream: The file-like object to convert. Must support seek(), tell(), and read() methods.
- stream_info: The StreamInfo object containing metadata about the file (mimetype, extension, charset, etc.)
- kwargs: Additional keyword arguments for the converter.
Returns:
- DocumentConverterResult: The result of the conversion, which includes the title and markdown content.
Raises:
- FileConversionException: If the mimetype is recognized, but the conversion fails for some other reason.
- MissingDependencyException: If the converter requires a dependency that is not installed.
"""
raise NotImplementedError("Subclasses must implement this method")
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_bing_serp_converter.py | packages/markitdown/src/markitdown/converters/_bing_serp_converter.py | import re
import base64
import binascii
from urllib.parse import parse_qs, urlparse
from typing import Any, BinaryIO
from bs4 import BeautifulSoup
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from ._markdownify import _CustomMarkdownify
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/html",
"application/xhtml",
]
ACCEPTED_FILE_EXTENSIONS = [
".html",
".htm",
]
class BingSerpConverter(DocumentConverter):
"""
Handle Bing results pages (only the organic search results).
NOTE: It is better to use the Bing API
"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
"""
Make sure we're dealing with HTML content *from* Bing.
"""
url = stream_info.url or ""
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if not re.search(r"^https://www\.bing\.com/search\?q=", url):
# Not a Bing SERP URL
return False
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
# Not HTML content
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
assert stream_info.url is not None
# Parse the query parameters
parsed_params = parse_qs(urlparse(stream_info.url).query)
query = parsed_params.get("q", [""])[0]
# Parse the stream
encoding = "utf-8" if stream_info.charset is None else stream_info.charset
soup = BeautifulSoup(file_stream, "html.parser", from_encoding=encoding)
# Clean up some formatting
for tptt in soup.find_all(class_="tptt"):
if hasattr(tptt, "string") and tptt.string:
tptt.string += " "
for slug in soup.find_all(class_="algoSlug_icon"):
slug.extract()
# Parse the algorithmic results
_markdownify = _CustomMarkdownify(**kwargs)
results = list()
for result in soup.find_all(class_="b_algo"):
if not hasattr(result, "find_all"):
continue
# Rewrite redirect urls
for a in result.find_all("a", href=True):
parsed_href = urlparse(a["href"])
qs = parse_qs(parsed_href.query)
# The destination is contained in the u parameter,
# but appears to be base64 encoded, with some prefix
if "u" in qs:
u = (
qs["u"][0][2:].strip() + "=="
) # Python 3 doesn't care about extra padding
try:
# RFC 4648 / Base64URL" variant, which uses "-" and "_"
a["href"] = base64.b64decode(u, altchars="-_").decode("utf-8")
except UnicodeDecodeError:
pass
except binascii.Error:
pass
# Convert to markdown
md_result = _markdownify.convert_soup(result).strip()
lines = [line.strip() for line in re.split(r"\n+", md_result)]
results.append("\n".join([line for line in lines if len(line) > 0]))
webpage_text = (
f"## A Bing search for '{query}' found the following results:\n\n"
+ "\n\n".join(results)
)
return DocumentConverterResult(
markdown=webpage_text,
title=None if soup.title is None else soup.title.string,
)
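# Illustrative sketch (not part of the original file): decoding a Bing
# redirect's "u" parameter as done in convert() above. Real values carry a
# two-character prefix that is stripped before decoding; the string below is
# a made-up example.
#
#   import base64
#   u = "aHR0cHM6Ly9leGFtcGxlLmNvbS8" + "=="  # extra padding is harmless
#   base64.b64decode(u, altchars="-_").decode("utf-8")
#   # -> "https://example.com/"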
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_llm_caption.py | packages/markitdown/src/markitdown/converters/_llm_caption.py | from typing import BinaryIO, Union
import base64
import mimetypes
from .._stream_info import StreamInfo
def llm_caption(
file_stream: BinaryIO, stream_info: StreamInfo, *, client, model, prompt=None
) -> Union[None, str]:
if prompt is None or prompt.strip() == "":
prompt = "Write a detailed caption for this image."
# Get the content type
content_type = stream_info.mimetype
if not content_type:
content_type, _ = mimetypes.guess_type("_dummy" + (stream_info.extension or ""))
if not content_type:
content_type = "application/octet-stream"
# Convert to base64
cur_pos = file_stream.tell()
try:
base64_image = base64.b64encode(file_stream.read()).decode("utf-8")
except Exception:
return None
finally:
file_stream.seek(cur_pos)
# Prepare the data-uri
data_uri = f"data:{content_type};base64,{base64_image}"
# Prepare the OpenAI API request
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {
"url": data_uri,
},
},
],
}
]
# Call the OpenAI API
response = client.chat.completions.create(model=model, messages=messages)
return response.choices[0].message.content
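# Illustrative sketch (not part of the original file): calling llm_caption
# with the OpenAI client. The file path and model name are hypothetical.
#
#   from openai import OpenAI
#   with open("photo.jpg", "rb") as f:
#       caption = llm_caption(
#           f,
#           StreamInfo(mimetype="image/jpeg", extension=".jpg"),
#           client=OpenAI(),
#           model="gpt-4o",
#       )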
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_wikipedia_converter.py | packages/markitdown/src/markitdown/converters/_wikipedia_converter.py | import re
import bs4
from typing import Any, BinaryIO
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from ._markdownify import _CustomMarkdownify
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/html",
"application/xhtml",
]
ACCEPTED_FILE_EXTENSIONS = [
".html",
".htm",
]
class WikipediaConverter(DocumentConverter):
"""Handle Wikipedia pages separately, focusing only on the main document content."""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
"""
Make sure we're dealing with HTML content *from* Wikipedia.
"""
url = stream_info.url or ""
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if not re.search(r"^https?:\/\/[a-zA-Z]{2,3}\.wikipedia.org\/", url):
# Not a Wikipedia URL
return False
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
# Not HTML content
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Parse the stream
encoding = "utf-8" if stream_info.charset is None else stream_info.charset
soup = bs4.BeautifulSoup(file_stream, "html.parser", from_encoding=encoding)
# Remove javascript and style blocks
for script in soup(["script", "style"]):
script.extract()
# Print only the main content
body_elm = soup.find("div", {"id": "mw-content-text"})
title_elm = soup.find("span", {"class": "mw-page-title-main"})
webpage_text = ""
main_title = None if soup.title is None else soup.title.string
if body_elm:
# What's the title
if title_elm and isinstance(title_elm, bs4.Tag):
main_title = title_elm.string
# Convert the page
webpage_text = f"# {main_title}\n\n" + _CustomMarkdownify(
**kwargs
).convert_soup(body_elm)
else:
webpage_text = _CustomMarkdownify(**kwargs).convert_soup(soup)
return DocumentConverterResult(
markdown=webpage_text,
title=main_title,
)
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_plain_text_converter.py | packages/markitdown/src/markitdown/converters/_plain_text_converter.py | import sys
from typing import BinaryIO, Any
from charset_normalizer import from_bytes
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
import mammoth # noqa: F401
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/",
"application/json",
"application/markdown",
]
ACCEPTED_FILE_EXTENSIONS = [
".txt",
".text",
".md",
".markdown",
".json",
".jsonl",
]
class PlainTextConverter(DocumentConverter):
"""Anything with content type text/plain"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
# If we have a charset, we can safely assume it's text
# With Magika in the earlier stages, this handles most cases
if stream_info.charset is not None:
return True
# Otherwise, check the mimetype and extension
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
if stream_info.charset:
text_content = file_stream.read().decode(stream_info.charset)
else:
text_content = str(from_bytes(file_stream.read()).best())
return DocumentConverterResult(markdown=text_content)
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_transcribe_audio.py | packages/markitdown/src/markitdown/converters/_transcribe_audio.py | import io
import sys
from typing import BinaryIO
from .._exceptions import MissingDependencyException
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
# Suppress some warnings on library import
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=SyntaxWarning)
import speech_recognition as sr
import pydub
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
def transcribe_audio(file_stream: BinaryIO, *, audio_format: str = "wav") -> str:
# Check for installed dependencies
if _dependency_exc_info is not None:
raise MissingDependencyException(
"Speech transcription requires installing MarkItdown with the [audio-transcription] optional dependencies. E.g., `pip install markitdown[audio-transcription]` or `pip install markitdown[all]`"
) from _dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_dependency_exc_info[2]
)
if audio_format in ["wav", "aiff", "flac"]:
audio_source = file_stream
elif audio_format in ["mp3", "mp4"]:
audio_segment = pydub.AudioSegment.from_file(file_stream, format=audio_format)
audio_source = io.BytesIO()
audio_segment.export(audio_source, format="wav")
audio_source.seek(0)
else:
raise ValueError(f"Unsupported audio format: {audio_format}")
recognizer = sr.Recognizer()
with sr.AudioFile(audio_source) as source:
audio = recognizer.record(source)
transcript = recognizer.recognize_google(audio).strip()
return "[No speech detected]" if transcript == "" else transcript
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_epub_converter.py | packages/markitdown/src/markitdown/converters/_epub_converter.py | import os
import zipfile
from defusedxml import minidom
from xml.dom.minidom import Document
from typing import BinaryIO, Any, Dict, List
from ._html_converter import HtmlConverter
from .._base_converter import DocumentConverterResult
from .._stream_info import StreamInfo
ACCEPTED_MIME_TYPE_PREFIXES = [
"application/epub",
"application/epub+zip",
"application/x-epub+zip",
]
ACCEPTED_FILE_EXTENSIONS = [".epub"]
MIME_TYPE_MAPPING = {
".html": "text/html",
".xhtml": "application/xhtml+xml",
}
class EpubConverter(HtmlConverter):
"""
Converts EPUB files to Markdown. Style information (e.g., headings) and tables are preserved where possible.
"""
def __init__(self):
super().__init__()
self._html_converter = HtmlConverter()
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
with zipfile.ZipFile(file_stream, "r") as z:
# Extracts metadata (title, authors, language, publisher, date, description, cover) from an EPUB file.
# Locate content.opf
container_dom = minidom.parse(z.open("META-INF/container.xml"))
opf_path = container_dom.getElementsByTagName("rootfile")[0].getAttribute(
"full-path"
)
# Parse content.opf
opf_dom = minidom.parse(z.open(opf_path))
metadata: Dict[str, Any] = {
"title": self._get_text_from_node(opf_dom, "dc:title"),
"authors": self._get_all_texts_from_nodes(opf_dom, "dc:creator"),
"language": self._get_text_from_node(opf_dom, "dc:language"),
"publisher": self._get_text_from_node(opf_dom, "dc:publisher"),
"date": self._get_text_from_node(opf_dom, "dc:date"),
"description": self._get_text_from_node(opf_dom, "dc:description"),
"identifier": self._get_text_from_node(opf_dom, "dc:identifier"),
}
# Extract manifest items (ID → href mapping)
manifest = {
item.getAttribute("id"): item.getAttribute("href")
for item in opf_dom.getElementsByTagName("item")
}
# Extract spine order (ID refs)
spine_items = opf_dom.getElementsByTagName("itemref")
spine_order = [item.getAttribute("idref") for item in spine_items]
# Convert spine order to actual file paths
base_path = "/".join(
opf_path.split("/")[:-1]
) # Get base directory of content.opf
spine = [
f"{base_path}/{manifest[item_id]}" if base_path else manifest[item_id]
for item_id in spine_order
if item_id in manifest
]
# Extract and convert the content
markdown_content: List[str] = []
for file in spine:
if file in z.namelist():
with z.open(file) as f:
filename = os.path.basename(file)
extension = os.path.splitext(filename)[1].lower()
mimetype = MIME_TYPE_MAPPING.get(extension)
converted_content = self._html_converter.convert(
f,
StreamInfo(
mimetype=mimetype,
extension=extension,
filename=filename,
),
)
markdown_content.append(converted_content.markdown.strip())
# Format and add the metadata
metadata_markdown = []
for key, value in metadata.items():
if isinstance(value, list):
value = ", ".join(value)
if value:
metadata_markdown.append(f"**{key.capitalize()}:** {value}")
markdown_content.insert(0, "\n".join(metadata_markdown))
return DocumentConverterResult(
markdown="\n\n".join(markdown_content), title=metadata["title"]
)
def _get_text_from_node(self, dom: Document, tag_name: str) -> str | None:
"""Convenience function to extract a single occurrence of a tag (e.g., title)."""
texts = self._get_all_texts_from_nodes(dom, tag_name)
if len(texts) > 0:
return texts[0]
else:
return None
def _get_all_texts_from_nodes(self, dom: Document, tag_name: str) -> List[str]:
"""Helper function to extract all occurrences of a tag (e.g., multiple authors)."""
texts: List[str] = []
for node in dom.getElementsByTagName(tag_name):
if node.firstChild and hasattr(node.firstChild, "nodeValue"):
texts.append(node.firstChild.nodeValue.strip())
return texts
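# Illustrative sketch (not part of the original file): the OPF structures the
# converter walks. Given a hypothetical manifest and spine such as
#
#   <item id="ch1" href="ch1.xhtml"/> <item id="ch2" href="ch2.xhtml"/>
#   <itemref idref="ch2"/> <itemref idref="ch1"/>
#
# the manifest dict becomes {"ch1": "ch1.xhtml", "ch2": "ch2.xhtml"} and the
# resolved spine is ["OEBPS/ch2.xhtml", "OEBPS/ch1.xhtml"] (reading order,
# prefixed with content.opf's base directory, assumed here to be "OEBPS").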
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_csv_converter.py | packages/markitdown/src/markitdown/converters/_csv_converter.py | import csv
import io
from typing import BinaryIO, Any
from charset_normalizer import from_bytes
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/csv",
"application/csv",
]
ACCEPTED_FILE_EXTENSIONS = [".csv"]
class CsvConverter(DocumentConverter):
"""
Converts CSV files to Markdown tables.
"""
def __init__(self):
super().__init__()
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Read the file content
if stream_info.charset:
content = file_stream.read().decode(stream_info.charset)
else:
content = str(from_bytes(file_stream.read()).best())
# Parse CSV content
reader = csv.reader(io.StringIO(content))
rows = list(reader)
if not rows:
return DocumentConverterResult(markdown="")
# Create markdown table
markdown_table = []
# Add header row
markdown_table.append("| " + " | ".join(rows[0]) + " |")
# Add separator row
markdown_table.append("| " + " | ".join(["---"] * len(rows[0])) + " |")
# Add data rows
for row in rows[1:]:
# Make sure row has the same number of columns as header
while len(row) < len(rows[0]):
row.append("")
# Truncate if row has more columns than header
row = row[: len(rows[0])]
markdown_table.append("| " + " | ".join(row) + " |")
result = "\n".join(markdown_table)
return DocumentConverterResult(markdown=result)
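# Illustrative sketch (not part of the original file): a worked example of
# the table construction above. The CSV
#
#   name,role
#   ada,engineer
#
# becomes:
#
#   | name | role |
#   | --- | --- |
#   | ada | engineer |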
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_html_converter.py | packages/markitdown/src/markitdown/converters/_html_converter.py | import io
from typing import Any, BinaryIO, Optional
from bs4 import BeautifulSoup
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from ._markdownify import _CustomMarkdownify
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/html",
"application/xhtml",
]
ACCEPTED_FILE_EXTENSIONS = [
".html",
".htm",
]
class HtmlConverter(DocumentConverter):
"""Anything with content type text/html"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Parse the stream
encoding = "utf-8" if stream_info.charset is None else stream_info.charset
soup = BeautifulSoup(file_stream, "html.parser", from_encoding=encoding)
# Remove javascript and style blocks
for script in soup(["script", "style"]):
script.extract()
# Print only the main content
body_elm = soup.find("body")
webpage_text = ""
if body_elm:
webpage_text = _CustomMarkdownify(**kwargs).convert_soup(body_elm)
else:
webpage_text = _CustomMarkdownify(**kwargs).convert_soup(soup)
assert isinstance(webpage_text, str)
# remove leading and trailing \n
webpage_text = webpage_text.strip()
return DocumentConverterResult(
markdown=webpage_text,
title=None if soup.title is None else soup.title.string,
)
def convert_string(
self, html_content: str, *, url: Optional[str] = None, **kwargs
) -> DocumentConverterResult:
"""
Non-standard convenience method to convert a string to markdown.
Given that many converters produce HTML as intermediate output, this
allows for easy conversion of HTML to markdown.
"""
return self.convert(
file_stream=io.BytesIO(html_content.encode("utf-8")),
stream_info=StreamInfo(
mimetype="text/html",
extension=".html",
charset="utf-8",
url=url,
),
**kwargs,
)
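# Illustrative sketch (not part of the original file): convert_string() in use.
# The exact whitespace of the output depends on the markdownify options.
#
#   converter = HtmlConverter()
#   result = converter.convert_string("<h1>Hello</h1><p>World</p>")
#   result.markdown  # roughly "# Hello\n\nWorld"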
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_pdf_converter.py | packages/markitdown/src/markitdown/converters/_pdf_converter.py | import sys
import io
from typing import BinaryIO, Any
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from .._exceptions import MissingDependencyException, MISSING_DEPENDENCY_MESSAGE
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
import pdfminer
import pdfminer.high_level
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
ACCEPTED_MIME_TYPE_PREFIXES = [
"application/pdf",
"application/x-pdf",
]
ACCEPTED_FILE_EXTENSIONS = [".pdf"]
class PdfConverter(DocumentConverter):
"""
Converts PDFs to Markdown. Most style information is ignored, so the results are essentially plain-text.
"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Check the dependencies
if _dependency_exc_info is not None:
raise MissingDependencyException(
MISSING_DEPENDENCY_MESSAGE.format(
converter=type(self).__name__,
extension=".pdf",
feature="pdf",
)
) from _dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_dependency_exc_info[2]
)
assert isinstance(file_stream, io.IOBase) # for mypy
return DocumentConverterResult(
markdown=pdfminer.high_level.extract_text(file_stream),
)
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_ipynb_converter.py | packages/markitdown/src/markitdown/converters/_ipynb_converter.py | from typing import BinaryIO, Any
import json
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._exceptions import FileConversionException
from .._stream_info import StreamInfo
CANDIDATE_MIME_TYPE_PREFIXES = [
"application/json",
]
ACCEPTED_FILE_EXTENSIONS = [".ipynb"]
class IpynbConverter(DocumentConverter):
"""Converts Jupyter Notebook (.ipynb) files to Markdown."""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in CANDIDATE_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
# Read further to see if it's a notebook
cur_pos = file_stream.tell()
try:
encoding = stream_info.charset or "utf-8"
notebook_content = file_stream.read().decode(encoding)
return (
"nbformat" in notebook_content
and "nbformat_minor" in notebook_content
)
finally:
file_stream.seek(cur_pos)
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Parse and convert the notebook
encoding = stream_info.charset or "utf-8"
notebook_content = file_stream.read().decode(encoding=encoding)
return self._convert(json.loads(notebook_content))
def _convert(self, notebook_content: dict) -> DocumentConverterResult:
"""Helper function that converts notebook JSON content to Markdown."""
try:
md_output = []
title = None
for cell in notebook_content.get("cells", []):
cell_type = cell.get("cell_type", "")
source_lines = cell.get("source", [])
if cell_type == "markdown":
md_output.append("".join(source_lines))
# Extract the first # heading as title if not already found
if title is None:
for line in source_lines:
if line.startswith("# "):
title = line.lstrip("# ").strip()
break
elif cell_type == "code":
# Code cells are wrapped in Markdown code blocks
md_output.append(f"```python\n{''.join(source_lines)}\n```")
elif cell_type == "raw":
md_output.append(f"```\n{''.join(source_lines)}\n```")
md_text = "\n\n".join(md_output)
# Check for title in notebook metadata
title = notebook_content.get("metadata", {}).get("title", title)
return DocumentConverterResult(
markdown=md_text,
title=title,
)
except Exception as e:
raise FileConversionException(
f"Error converting .ipynb file: {str(e)}"
) from e
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_rss_converter.py | packages/markitdown/src/markitdown/converters/_rss_converter.py | from defusedxml import minidom
from xml.dom.minidom import Document, Element
from typing import BinaryIO, Any, Union
from bs4 import BeautifulSoup
from ._markdownify import _CustomMarkdownify
from .._stream_info import StreamInfo
from .._base_converter import DocumentConverter, DocumentConverterResult
PRECISE_MIME_TYPE_PREFIXES = [
"application/rss",
"application/rss+xml",
"application/atom",
"application/atom+xml",
]
PRECISE_FILE_EXTENSIONS = [".rss", ".atom"]
CANDIDATE_MIME_TYPE_PREFIXES = [
"text/xml",
"application/xml",
]
CANDIDATE_FILE_EXTENSIONS = [
".xml",
]
class RssConverter(DocumentConverter):
"""Convert RSS / Atom type to markdown"""
def __init__(self):
super().__init__()
self._kwargs = {}
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
# Check for precise mimetypes and file extensions
if extension in PRECISE_FILE_EXTENSIONS:
return True
for prefix in PRECISE_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
# Check for candidate mimetypes and file extensions
if extension in CANDIDATE_FILE_EXTENSIONS:
return self._check_xml(file_stream)
for prefix in CANDIDATE_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return self._check_xml(file_stream)
return False
def _check_xml(self, file_stream: BinaryIO) -> bool:
cur_pos = file_stream.tell()
try:
doc = minidom.parse(file_stream)
return self._feed_type(doc) is not None
except BaseException as _:
pass
finally:
file_stream.seek(cur_pos)
return False
def _feed_type(self, doc: Any) -> str | None:
if doc.getElementsByTagName("rss"):
return "rss"
elif doc.getElementsByTagName("feed"):
root = doc.getElementsByTagName("feed")[0]
if root.getElementsByTagName("entry"):
# An Atom feed must have a root element of <feed> and at least one <entry>
return "atom"
return None
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
self._kwargs = kwargs
doc = minidom.parse(file_stream)
feed_type = self._feed_type(doc)
if feed_type == "rss":
return self._parse_rss_type(doc)
elif feed_type == "atom":
return self._parse_atom_type(doc)
else:
raise ValueError("Unknown feed type")
def _parse_atom_type(self, doc: Document) -> DocumentConverterResult:
"""Parse the type of an Atom feed.
Returns None if the feed type is not recognized or something goes wrong.
"""
root = doc.getElementsByTagName("feed")[0]
title = self._get_data_by_tag_name(root, "title")
subtitle = self._get_data_by_tag_name(root, "subtitle")
entries = root.getElementsByTagName("entry")
md_text = f"# {title}\n"
if subtitle:
md_text += f"{subtitle}\n"
for entry in entries:
entry_title = self._get_data_by_tag_name(entry, "title")
entry_summary = self._get_data_by_tag_name(entry, "summary")
entry_updated = self._get_data_by_tag_name(entry, "updated")
entry_content = self._get_data_by_tag_name(entry, "content")
if entry_title:
md_text += f"\n## {entry_title}\n"
if entry_updated:
md_text += f"Updated on: {entry_updated}\n"
if entry_summary:
md_text += self._parse_content(entry_summary)
if entry_content:
md_text += self._parse_content(entry_content)
return DocumentConverterResult(
markdown=md_text,
title=title,
)
def _parse_rss_type(self, doc: Document) -> DocumentConverterResult:
"""Parse the type of an RSS feed.
Returns None if the feed type is not recognized or something goes wrong.
"""
root = doc.getElementsByTagName("rss")[0]
channel_list = root.getElementsByTagName("channel")
if not channel_list:
raise ValueError("No channel found in RSS feed")
channel = channel_list[0]
channel_title = self._get_data_by_tag_name(channel, "title")
channel_description = self._get_data_by_tag_name(channel, "description")
items = channel.getElementsByTagName("item")
if channel_title:
md_text = f"# {channel_title}\n"
if channel_description:
md_text += f"{channel_description}\n"
for item in items:
title = self._get_data_by_tag_name(item, "title")
description = self._get_data_by_tag_name(item, "description")
pubDate = self._get_data_by_tag_name(item, "pubDate")
content = self._get_data_by_tag_name(item, "content:encoded")
if title:
md_text += f"\n## {title}\n"
if pubDate:
md_text += f"Published on: {pubDate}\n"
if description:
md_text += self._parse_content(description)
if content:
md_text += self._parse_content(content)
return DocumentConverterResult(
markdown=md_text,
title=channel_title,
)
def _parse_content(self, content: str) -> str:
"""Parse the content of an RSS feed item"""
try:
# using bs4 because many RSS feeds have HTML-styled content
soup = BeautifulSoup(content, "html.parser")
return _CustomMarkdownify(**self._kwargs).convert_soup(soup)
except BaseException as _:
return content
def _get_data_by_tag_name(
self, element: Element, tag_name: str
) -> Union[str, None]:
"""Get data from first child element with the given tag name.
Returns None when no such element is found.
"""
nodes = element.getElementsByTagName(tag_name)
if not nodes:
return None
fc = nodes[0].firstChild
if fc:
if hasattr(fc, "data"):
return fc.data
return None
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_exiftool.py | packages/markitdown/src/markitdown/converters/_exiftool.py | import json
import locale
import subprocess
from typing import Any, BinaryIO, Union
def _parse_version(version: str) -> tuple:
return tuple(map(int, (version.split("."))))
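# Illustrative note (not part of the original file): tuple comparison is what
# makes the version check below safe, e.g.:
#
#   _parse_version("12.24")            # -> (12, 24)
#   _parse_version("12.3") < (12, 24)  # -> True, unlike the string "12.3" < "12.24"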
def exiftool_metadata(
file_stream: BinaryIO,
*,
exiftool_path: Union[str, None],
) -> Any: # Need a better type for json data
# Nothing to do
if not exiftool_path:
return {}
# Verify exiftool version
try:
version_output = subprocess.run(
[exiftool_path, "-ver"],
capture_output=True,
text=True,
check=True,
).stdout.strip()
version = _parse_version(version_output)
min_version = (12, 24)
if version < min_version:
raise RuntimeError(
f"ExifTool version {version_output} is vulnerable to CVE-2021-22204. "
"Please upgrade to version 12.24 or later."
)
except (subprocess.CalledProcessError, ValueError) as e:
raise RuntimeError("Failed to verify ExifTool version.") from e
# Run exiftool
cur_pos = file_stream.tell()
try:
output = subprocess.run(
[exiftool_path, "-json", "-"],
input=file_stream.read(),
capture_output=True,
text=False,
).stdout
return json.loads(
output.decode(locale.getpreferredencoding(False)),
)[0]
finally:
file_stream.seek(cur_pos)
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_xlsx_converter.py | packages/markitdown/src/markitdown/converters/_xlsx_converter.py | import sys
from typing import BinaryIO, Any
from ._html_converter import HtmlConverter
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._exceptions import MissingDependencyException, MISSING_DEPENDENCY_MESSAGE
from .._stream_info import StreamInfo
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_xlsx_dependency_exc_info = None
try:
import pandas as pd
import openpyxl # noqa: F401
except ImportError:
_xlsx_dependency_exc_info = sys.exc_info()
_xls_dependency_exc_info = None
try:
import pandas as pd # noqa: F811
import xlrd # noqa: F401
except ImportError:
_xls_dependency_exc_info = sys.exc_info()
ACCEPTED_XLSX_MIME_TYPE_PREFIXES = [
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
]
ACCEPTED_XLSX_FILE_EXTENSIONS = [".xlsx"]
ACCEPTED_XLS_MIME_TYPE_PREFIXES = [
"application/vnd.ms-excel",
"application/excel",
]
ACCEPTED_XLS_FILE_EXTENSIONS = [".xls"]
class XlsxConverter(DocumentConverter):
"""
Converts XLSX files to Markdown, with each sheet presented as a separate Markdown table.
"""
def __init__(self):
super().__init__()
self._html_converter = HtmlConverter()
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_XLSX_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_XLSX_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Check the dependencies
if _xlsx_dependency_exc_info is not None:
raise MissingDependencyException(
MISSING_DEPENDENCY_MESSAGE.format(
converter=type(self).__name__,
extension=".xlsx",
feature="xlsx",
)
) from _xlsx_dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_xlsx_dependency_exc_info[2]
)
sheets = pd.read_excel(file_stream, sheet_name=None, engine="openpyxl")
md_content = ""
for s in sheets:
md_content += f"## {s}\n"
html_content = sheets[s].to_html(index=False)
md_content += (
self._html_converter.convert_string(
html_content, **kwargs
).markdown.strip()
+ "\n\n"
)
return DocumentConverterResult(markdown=md_content.strip())
class XlsConverter(DocumentConverter):
"""
Converts XLS files to Markdown, with each sheet presented as a separate Markdown table.
"""
def __init__(self):
super().__init__()
self._html_converter = HtmlConverter()
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_XLS_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_XLS_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Load the dependencies
if _xls_dependency_exc_info is not None:
raise MissingDependencyException(
MISSING_DEPENDENCY_MESSAGE.format(
converter=type(self).__name__,
extension=".xls",
feature="xls",
)
) from _xls_dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_xls_dependency_exc_info[2]
)
sheets = pd.read_excel(file_stream, sheet_name=None, engine="xlrd")
md_content = ""
for s in sheets:
md_content += f"## {s}\n"
html_content = sheets[s].to_html(index=False)
md_content += (
self._html_converter.convert_string(
html_content, **kwargs
).markdown.strip()
+ "\n\n"
)
return DocumentConverterResult(markdown=md_content.strip())
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_youtube_converter.py | packages/markitdown/src/markitdown/converters/_youtube_converter.py | import json
import time
import re
import bs4
from typing import Any, BinaryIO, Dict, List, Union
from urllib.parse import parse_qs, urlparse, unquote
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
# Optional YouTube transcription support
try:
# Suppress some warnings on library import
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SyntaxWarning)
# Patch submitted upstream to fix the SyntaxWarning
from youtube_transcript_api import YouTubeTranscriptApi
IS_YOUTUBE_TRANSCRIPT_CAPABLE = True
except ModuleNotFoundError:
IS_YOUTUBE_TRANSCRIPT_CAPABLE = False
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/html",
"application/xhtml",
]
ACCEPTED_FILE_EXTENSIONS = [
".html",
".htm",
]
class YouTubeConverter(DocumentConverter):
"""Handle YouTube specially, focusing on the video title, description, and transcript."""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
"""
Make sure we're dealing with HTML content *from* YouTube.
"""
url = stream_info.url or ""
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
url = unquote(url)
url = url.replace(r"\?", "?").replace(r"\=", "=")
if not url.startswith("https://www.youtube.com/watch?"):
# Not a YouTube URL
return False
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
# Not HTML content
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Parse the stream
encoding = "utf-8" if stream_info.charset is None else stream_info.charset
soup = bs4.BeautifulSoup(file_stream, "html.parser", from_encoding=encoding)
# Read the meta tags
metadata: Dict[str, str] = {}
if soup.title and soup.title.string:
metadata["title"] = soup.title.string
for meta in soup(["meta"]):
if not isinstance(meta, bs4.Tag):
continue
for a in meta.attrs:
if a in ["itemprop", "property", "name"]:
key = str(meta.get(a, ""))
content = str(meta.get("content", ""))
if key and content: # Only add non-empty content
metadata[key] = content
break
# Try reading the description
try:
for script in soup(["script"]):
if not isinstance(script, bs4.Tag):
continue
if not script.string: # Skip empty scripts
continue
content = script.string
if "ytInitialData" in content:
match = re.search(r"var ytInitialData = ({.*?});", content)
if match:
data = json.loads(match.group(1))
attrdesc = self._findKey(data, "attributedDescriptionBodyText")
if attrdesc and isinstance(attrdesc, dict):
metadata["description"] = str(attrdesc.get("content", ""))
break
except Exception as e:
print(f"Error extracting description: {e}")
pass
# Start preparing the page
webpage_text = "# YouTube\n"
title = self._get(metadata, ["title", "og:title", "name"]) # type: ignore
assert isinstance(title, str)
if title:
webpage_text += f"\n## {title}\n"
stats = ""
views = self._get(metadata, ["interactionCount"]) # type: ignore
if views:
stats += f"- **Views:** {views}\n"
keywords = self._get(metadata, ["keywords"]) # type: ignore
if keywords:
stats += f"- **Keywords:** {keywords}\n"
runtime = self._get(metadata, ["duration"]) # type: ignore
if runtime:
stats += f"- **Runtime:** {runtime}\n"
if len(stats) > 0:
webpage_text += f"\n### Video Metadata\n{stats}\n"
description = self._get(metadata, ["description", "og:description"]) # type: ignore
if description:
webpage_text += f"\n### Description\n{description}\n"
if IS_YOUTUBE_TRANSCRIPT_CAPABLE:
ytt_api = YouTubeTranscriptApi()
transcript_text = ""
parsed_url = urlparse(stream_info.url) # type: ignore
params = parse_qs(parsed_url.query) # type: ignore
if "v" in params and params["v"][0]:
video_id = str(params["v"][0])
transcript_list = ytt_api.list(video_id)
languages = ["en"]
for transcript in transcript_list:
languages.append(transcript.language_code)
break
try:
youtube_transcript_languages = kwargs.get(
"youtube_transcript_languages", languages
)
# Retry the transcript fetching operation
transcript = self._retry_operation(
lambda: ytt_api.fetch(
video_id, languages=youtube_transcript_languages
),
retries=3, # Retry 3 times
delay=2, # 2 seconds delay between retries
)
if transcript:
transcript_text = " ".join(
[part.text for part in transcript]
) # type: ignore
except Exception as e:
# No transcript available
if len(languages) == 1:
print(f"Error fetching transcript: {e}")
else:
# Translate transcript into first kwarg
transcript = (
transcript_list.find_transcript(languages)
.translate(youtube_transcript_languages[0])
.fetch()
)
transcript_text = " ".join([part.text for part in transcript])
if transcript_text:
webpage_text += f"\n### Transcript\n{transcript_text}\n"
title = title if title else (soup.title.string if soup.title else "")
assert isinstance(title, str)
return DocumentConverterResult(
markdown=webpage_text,
title=title,
)
def _get(
self,
metadata: Dict[str, str],
keys: List[str],
default: Union[str, None] = None,
) -> Union[str, None]:
"""Get first non-empty value from metadata matching given keys."""
for k in keys:
if k in metadata:
return metadata[k]
return default
    def _findKey(self, obj: Any, key: str) -> Any:
        """Recursively search for a key in nested dictionary/list structures.

        Returns whatever value is stored under the key (callers may receive
        a dict, e.g. for "attributedDescriptionBodyText"), or None.
        """
        if isinstance(obj, list):
            for elm in obj:
                ret = self._findKey(elm, key)
                if ret is not None:
                    return ret
        elif isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    return v
                if result := self._findKey(v, key):
                    return result
        return None
    def _retry_operation(self, operation, retries=3, delay=2):
        """Retries the operation, chaining the last error if all attempts fail."""
        last_exc: Union[Exception, None] = None
        for attempt in range(retries):
            try:
                return operation()  # Attempt the operation
            except Exception as e:
                last_exc = e
                print(f"Attempt {attempt + 1} failed: {e}")
                if attempt < retries - 1:
                    time.sleep(delay)  # Wait before retrying
        # If all attempts fail, raise with the last exception chained
        raise Exception(f"Operation failed after {retries} attempts.") from last_exc
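# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# The description extraction above reduces to: find the <script> that assigns
# `var ytInitialData = {...};`, capture the JSON object with a non-greedy regex,
# and parse it. A self-contained miniature; the payload below is made up and
# stands in for a real watch page:
if __name__ == "__main__":
    import json as _json
    import re as _re

    _fake_script = 'var ytInitialData = {"contents": {"text": "hi"}};'
    _m = _re.search(r"var ytInitialData = ({.*?});", _fake_script)
    if _m:
        print(_json.loads(_m.group(1))["contents"]["text"])  # -> hi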
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_outlook_msg_converter.py | packages/markitdown/src/markitdown/converters/_outlook_msg_converter.py | import sys
from typing import Any, Union, BinaryIO
from .._stream_info import StreamInfo
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._exceptions import MissingDependencyException, MISSING_DEPENDENCY_MESSAGE
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
olefile = None
try:
import olefile # type: ignore[no-redef]
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
ACCEPTED_MIME_TYPE_PREFIXES = [
"application/vnd.ms-outlook",
]
ACCEPTED_FILE_EXTENSIONS = [".msg"]
class OutlookMsgConverter(DocumentConverter):
"""Converts Outlook .msg files to markdown by extracting email metadata and content.
Uses the olefile package to parse the .msg file structure and extract:
- Email headers (From, To, Subject)
- Email body content
"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
# Check the extension and mimetype
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
# Brute force, check if we have an OLE file
cur_pos = file_stream.tell()
try:
if olefile and not olefile.isOleFile(file_stream):
return False
finally:
file_stream.seek(cur_pos)
        # Brute force, check if it's an Outlook file
try:
if olefile is not None:
msg = olefile.OleFileIO(file_stream)
toc = "\n".join([str(stream) for stream in msg.listdir()])
return (
"__properties_version1.0" in toc
and "__recip_version1.0_#00000000" in toc
)
        except Exception:
            pass
finally:
file_stream.seek(cur_pos)
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
        # Check the dependencies
if _dependency_exc_info is not None:
raise MissingDependencyException(
MISSING_DEPENDENCY_MESSAGE.format(
converter=type(self).__name__,
extension=".msg",
feature="outlook",
)
) from _dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_dependency_exc_info[2]
)
assert (
olefile is not None
) # If we made it this far, olefile should be available
msg = olefile.OleFileIO(file_stream)
# Extract email metadata
md_content = "# Email Message\n\n"
# Get headers
headers = {
"From": self._get_stream_data(msg, "__substg1.0_0C1F001F"),
"To": self._get_stream_data(msg, "__substg1.0_0E04001F"),
"Subject": self._get_stream_data(msg, "__substg1.0_0037001F"),
}
# Add headers to markdown
for key, value in headers.items():
if value:
md_content += f"**{key}:** {value}\n"
md_content += "\n## Content\n\n"
# Get email body
body = self._get_stream_data(msg, "__substg1.0_1000001F")
if body:
md_content += body
msg.close()
return DocumentConverterResult(
markdown=md_content.strip(),
title=headers.get("Subject"),
)
def _get_stream_data(self, msg: Any, stream_path: str) -> Union[str, None]:
"""Helper to safely extract and decode stream data from the MSG file."""
assert olefile is not None
assert isinstance(
msg, olefile.OleFileIO
) # Ensure msg is of the correct type (type hinting is not possible with the optional olefile package)
try:
if msg.exists(stream_path):
data = msg.openstream(stream_path).read()
# Try UTF-16 first (common for .msg files)
try:
return data.decode("utf-16-le").strip()
except UnicodeDecodeError:
# Fall back to UTF-8
try:
return data.decode("utf-8").strip()
except UnicodeDecodeError:
# Last resort - ignore errors
return data.decode("utf-8", errors="ignore").strip()
except Exception:
pass
return None
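# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# The decode cascade in _get_stream_data prefers UTF-16-LE (the usual encoding
# for string properties inside .msg files), then UTF-8, then UTF-8 with errors
# ignored. The same ladder as a standalone helper, run on made-up bytes:
if __name__ == "__main__":
    def _decode_best_effort(data: bytes) -> str:
        for enc, errors in (("utf-16-le", "strict"), ("utf-8", "strict"), ("utf-8", "ignore")):
            try:
                return data.decode(enc, errors).strip()
            except UnicodeDecodeError:
                continue
        return ""

    print(_decode_best_effort("Subject".encode("utf-16-le")))  # -> Subject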
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_pptx_converter.py | packages/markitdown/src/markitdown/converters/_pptx_converter.py | import sys
import base64
import os
import io
import re
import html
from typing import BinaryIO, Any
from operator import attrgetter
from ._html_converter import HtmlConverter
from ._llm_caption import llm_caption
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from .._exceptions import MissingDependencyException, MISSING_DEPENDENCY_MESSAGE
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
import pptx
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
ACCEPTED_MIME_TYPE_PREFIXES = [
"application/vnd.openxmlformats-officedocument.presentationml",
]
ACCEPTED_FILE_EXTENSIONS = [".pptx"]
class PptxConverter(DocumentConverter):
"""
Converts PPTX files to Markdown. Supports heading, tables and images with alt text.
"""
def __init__(self):
super().__init__()
self._html_converter = HtmlConverter()
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Check the dependencies
if _dependency_exc_info is not None:
raise MissingDependencyException(
MISSING_DEPENDENCY_MESSAGE.format(
converter=type(self).__name__,
extension=".pptx",
feature="pptx",
)
) from _dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_dependency_exc_info[2]
)
# Perform the conversion
presentation = pptx.Presentation(file_stream)
md_content = ""
slide_num = 0
for slide in presentation.slides:
slide_num += 1
md_content += f"\n\n<!-- Slide number: {slide_num} -->\n"
title = slide.shapes.title
def get_shape_content(shape, **kwargs):
nonlocal md_content
# Pictures
if self._is_picture(shape):
# https://github.com/scanny/python-pptx/pull/512#issuecomment-1713100069
llm_description = ""
alt_text = ""
# Potentially generate a description using an LLM
llm_client = kwargs.get("llm_client")
llm_model = kwargs.get("llm_model")
if llm_client is not None and llm_model is not None:
# Prepare a file_stream and stream_info for the image data
image_filename = shape.image.filename
image_extension = None
if image_filename:
image_extension = os.path.splitext(image_filename)[1]
image_stream_info = StreamInfo(
mimetype=shape.image.content_type,
extension=image_extension,
filename=image_filename,
)
image_stream = io.BytesIO(shape.image.blob)
# Caption the image
try:
llm_description = llm_caption(
image_stream,
image_stream_info,
client=llm_client,
model=llm_model,
prompt=kwargs.get("llm_prompt"),
)
except Exception:
# Unable to generate a description
pass
# Also grab any description embedded in the deck
try:
alt_text = shape._element._nvXxPr.cNvPr.attrib.get("descr", "")
except Exception:
# Unable to get alt text
pass
# Prepare the alt, escaping any special characters
                    alt_text = "\n".join(filter(None, [llm_description, alt_text])) or shape.name
alt_text = re.sub(r"[\r\n\[\]]", " ", alt_text)
alt_text = re.sub(r"\s+", " ", alt_text).strip()
# If keep_data_uris is True, use base64 encoding for images
if kwargs.get("keep_data_uris", False):
blob = shape.image.blob
content_type = shape.image.content_type or "image/png"
b64_string = base64.b64encode(blob).decode("utf-8")
                        md_content += f"\n![{alt_text}](data:{content_type};base64,{b64_string})\n"
else:
# A placeholder name
filename = re.sub(r"\W", "", shape.name) + ".jpg"
                        md_content += "\n![" + alt_text + "](" + filename + ")\n"
# Tables
if self._is_table(shape):
md_content += self._convert_table_to_markdown(shape.table, **kwargs)
# Charts
if shape.has_chart:
md_content += self._convert_chart_to_markdown(shape.chart)
# Text areas
elif shape.has_text_frame:
if shape == title:
md_content += "# " + shape.text.lstrip() + "\n"
else:
md_content += shape.text + "\n"
# Group Shapes
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.GROUP:
sorted_shapes = sorted(
shape.shapes,
key=lambda x: (
float("-inf") if not x.top else x.top,
float("-inf") if not x.left else x.left,
),
)
for subshape in sorted_shapes:
get_shape_content(subshape, **kwargs)
sorted_shapes = sorted(
slide.shapes,
key=lambda x: (
float("-inf") if not x.top else x.top,
float("-inf") if not x.left else x.left,
),
)
for shape in sorted_shapes:
get_shape_content(shape, **kwargs)
md_content = md_content.strip()
if slide.has_notes_slide:
md_content += "\n\n### Notes:\n"
notes_frame = slide.notes_slide.notes_text_frame
if notes_frame is not None:
md_content += notes_frame.text
md_content = md_content.strip()
return DocumentConverterResult(markdown=md_content.strip())
def _is_picture(self, shape):
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.PICTURE:
return True
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.PLACEHOLDER:
if hasattr(shape, "image"):
return True
return False
def _is_table(self, shape):
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.TABLE:
return True
return False
def _convert_table_to_markdown(self, table, **kwargs):
# Write the table as HTML, then convert it to Markdown
html_table = "<html><body><table>"
first_row = True
for row in table.rows:
html_table += "<tr>"
for cell in row.cells:
if first_row:
html_table += "<th>" + html.escape(cell.text) + "</th>"
else:
html_table += "<td>" + html.escape(cell.text) + "</td>"
html_table += "</tr>"
first_row = False
html_table += "</table></body></html>"
return (
self._html_converter.convert_string(html_table, **kwargs).markdown.strip()
+ "\n"
)
def _convert_chart_to_markdown(self, chart):
try:
md = "\n\n### Chart"
if chart.has_title:
md += f": {chart.chart_title.text_frame.text}"
md += "\n\n"
data = []
category_names = [c.label for c in chart.plots[0].categories]
series_names = [s.name for s in chart.series]
data.append(["Category"] + series_names)
for idx, category in enumerate(category_names):
row = [category]
for series in chart.series:
row.append(series.values[idx])
data.append(row)
markdown_table = []
for row in data:
markdown_table.append("| " + " | ".join(map(str, row)) + " |")
header = markdown_table[0]
separator = "|" + "|".join(["---"] * len(data[0])) + "|"
return md + "\n".join([header, separator] + markdown_table[1:])
        except Exception:
            # Covers ValueError("unsupported plot type") and any other
            # extraction failure; falling through with an implicit None
            # would break the caller's `md_content +=` concatenation
            return "\n\n[unsupported chart]\n\n"
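# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# _convert_table_to_markdown sidesteps hand-rolled Markdown tables by
# serializing the PPTX table to HTML and reusing HtmlConverter. The same trick
# in isolation (the table contents here are made up):
if __name__ == "__main__":
    _html = (
        "<html><body><table>"
        "<tr><th>Name</th><th>Qty</th></tr>"
        "<tr><td>widgets</td><td>3</td></tr>"
        "</table></body></html>"
    )
    print(HtmlConverter().convert_string(_html).markdown)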
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/__init__.py | packages/markitdown/src/markitdown/converters/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
from ._plain_text_converter import PlainTextConverter
from ._html_converter import HtmlConverter
from ._rss_converter import RssConverter
from ._wikipedia_converter import WikipediaConverter
from ._youtube_converter import YouTubeConverter
from ._ipynb_converter import IpynbConverter
from ._bing_serp_converter import BingSerpConverter
from ._pdf_converter import PdfConverter
from ._docx_converter import DocxConverter
from ._xlsx_converter import XlsxConverter, XlsConverter
from ._pptx_converter import PptxConverter
from ._image_converter import ImageConverter
from ._audio_converter import AudioConverter
from ._outlook_msg_converter import OutlookMsgConverter
from ._zip_converter import ZipConverter
from ._doc_intel_converter import (
DocumentIntelligenceConverter,
DocumentIntelligenceFileType,
)
from ._epub_converter import EpubConverter
from ._csv_converter import CsvConverter
__all__ = [
"PlainTextConverter",
"HtmlConverter",
"RssConverter",
"WikipediaConverter",
"YouTubeConverter",
"IpynbConverter",
"BingSerpConverter",
"PdfConverter",
"DocxConverter",
"XlsxConverter",
"XlsConverter",
"PptxConverter",
"ImageConverter",
"AudioConverter",
"OutlookMsgConverter",
"ZipConverter",
"DocumentIntelligenceConverter",
"DocumentIntelligenceFileType",
"EpubConverter",
"CsvConverter",
]
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_audio_converter.py | packages/markitdown/src/markitdown/converters/_audio_converter.py | from typing import Any, BinaryIO
from ._exiftool import exiftool_metadata
from ._transcribe_audio import transcribe_audio
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from .._exceptions import MissingDependencyException
ACCEPTED_MIME_TYPE_PREFIXES = [
"audio/x-wav",
"audio/mpeg",
"video/mp4",
]
ACCEPTED_FILE_EXTENSIONS = [
".wav",
".mp3",
".m4a",
".mp4",
]
class AudioConverter(DocumentConverter):
"""
Converts audio files to markdown via extraction of metadata (if `exiftool` is installed), and speech transcription (if `speech_recognition` is installed).
"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
md_content = ""
# Add metadata
metadata = exiftool_metadata(
file_stream, exiftool_path=kwargs.get("exiftool_path")
)
if metadata:
for f in [
"Title",
"Artist",
"Author",
"Band",
"Album",
"Genre",
"Track",
"DateTimeOriginal",
"CreateDate",
# "Duration", -- Wrong values when read from memory
"NumChannels",
"SampleRate",
"AvgBytesPerSec",
"BitsPerSample",
]:
if f in metadata:
md_content += f"{f}: {metadata[f]}\n"
# Figure out the audio format for transcription
if stream_info.extension == ".wav" or stream_info.mimetype == "audio/x-wav":
audio_format = "wav"
elif stream_info.extension == ".mp3" or stream_info.mimetype == "audio/mpeg":
audio_format = "mp3"
elif (
stream_info.extension in [".mp4", ".m4a"]
or stream_info.mimetype == "video/mp4"
):
audio_format = "mp4"
else:
audio_format = None
# Transcribe
if audio_format:
try:
transcript = transcribe_audio(file_stream, audio_format=audio_format)
if transcript:
md_content += "\n\n### Audio Transcript:\n" + transcript
except MissingDependencyException:
pass
# Return the result
return DocumentConverterResult(markdown=md_content.strip())
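# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# accepts() keys off the extension OR a mimetype prefix, so either signal alone
# is enough. A quick check against empty in-memory streams:
if __name__ == "__main__":
    import io

    _conv = AudioConverter()
    print(_conv.accepts(io.BytesIO(), StreamInfo(extension=".mp3")))       # True
    print(_conv.accepts(io.BytesIO(), StreamInfo(mimetype="audio/mpeg")))  # True
    print(_conv.accepts(io.BytesIO(), StreamInfo(extension=".flac")))      # False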
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_image_converter.py | packages/markitdown/src/markitdown/converters/_image_converter.py | from typing import BinaryIO, Any, Union
import base64
import mimetypes
from ._exiftool import exiftool_metadata
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
ACCEPTED_MIME_TYPE_PREFIXES = [
"image/jpeg",
"image/png",
]
ACCEPTED_FILE_EXTENSIONS = [".jpg", ".jpeg", ".png"]
class ImageConverter(DocumentConverter):
"""
Converts images to markdown via extraction of metadata (if `exiftool` is installed), and description via a multimodal LLM (if an llm_client is configured).
"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any,
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
md_content = ""
# Add metadata
metadata = exiftool_metadata(
file_stream, exiftool_path=kwargs.get("exiftool_path")
)
if metadata:
for f in [
"ImageSize",
"Title",
"Caption",
"Description",
"Keywords",
"Artist",
"Author",
"DateTimeOriginal",
"CreateDate",
"GPSPosition",
]:
if f in metadata:
md_content += f"{f}: {metadata[f]}\n"
# Try describing the image with GPT
llm_client = kwargs.get("llm_client")
llm_model = kwargs.get("llm_model")
if llm_client is not None and llm_model is not None:
llm_description = self._get_llm_description(
file_stream,
stream_info,
client=llm_client,
model=llm_model,
prompt=kwargs.get("llm_prompt"),
)
if llm_description is not None:
md_content += "\n# Description:\n" + llm_description.strip() + "\n"
return DocumentConverterResult(
markdown=md_content,
)
def _get_llm_description(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
*,
client,
model,
prompt=None,
) -> Union[None, str]:
if prompt is None or prompt.strip() == "":
prompt = "Write a detailed caption for this image."
# Get the content type
content_type = stream_info.mimetype
if not content_type:
content_type, _ = mimetypes.guess_type(
"_dummy" + (stream_info.extension or "")
)
if not content_type:
content_type = "application/octet-stream"
# Convert to base64
cur_pos = file_stream.tell()
try:
base64_image = base64.b64encode(file_stream.read()).decode("utf-8")
except Exception as e:
return None
finally:
file_stream.seek(cur_pos)
# Prepare the data-uri
data_uri = f"data:{content_type};base64,{base64_image}"
# Prepare the OpenAI API request
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {
"url": data_uri,
},
},
],
}
]
# Call the OpenAI API
response = client.chat.completions.create(model=model, messages=messages)
return response.choices[0].message.content
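# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# The LLM request above ships the image inline as a data URI. The encoding step
# in isolation, over a few stand-in PNG header bytes:
if __name__ == "__main__":
    _payload = base64.b64encode(b"\x89PNG\r\n\x1a\n").decode("utf-8")
    print(f"data:image/png;base64,{_payload}")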
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_doc_intel_converter.py | packages/markitdown/src/markitdown/converters/_doc_intel_converter.py | import sys
import re
import os
from typing import BinaryIO, Any, List
from enum import Enum
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from .._exceptions import MissingDependencyException
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
from azure.ai.documentintelligence import DocumentIntelligenceClient
from azure.ai.documentintelligence.models import (
AnalyzeDocumentRequest,
AnalyzeResult,
DocumentAnalysisFeature,
)
from azure.core.credentials import AzureKeyCredential, TokenCredential
from azure.identity import DefaultAzureCredential
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
# Define these types for type hinting when the package is not available
class AzureKeyCredential:
pass
class TokenCredential:
pass
class DocumentIntelligenceClient:
pass
class AnalyzeDocumentRequest:
pass
class AnalyzeResult:
pass
class DocumentAnalysisFeature:
pass
class DefaultAzureCredential:
pass
# TODO: currently, there is a bug in the document intelligence SDK with importing the "ContentFormat" enum.
# This constant is a temporary fix until the bug is resolved.
CONTENT_FORMAT = "markdown"
class DocumentIntelligenceFileType(str, Enum):
"""Enum of file types supported by the Document Intelligence Converter."""
# No OCR
DOCX = "docx"
PPTX = "pptx"
XLSX = "xlsx"
HTML = "html"
# OCR
PDF = "pdf"
JPEG = "jpeg"
PNG = "png"
BMP = "bmp"
TIFF = "tiff"
def _get_mime_type_prefixes(types: List[DocumentIntelligenceFileType]) -> List[str]:
"""Get the MIME type prefixes for the given file types."""
prefixes: List[str] = []
for type_ in types:
if type_ == DocumentIntelligenceFileType.DOCX:
prefixes.append(
"application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
elif type_ == DocumentIntelligenceFileType.PPTX:
prefixes.append(
"application/vnd.openxmlformats-officedocument.presentationml"
)
elif type_ == DocumentIntelligenceFileType.XLSX:
prefixes.append(
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
)
elif type_ == DocumentIntelligenceFileType.HTML:
prefixes.append("text/html")
prefixes.append("application/xhtml+xml")
elif type_ == DocumentIntelligenceFileType.PDF:
prefixes.append("application/pdf")
prefixes.append("application/x-pdf")
elif type_ == DocumentIntelligenceFileType.JPEG:
prefixes.append("image/jpeg")
elif type_ == DocumentIntelligenceFileType.PNG:
prefixes.append("image/png")
elif type_ == DocumentIntelligenceFileType.BMP:
prefixes.append("image/bmp")
elif type_ == DocumentIntelligenceFileType.TIFF:
prefixes.append("image/tiff")
return prefixes
def _get_file_extensions(types: List[DocumentIntelligenceFileType]) -> List[str]:
"""Get the file extensions for the given file types."""
extensions: List[str] = []
for type_ in types:
if type_ == DocumentIntelligenceFileType.DOCX:
extensions.append(".docx")
elif type_ == DocumentIntelligenceFileType.PPTX:
extensions.append(".pptx")
elif type_ == DocumentIntelligenceFileType.XLSX:
extensions.append(".xlsx")
elif type_ == DocumentIntelligenceFileType.PDF:
extensions.append(".pdf")
elif type_ == DocumentIntelligenceFileType.JPEG:
extensions.append(".jpg")
extensions.append(".jpeg")
elif type_ == DocumentIntelligenceFileType.PNG:
extensions.append(".png")
elif type_ == DocumentIntelligenceFileType.BMP:
extensions.append(".bmp")
elif type_ == DocumentIntelligenceFileType.TIFF:
extensions.append(".tiff")
elif type_ == DocumentIntelligenceFileType.HTML:
extensions.append(".html")
return extensions
class DocumentIntelligenceConverter(DocumentConverter):
"""Specialized DocumentConverter that uses Document Intelligence to extract text from documents."""
def __init__(
self,
*,
endpoint: str,
api_version: str = "2024-07-31-preview",
credential: AzureKeyCredential | TokenCredential | None = None,
file_types: List[DocumentIntelligenceFileType] = [
DocumentIntelligenceFileType.DOCX,
DocumentIntelligenceFileType.PPTX,
DocumentIntelligenceFileType.XLSX,
DocumentIntelligenceFileType.PDF,
DocumentIntelligenceFileType.JPEG,
DocumentIntelligenceFileType.PNG,
DocumentIntelligenceFileType.BMP,
DocumentIntelligenceFileType.TIFF,
],
):
"""
Initialize the DocumentIntelligenceConverter.
Args:
endpoint (str): The endpoint for the Document Intelligence service.
api_version (str): The API version to use. Defaults to "2024-07-31-preview".
credential (AzureKeyCredential | TokenCredential | None): The credential to use for authentication.
file_types (List[DocumentIntelligenceFileType]): The file types to accept. Defaults to all supported file types.
"""
super().__init__()
self._file_types = file_types
# Raise an error if the dependencies are not available.
# This is different than other converters since this one isn't even instantiated
# unless explicitly requested.
if _dependency_exc_info is not None:
raise MissingDependencyException(
"DocumentIntelligenceConverter requires the optional dependency [az-doc-intel] (or [all]) to be installed. E.g., `pip install markitdown[az-doc-intel]`"
) from _dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_dependency_exc_info[2]
)
if credential is None:
if os.environ.get("AZURE_API_KEY") is None:
credential = DefaultAzureCredential()
else:
credential = AzureKeyCredential(os.environ["AZURE_API_KEY"])
self.endpoint = endpoint
self.api_version = api_version
self.doc_intel_client = DocumentIntelligenceClient(
endpoint=self.endpoint,
api_version=self.api_version,
credential=credential,
)
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in _get_file_extensions(self._file_types):
return True
for prefix in _get_mime_type_prefixes(self._file_types):
if mimetype.startswith(prefix):
return True
return False
def _analysis_features(self, stream_info: StreamInfo) -> List[str]:
"""
Helper needed to determine which analysis features to use.
        Certain document analysis features are not available for
        Office file types (.xlsx, .pptx, .html, .docx).
"""
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
# Types that don't support ocr
no_ocr_types = [
DocumentIntelligenceFileType.DOCX,
DocumentIntelligenceFileType.PPTX,
DocumentIntelligenceFileType.XLSX,
DocumentIntelligenceFileType.HTML,
]
if extension in _get_file_extensions(no_ocr_types):
return []
for prefix in _get_mime_type_prefixes(no_ocr_types):
if mimetype.startswith(prefix):
return []
return [
DocumentAnalysisFeature.FORMULAS, # enable formula extraction
DocumentAnalysisFeature.OCR_HIGH_RESOLUTION, # enable high resolution OCR
DocumentAnalysisFeature.STYLE_FONT, # enable font style extraction
]
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
# Extract the text using Azure Document Intelligence
poller = self.doc_intel_client.begin_analyze_document(
model_id="prebuilt-layout",
body=AnalyzeDocumentRequest(bytes_source=file_stream.read()),
features=self._analysis_features(stream_info),
output_content_format=CONTENT_FORMAT, # TODO: replace with "ContentFormat.MARKDOWN" when the bug is fixed
)
result: AnalyzeResult = poller.result()
# remove comments from the markdown content generated by Doc Intelligence and append to markdown string
markdown_text = re.sub(r"<!--.*?-->", "", result.content, flags=re.DOTALL)
return DocumentConverterResult(markdown=markdown_text)
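# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# Wiring the converter up needs only an endpoint; the credential falls back to
# AZURE_API_KEY, then DefaultAzureCredential. The endpoint below is a
# placeholder, and the [az-doc-intel] extra must be installed for this to run:
#
#   converter = DocumentIntelligenceConverter(
#       endpoint="https://<your-resource>.cognitiveservices.azure.com/",
#       file_types=[DocumentIntelligenceFileType.PDF],
#   )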
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_docx_converter.py | packages/markitdown/src/markitdown/converters/_docx_converter.py | import sys
import io
from warnings import warn
from typing import BinaryIO, Any
from ._html_converter import HtmlConverter
from ..converter_utils.docx.pre_process import pre_process_docx
from .._base_converter import DocumentConverterResult
from .._stream_info import StreamInfo
from .._exceptions import MissingDependencyException, MISSING_DEPENDENCY_MESSAGE
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
import mammoth
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
ACCEPTED_MIME_TYPE_PREFIXES = [
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
]
ACCEPTED_FILE_EXTENSIONS = [".docx"]
class DocxConverter(HtmlConverter):
"""
    Converts DOCX files to Markdown. Style information (e.g., headings) and tables are preserved where possible.
"""
def __init__(self):
super().__init__()
self._html_converter = HtmlConverter()
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
        # Check the dependencies
if _dependency_exc_info is not None:
raise MissingDependencyException(
MISSING_DEPENDENCY_MESSAGE.format(
converter=type(self).__name__,
extension=".docx",
feature="docx",
)
) from _dependency_exc_info[
1
].with_traceback( # type: ignore[union-attr]
_dependency_exc_info[2]
)
style_map = kwargs.get("style_map", None)
pre_process_stream = pre_process_docx(file_stream)
return self._html_converter.convert_string(
mammoth.convert_to_html(pre_process_stream, style_map=style_map).value,
**kwargs,
)
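# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# The conversion is a three-stage pipeline: rewrite OMML equations to LaTeX
# (pre_process_docx), render the DOCX to HTML (mammoth), then turn that HTML
# into Markdown. The path below is hypothetical and mammoth must be installed:
#
#   with open("report.docx", "rb") as f:
#       html = mammoth.convert_to_html(pre_process_docx(f)).value
#       print(HtmlConverter().convert_string(html).markdown)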
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_markdownify.py | packages/markitdown/src/markitdown/converters/_markdownify.py | import re
import markdownify
from typing import Any, Optional
from urllib.parse import quote, unquote, urlparse, urlunparse
class _CustomMarkdownify(markdownify.MarkdownConverter):
"""
A custom version of markdownify's MarkdownConverter. Changes include:
- Altering the default heading style to use '#', '##', etc.
- Removing javascript hyperlinks.
- Truncating images with large data:uri sources.
- Ensuring URIs are properly escaped, and do not conflict with Markdown syntax
"""
def __init__(self, **options: Any):
options["heading_style"] = options.get("heading_style", markdownify.ATX)
options["keep_data_uris"] = options.get("keep_data_uris", False)
# Explicitly cast options to the expected type if necessary
super().__init__(**options)
def convert_hn(
self,
n: int,
el: Any,
text: str,
convert_as_inline: Optional[bool] = False,
**kwargs,
) -> str:
"""Same as usual, but be sure to start with a new line"""
if not convert_as_inline:
if not re.search(r"^\n", text):
return "\n" + super().convert_hn(n, el, text, convert_as_inline) # type: ignore
return super().convert_hn(n, el, text, convert_as_inline) # type: ignore
def convert_a(
self,
el: Any,
text: str,
convert_as_inline: Optional[bool] = False,
**kwargs,
):
"""Same as usual converter, but removes Javascript links and escapes URIs."""
prefix, suffix, text = markdownify.chomp(text) # type: ignore
if not text:
return ""
if el.find_parent("pre") is not None:
return text
href = el.get("href")
title = el.get("title")
# Escape URIs and skip non-http or file schemes
if href:
try:
parsed_url = urlparse(href) # type: ignore
if parsed_url.scheme and parsed_url.scheme.lower() not in ["http", "https", "file"]: # type: ignore
return "%s%s%s" % (prefix, text, suffix)
href = urlunparse(parsed_url._replace(path=quote(unquote(parsed_url.path)))) # type: ignore
except ValueError: # It's not clear if this ever gets thrown
return "%s%s%s" % (prefix, text, suffix)
# For the replacement see #29: text nodes underscores are escaped
if (
self.options["autolinks"]
and text.replace(r"\_", "_") == href
and not title
and not self.options["default_title"]
):
# Shortcut syntax
return "<%s>" % href
if self.options["default_title"] and not title:
title = href
title_part = ' "%s"' % title.replace('"', r"\"") if title else ""
return (
"%s[%s](%s%s)%s" % (prefix, text, href, title_part, suffix)
if href
else text
)
def convert_img(
self,
el: Any,
text: str,
convert_as_inline: Optional[bool] = False,
**kwargs,
) -> str:
"""Same as usual converter, but removes data URIs"""
alt = el.attrs.get("alt", None) or ""
src = el.attrs.get("src", None) or el.attrs.get("data-src", None) or ""
title = el.attrs.get("title", None) or ""
title_part = ' "%s"' % title.replace('"', r"\"") if title else ""
# Remove all line breaks from alt
alt = alt.replace("\n", " ")
if (
convert_as_inline
and el.parent.name not in self.options["keep_inline_images_in"]
):
return alt
# Remove dataURIs
if src.startswith("data:") and not self.options["keep_data_uris"]:
src = src.split(",")[0] + "..."
return "" % (alt, src, title_part)
def convert_input(
self,
el: Any,
text: str,
convert_as_inline: Optional[bool] = False,
**kwargs,
) -> str:
"""Convert checkboxes to Markdown [x]/[ ] syntax."""
if el.get("type") == "checkbox":
return "[x] " if el.has_attr("checked") else "[ ] "
return ""
def convert_soup(self, soup: Any) -> str:
return super().convert_soup(soup) # type: ignore
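# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# The overrides are easiest to see end to end: javascript: links collapse to
# their text, and checkboxes become [x]/[ ]. The markup below is made up, and
# the expected outputs in the comments are approximate (modulo whitespace):
if __name__ == "__main__":
    _md = _CustomMarkdownify()
    print(_md.convert('<a href="javascript:alert(1)">click</a>'))       # -> click
    print(_md.convert('<p><input type="checkbox" checked/> done</p>'))  # -> [x]  done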
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converters/_zip_converter.py | packages/markitdown/src/markitdown/converters/_zip_converter.py | import zipfile
import io
import os
from typing import BinaryIO, Any, TYPE_CHECKING
from .._base_converter import DocumentConverter, DocumentConverterResult
from .._stream_info import StreamInfo
from .._exceptions import UnsupportedFormatException, FileConversionException
# Break otherwise circular import for type hinting
if TYPE_CHECKING:
from .._markitdown import MarkItDown
ACCEPTED_MIME_TYPE_PREFIXES = [
"application/zip",
]
ACCEPTED_FILE_EXTENSIONS = [".zip"]
class ZipConverter(DocumentConverter):
"""Converts ZIP files to markdown by extracting and converting all contained files.
    The converter reads the ZIP archive in memory, processes each member file
    using the appropriate converter based on its file extension, and combines
    the results into a single markdown document.
Example output format:
```markdown
Content from the zip file `example.zip`:
## File: docs/readme.txt
This is the content of readme.txt
Multiple lines are preserved
## File: images/example.jpg
ImageSize: 1920x1080
DateTimeOriginal: 2024-02-15 14:30:00
Description: A beautiful landscape photo
## File: data/report.xlsx
## Sheet1
| Column1 | Column2 | Column3 |
|---------|---------|---------|
| data1 | data2 | data3 |
| data4 | data5 | data6 |
```
Key features:
- Maintains original file structure in headings
- Processes nested files recursively
- Uses appropriate converters for each file type
- Preserves formatting of converted content
    - Processes archives fully in memory (no temporary files)
"""
def __init__(
self,
*,
markitdown: "MarkItDown",
):
super().__init__()
self._markitdown = markitdown
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any, # Options to pass to the converter
) -> DocumentConverterResult:
file_path = stream_info.url or stream_info.local_path or stream_info.filename
md_content = f"Content from the zip file `{file_path}`:\n\n"
with zipfile.ZipFile(file_stream, "r") as zipObj:
for name in zipObj.namelist():
try:
z_file_stream = io.BytesIO(zipObj.read(name))
z_file_stream_info = StreamInfo(
extension=os.path.splitext(name)[1],
filename=os.path.basename(name),
)
result = self._markitdown.convert_stream(
stream=z_file_stream,
stream_info=z_file_stream_info,
)
if result is not None:
md_content += f"## File: {name}\n\n"
md_content += result.markdown + "\n\n"
except UnsupportedFormatException:
pass
except FileConversionException:
pass
return DocumentConverterResult(markdown=md_content.strip())
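# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# ZipConverter hands every member file back to the owning MarkItDown instance,
# so it is normally reached through that front end rather than instantiated
# directly. A hypothetical round trip (the archive name is made up):
#
#   from markitdown import MarkItDown
#   result = MarkItDown().convert("bundle.zip")
#   print(result.markdown)  # one "## File: ..." section per member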
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converter_utils/__init__.py | packages/markitdown/src/markitdown/converter_utils/__init__.py | python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false | |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converter_utils/docx/pre_process.py | packages/markitdown/src/markitdown/converter_utils/docx/pre_process.py | import zipfile
from io import BytesIO
from typing import BinaryIO
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup, Tag
from .math.omml import OMML_NS, oMath2Latex
MATH_ROOT_TEMPLATE = "".join(
(
"<w:document ",
'xmlns:wpc="http://schemas.microsoft.com/office/word/2010/wordprocessingCanvas" ',
'xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" ',
'xmlns:o="urn:schemas-microsoft-com:office:office" ',
'xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" ',
'xmlns:m="http://schemas.openxmlformats.org/officeDocument/2006/math" ',
'xmlns:v="urn:schemas-microsoft-com:vml" ',
'xmlns:wp14="http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" ',
'xmlns:wp="http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" ',
'xmlns:w10="urn:schemas-microsoft-com:office:word" ',
'xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main" ',
'xmlns:w14="http://schemas.microsoft.com/office/word/2010/wordml" ',
'xmlns:wpg="http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" ',
'xmlns:wpi="http://schemas.microsoft.com/office/word/2010/wordprocessingInk" ',
'xmlns:wne="http://schemas.microsoft.com/office/word/2006/wordml" ',
'xmlns:wps="http://schemas.microsoft.com/office/word/2010/wordprocessingShape" mc:Ignorable="w14 wp14">',
"{0}</w:document>",
)
)
def _convert_omath_to_latex(tag: Tag) -> str:
"""
Converts an OMML (Office Math Markup Language) tag to LaTeX format.
Args:
tag (Tag): A BeautifulSoup Tag object representing the OMML element.
Returns:
str: The LaTeX representation of the OMML element.
"""
# Format the tag into a complete XML document string
math_root = ET.fromstring(MATH_ROOT_TEMPLATE.format(str(tag)))
# Find the 'oMath' element within the XML document
math_element = math_root.find(OMML_NS + "oMath")
# Convert the 'oMath' element to LaTeX using the oMath2Latex function
latex = oMath2Latex(math_element).latex
return latex
def _get_omath_tag_replacement(tag: Tag, block: bool = False) -> Tag:
"""
Creates a replacement tag for an OMML (Office Math Markup Language) element.
Args:
tag (Tag): A BeautifulSoup Tag object representing the "oMath" element.
block (bool, optional): If True, the LaTeX will be wrapped in double dollar signs for block mode. Defaults to False.
Returns:
Tag: A BeautifulSoup Tag object representing the replacement element.
"""
t_tag = Tag(name="w:t")
t_tag.string = (
f"$${_convert_omath_to_latex(tag)}$$"
if block
else f"${_convert_omath_to_latex(tag)}$"
)
r_tag = Tag(name="w:r")
r_tag.append(t_tag)
return r_tag
def _replace_equations(tag: Tag):
"""
Replaces OMML (Office Math Markup Language) elements with their LaTeX equivalents.
Args:
tag (Tag): A BeautifulSoup Tag object representing the OMML element. Could be either "oMathPara" or "oMath".
Raises:
ValueError: If the tag is not supported.
"""
if tag.name == "oMathPara":
# Create a new paragraph tag
p_tag = Tag(name="w:p")
# Replace each 'oMath' child tag with its LaTeX equivalent as block equations
for child_tag in tag.find_all("oMath"):
p_tag.append(_get_omath_tag_replacement(child_tag, block=True))
# Replace the original 'oMathPara' tag with the new paragraph tag
tag.replace_with(p_tag)
elif tag.name == "oMath":
# Replace the 'oMath' tag with its LaTeX equivalent as inline equation
tag.replace_with(_get_omath_tag_replacement(tag, block=False))
else:
        raise ValueError(f"Unsupported tag: {tag.name}")
def _pre_process_math(content: bytes) -> bytes:
"""
Pre-processes the math content in a DOCX -> XML file by converting OMML (Office Math Markup Language) elements to LaTeX.
This preprocessed content can be directly replaced in the DOCX file -> XMLs.
Args:
content (bytes): The XML content of the DOCX file as bytes.
Returns:
bytes: The processed content with OMML elements replaced by their LaTeX equivalents, encoded as bytes.
"""
soup = BeautifulSoup(content.decode(), features="xml")
for tag in soup.find_all("oMathPara"):
_replace_equations(tag)
for tag in soup.find_all("oMath"):
_replace_equations(tag)
return str(soup).encode()
def pre_process_docx(input_docx: BinaryIO) -> BinaryIO:
"""
Pre-processes a DOCX file with provided steps.
The process works by unzipping the DOCX file in memory, transforming specific XML files
(such as converting OMML elements to LaTeX), and then zipping everything back into a
DOCX file without writing to disk.
Args:
input_docx (BinaryIO): A binary input stream representing the DOCX file.
Returns:
BinaryIO: A binary output stream representing the processed DOCX file.
"""
output_docx = BytesIO()
# The files that need to be pre-processed from .docx
pre_process_enable_files = [
"word/document.xml",
"word/footnotes.xml",
"word/endnotes.xml",
]
with zipfile.ZipFile(input_docx, mode="r") as zip_input:
files = {name: zip_input.read(name) for name in zip_input.namelist()}
with zipfile.ZipFile(output_docx, mode="w") as zip_output:
zip_output.comment = zip_input.comment
for name, content in files.items():
if name in pre_process_enable_files:
try:
# Pre-process the content
updated_content = _pre_process_math(content)
# In the future, if there are more pre-processing steps, they can be added here
zip_output.writestr(name, updated_content)
except Exception:
# If there is an error in processing the content, write the original content
zip_output.writestr(name, content)
else:
zip_output.writestr(name, content)
output_docx.seek(0)
return output_docx
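# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# pre_process_docx is stream-in / stream-out, so it can sit in front of any
# DOCX consumer. The path below is hypothetical:
#
#   with open("thesis.docx", "rb") as f:
#       cleaned = pre_process_docx(f)  # BytesIO with OMML rewritten to LaTeX
#       data = cleaned.read()          # bytes of the rewritten .docx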
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converter_utils/docx/__init__.py | packages/markitdown/src/markitdown/converter_utils/docx/__init__.py | python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false | |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converter_utils/docx/math/omml.py | packages/markitdown/src/markitdown/converter_utils/docx/math/omml.py | # -*- coding: utf-8 -*-
"""
Office Math Markup Language (OMML)
Adapted from https://github.com/xiilei/dwml/blob/master/dwml/omml.py
On 25/03/2025
"""
from defusedxml import ElementTree as ET
from .latex_dict import (
CHARS,
CHR,
CHR_BO,
CHR_DEFAULT,
POS,
POS_DEFAULT,
SUB,
SUP,
F,
F_DEFAULT,
T,
FUNC,
D,
D_DEFAULT,
RAD,
RAD_DEFAULT,
ARR,
LIM_FUNC,
LIM_TO,
LIM_UPP,
M,
BRK,
BLANK,
BACKSLASH,
ALN,
FUNC_PLACE,
)
OMML_NS = "{http://schemas.openxmlformats.org/officeDocument/2006/math}"
def load(stream):
tree = ET.parse(stream)
for omath in tree.findall(OMML_NS + "oMath"):
yield oMath2Latex(omath)
def load_string(string):
root = ET.fromstring(string)
for omath in root.findall(OMML_NS + "oMath"):
yield oMath2Latex(omath)
def escape_latex(strs):
last = None
new_chr = []
strs = strs.replace(r"\\", "\\")
for c in strs:
if (c in CHARS) and (last != BACKSLASH):
new_chr.append(BACKSLASH + c)
else:
new_chr.append(c)
last = c
return BLANK.join(new_chr)
def get_val(key, default=None, store=CHR):
if key is not None:
return key if not store else store.get(key, key)
else:
return default
class Tag2Method(object):
def call_method(self, elm, stag=None):
getmethod = self.tag2meth.get
if stag is None:
stag = elm.tag.replace(OMML_NS, "")
method = getmethod(stag)
if method:
return method(self, elm)
else:
return None
def process_children_list(self, elm, include=None):
"""
        Process the children of elm; return an iterable.
"""
for _e in list(elm):
if OMML_NS not in _e.tag:
continue
stag = _e.tag.replace(OMML_NS, "")
if include and (stag not in include):
continue
t = self.call_method(_e, stag=stag)
if t is None:
t = self.process_unknow(_e, stag)
if t is None:
continue
yield (stag, t, _e)
def process_children_dict(self, elm, include=None):
"""
        Process the children of elm; return a dict.
"""
latex_chars = dict()
for stag, t, e in self.process_children_list(elm, include):
latex_chars[stag] = t
return latex_chars
def process_children(self, elm, include=None):
"""
        Process the children of elm; return a string.
"""
return BLANK.join(
(
t if not isinstance(t, Tag2Method) else str(t)
for stag, t, e in self.process_children_list(elm, include)
)
)
def process_unknow(self, elm, stag):
return None
class Pr(Tag2Method):
text = ""
__val_tags = ("chr", "pos", "begChr", "endChr", "type")
__innerdict = None # can't use the __dict__
""" common properties of element"""
def __init__(self, elm):
self.__innerdict = {}
self.text = self.process_children(elm)
def __str__(self):
return self.text
    def __unicode__(self):
        return self.__str__()
def __getattr__(self, name):
return self.__innerdict.get(name, None)
def do_brk(self, elm):
self.__innerdict["brk"] = BRK
return BRK
def do_common(self, elm):
stag = elm.tag.replace(OMML_NS, "")
if stag in self.__val_tags:
t = elm.get("{0}val".format(OMML_NS))
self.__innerdict[stag] = t
return None
tag2meth = {
"brk": do_brk,
"chr": do_common,
"pos": do_common,
"begChr": do_common,
"endChr": do_common,
"type": do_common,
}
class oMath2Latex(Tag2Method):
"""
Convert oMath element of omml to latex
"""
_t_dict = T
__direct_tags = ("box", "sSub", "sSup", "sSubSup", "num", "den", "deg", "e")
def __init__(self, element):
self._latex = self.process_children(element)
def __str__(self):
return self.latex
    def __unicode__(self):
        return self.__str__()
def process_unknow(self, elm, stag):
if stag in self.__direct_tags:
return self.process_children(elm)
elif stag[-2:] == "Pr":
return Pr(elm)
else:
return None
@property
def latex(self):
return self._latex
def do_acc(self, elm):
"""
the accent function
"""
c_dict = self.process_children_dict(elm)
latex_s = get_val(
c_dict["accPr"].chr, default=CHR_DEFAULT.get("ACC_VAL"), store=CHR
)
return latex_s.format(c_dict["e"])
def do_bar(self, elm):
"""
the bar function
"""
c_dict = self.process_children_dict(elm)
pr = c_dict["barPr"]
latex_s = get_val(pr.pos, default=POS_DEFAULT.get("BAR_VAL"), store=POS)
return pr.text + latex_s.format(c_dict["e"])
def do_d(self, elm):
"""
the delimiter object
"""
c_dict = self.process_children_dict(elm)
pr = c_dict["dPr"]
null = D_DEFAULT.get("null")
s_val = get_val(pr.begChr, default=D_DEFAULT.get("left"), store=T)
e_val = get_val(pr.endChr, default=D_DEFAULT.get("right"), store=T)
return pr.text + D.format(
left=null if not s_val else escape_latex(s_val),
text=c_dict["e"],
right=null if not e_val else escape_latex(e_val),
)
def do_spre(self, elm):
"""
        the Pre-Sub-Superscript object -- not supported yet
"""
pass
def do_sub(self, elm):
text = self.process_children(elm)
return SUB.format(text)
def do_sup(self, elm):
text = self.process_children(elm)
return SUP.format(text)
def do_f(self, elm):
"""
the fraction object
"""
c_dict = self.process_children_dict(elm)
pr = c_dict["fPr"]
latex_s = get_val(pr.type, default=F_DEFAULT, store=F)
return pr.text + latex_s.format(num=c_dict.get("num"), den=c_dict.get("den"))
def do_func(self, elm):
"""
the Function-Apply object (Examples:sin cos)
"""
c_dict = self.process_children_dict(elm)
func_name = c_dict.get("fName")
return func_name.replace(FUNC_PLACE, c_dict.get("e"))
def do_fname(self, elm):
"""
the func name
"""
latex_chars = []
for stag, t, e in self.process_children_list(elm):
if stag == "r":
if FUNC.get(t):
latex_chars.append(FUNC[t])
else:
                    raise NotImplementedError("Unsupported function: %s" % t)
else:
latex_chars.append(t)
t = BLANK.join(latex_chars)
return t if FUNC_PLACE in t else t + FUNC_PLACE # do_func will replace this
def do_groupchr(self, elm):
"""
the Group-Character object
"""
c_dict = self.process_children_dict(elm)
pr = c_dict["groupChrPr"]
latex_s = get_val(pr.chr)
return pr.text + latex_s.format(c_dict["e"])
def do_rad(self, elm):
"""
the radical object
"""
c_dict = self.process_children_dict(elm)
text = c_dict.get("e")
deg_text = c_dict.get("deg")
if deg_text:
return RAD.format(deg=deg_text, text=text)
else:
return RAD_DEFAULT.format(text=text)
def do_eqarr(self, elm):
"""
the Array object
"""
return ARR.format(
text=BRK.join(
[t for stag, t, e in self.process_children_list(elm, include=("e",))]
)
)
def do_limlow(self, elm):
"""
the Lower-Limit object
"""
t_dict = self.process_children_dict(elm, include=("e", "lim"))
latex_s = LIM_FUNC.get(t_dict["e"])
if not latex_s:
            raise NotImplementedError("Unsupported limit function: %s" % t_dict["e"])
else:
return latex_s.format(lim=t_dict.get("lim"))
def do_limupp(self, elm):
"""
the Upper-Limit object
"""
t_dict = self.process_children_dict(elm, include=("e", "lim"))
return LIM_UPP.format(lim=t_dict.get("lim"), text=t_dict.get("e"))
def do_lim(self, elm):
"""
the lower limit of the limLow object and the upper limit of the limUpp function
"""
return self.process_children(elm).replace(LIM_TO[0], LIM_TO[1])
def do_m(self, elm):
"""
the Matrix object
"""
rows = []
for stag, t, e in self.process_children_list(elm):
if stag == "mPr":
pass
elif stag == "mr":
rows.append(t)
return M.format(text=BRK.join(rows))
def do_mr(self, elm):
"""
a single row of the matrix m
"""
return ALN.join(
[t for stag, t, e in self.process_children_list(elm, include=("e",))]
)
def do_nary(self, elm):
"""
the n-ary object
"""
res = []
bo = ""
for stag, t, e in self.process_children_list(elm):
if stag == "naryPr":
bo = get_val(t.chr, store=CHR_BO)
else:
res.append(t)
return bo + BLANK.join(res)
def do_r(self, elm):
"""
        Get text from the 'r' element, and try to convert it to LaTeX symbols
@todo text style support , (sty)
@todo \text (latex pure text support)
"""
_str = []
for s in elm.findtext("./{0}t".format(OMML_NS)):
# s = s if isinstance(s,unicode) else unicode(s,'utf-8')
_str.append(self._t_dict.get(s, s))
return escape_latex(BLANK.join(_str))
tag2meth = {
"acc": do_acc,
"r": do_r,
"bar": do_bar,
"sub": do_sub,
"sup": do_sup,
"f": do_f,
"func": do_func,
"fName": do_fname,
"groupChr": do_groupchr,
"d": do_d,
"rad": do_rad,
"eqArr": do_eqarr,
"limLow": do_limlow,
"limUpp": do_limupp,
"lim": do_lim,
"m": do_m,
"mr": do_mr,
"nary": do_nary,
}
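# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# load_string() is the quickest way to exercise the translator: feed it a
# fragment whose root contains m:oMath children. Below is hand-written OMML
# for "x squared"; sSup flows through __direct_tags and do_sup:
if __name__ == "__main__":
    _NS = "http://schemas.openxmlformats.org/officeDocument/2006/math"
    _OMML = (
        f'<m:oMathPara xmlns:m="{_NS}">'
        "<m:oMath><m:sSup>"
        "<m:e><m:r><m:t>x</m:t></m:r></m:e>"
        "<m:sup><m:r><m:t>2</m:t></m:r></m:sup>"
        "</m:sSup></m:oMath>"
        "</m:oMathPara>"
    )
    for _om in load_string(_OMML):
        print(_om.latex)  # -> x^{2}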
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converter_utils/docx/math/latex_dict.py | packages/markitdown/src/markitdown/converter_utils/docx/math/latex_dict.py | # -*- coding: utf-8 -*-
"""
Adapted from https://github.com/xiilei/dwml/blob/master/dwml/latex_dict.py
On 25/03/2025
"""
from __future__ import unicode_literals
CHARS = ("{", "}", "_", "^", "#", "&", "$", "%", "~")
BLANK = ""
BACKSLASH = "\\"
ALN = "&"
CHR = {
# Unicode : Latex Math Symbols
# Top accents
"\u0300": "\\grave{{{0}}}",
"\u0301": "\\acute{{{0}}}",
"\u0302": "\\hat{{{0}}}",
"\u0303": "\\tilde{{{0}}}",
"\u0304": "\\bar{{{0}}}",
"\u0305": "\\overbar{{{0}}}",
"\u0306": "\\breve{{{0}}}",
"\u0307": "\\dot{{{0}}}",
"\u0308": "\\ddot{{{0}}}",
"\u0309": "\\ovhook{{{0}}}",
"\u030a": "\\ocirc{{{0}}}}",
"\u030c": "\\check{{{0}}}}",
"\u0310": "\\candra{{{0}}}",
"\u0312": "\\oturnedcomma{{{0}}}",
"\u0315": "\\ocommatopright{{{0}}}",
"\u031a": "\\droang{{{0}}}",
"\u0338": "\\not{{{0}}}",
"\u20d0": "\\leftharpoonaccent{{{0}}}",
"\u20d1": "\\rightharpoonaccent{{{0}}}",
"\u20d2": "\\vertoverlay{{{0}}}",
"\u20d6": "\\overleftarrow{{{0}}}",
"\u20d7": "\\vec{{{0}}}",
"\u20db": "\\dddot{{{0}}}",
"\u20dc": "\\ddddot{{{0}}}",
"\u20e1": "\\overleftrightarrow{{{0}}}",
"\u20e7": "\\annuity{{{0}}}",
"\u20e9": "\\widebridgeabove{{{0}}}",
"\u20f0": "\\asteraccent{{{0}}}",
# Bottom accents
"\u0330": "\\wideutilde{{{0}}}",
"\u0331": "\\underbar{{{0}}}",
"\u20e8": "\\threeunderdot{{{0}}}",
"\u20ec": "\\underrightharpoondown{{{0}}}",
"\u20ed": "\\underleftharpoondown{{{0}}}",
"\u20ee": "\\underledtarrow{{{0}}}",
"\u20ef": "\\underrightarrow{{{0}}}",
# Over | group
"\u23b4": "\\overbracket{{{0}}}",
"\u23dc": "\\overparen{{{0}}}",
"\u23de": "\\overbrace{{{0}}}",
# Under| group
"\u23b5": "\\underbracket{{{0}}}",
"\u23dd": "\\underparen{{{0}}}",
"\u23df": "\\underbrace{{{0}}}",
}
CHR_BO = {
# Big operators,
"\u2140": "\\Bbbsum",
"\u220f": "\\prod",
"\u2210": "\\coprod",
"\u2211": "\\sum",
"\u222b": "\\int",
"\u22c0": "\\bigwedge",
"\u22c1": "\\bigvee",
"\u22c2": "\\bigcap",
"\u22c3": "\\bigcup",
"\u2a00": "\\bigodot",
"\u2a01": "\\bigoplus",
"\u2a02": "\\bigotimes",
}
T = {
"\u2192": "\\rightarrow ",
# Greek letters
"\U0001d6fc": "\\alpha ",
"\U0001d6fd": "\\beta ",
"\U0001d6fe": "\\gamma ",
"\U0001d6ff": "\\theta ",
"\U0001d700": "\\epsilon ",
"\U0001d701": "\\zeta ",
"\U0001d702": "\\eta ",
"\U0001d703": "\\theta ",
"\U0001d704": "\\iota ",
"\U0001d705": "\\kappa ",
"\U0001d706": "\\lambda ",
"\U0001d707": "\\m ",
"\U0001d708": "\\n ",
"\U0001d709": "\\xi ",
"\U0001d70a": "\\omicron ",
"\U0001d70b": "\\pi ",
"\U0001d70c": "\\rho ",
"\U0001d70d": "\\varsigma ",
"\U0001d70e": "\\sigma ",
"\U0001d70f": "\\ta ",
"\U0001d710": "\\upsilon ",
"\U0001d711": "\\phi ",
"\U0001d712": "\\chi ",
"\U0001d713": "\\psi ",
"\U0001d714": "\\omega ",
"\U0001d715": "\\partial ",
"\U0001d716": "\\varepsilon ",
"\U0001d717": "\\vartheta ",
"\U0001d718": "\\varkappa ",
"\U0001d719": "\\varphi ",
"\U0001d71a": "\\varrho ",
"\U0001d71b": "\\varpi ",
# Relation symbols
"\u2190": "\\leftarrow ",
"\u2191": "\\uparrow ",
"\u2192": "\\rightarrow ",
"\u2193": "\\downright ",
"\u2194": "\\leftrightarrow ",
"\u2195": "\\updownarrow ",
"\u2196": "\\nwarrow ",
"\u2197": "\\nearrow ",
"\u2198": "\\searrow ",
"\u2199": "\\swarrow ",
"\u22ee": "\\vdots ",
"\u22ef": "\\cdots ",
"\u22f0": "\\adots ",
"\u22f1": "\\ddots ",
"\u2260": "\\ne ",
"\u2264": "\\leq ",
"\u2265": "\\geq ",
"\u2266": "\\leqq ",
"\u2267": "\\geqq ",
"\u2268": "\\lneqq ",
"\u2269": "\\gneqq ",
"\u226a": "\\ll ",
"\u226b": "\\gg ",
"\u2208": "\\in ",
"\u2209": "\\notin ",
"\u220b": "\\ni ",
"\u220c": "\\nni ",
# Ordinary symbols
"\u221e": "\\infty ",
# Binary relations
"\u00b1": "\\pm ",
"\u2213": "\\mp ",
# Italic, Latin, uppercase
"\U0001d434": "A",
"\U0001d435": "B",
"\U0001d436": "C",
"\U0001d437": "D",
"\U0001d438": "E",
"\U0001d439": "F",
"\U0001d43a": "G",
"\U0001d43b": "H",
"\U0001d43c": "I",
"\U0001d43d": "J",
"\U0001d43e": "K",
"\U0001d43f": "L",
"\U0001d440": "M",
"\U0001d441": "N",
"\U0001d442": "O",
"\U0001d443": "P",
"\U0001d444": "Q",
"\U0001d445": "R",
"\U0001d446": "S",
"\U0001d447": "T",
"\U0001d448": "U",
"\U0001d449": "V",
"\U0001d44a": "W",
"\U0001d44b": "X",
"\U0001d44c": "Y",
"\U0001d44d": "Z",
# Italic, Latin, lowercase
"\U0001d44e": "a",
"\U0001d44f": "b",
"\U0001d450": "c",
"\U0001d451": "d",
"\U0001d452": "e",
"\U0001d453": "f",
"\U0001d454": "g",
"\U0001d456": "i",
"\U0001d457": "j",
"\U0001d458": "k",
"\U0001d459": "l",
"\U0001d45a": "m",
"\U0001d45b": "n",
"\U0001d45c": "o",
"\U0001d45d": "p",
"\U0001d45e": "q",
"\U0001d45f": "r",
"\U0001d460": "s",
"\U0001d461": "t",
"\U0001d462": "u",
"\U0001d463": "v",
"\U0001d464": "w",
"\U0001d465": "x",
"\U0001d466": "y",
"\U0001d467": "z",
}
FUNC = {
"sin": "\\sin({fe})",
"cos": "\\cos({fe})",
"tan": "\\tan({fe})",
"arcsin": "\\arcsin({fe})",
"arccos": "\\arccos({fe})",
"arctan": "\\arctan({fe})",
"arccot": "\\arccot({fe})",
"sinh": "\\sinh({fe})",
"cosh": "\\cosh({fe})",
"tanh": "\\tanh({fe})",
"coth": "\\coth({fe})",
"sec": "\\sec({fe})",
"csc": "\\csc({fe})",
}
FUNC_PLACE = "{fe}"
BRK = "\\\\"
CHR_DEFAULT = {
"ACC_VAL": "\\hat{{{0}}}",
}
POS = {
"top": "\\overline{{{0}}}", # not sure
"bot": "\\underline{{{0}}}",
}
POS_DEFAULT = {
"BAR_VAL": "\\overline{{{0}}}",
}
SUB = "_{{{0}}}"
SUP = "^{{{0}}}"
F = {
"bar": "\\frac{{{num}}}{{{den}}}",
"skw": r"^{{{num}}}/_{{{den}}}",
"noBar": "\\genfrac{{}}{{}}{{0pt}}{{}}{{{num}}}{{{den}}}",
"lin": "{{{num}}}/{{{den}}}",
}
F_DEFAULT = "\\frac{{{num}}}{{{den}}}"
D = "\\left{left}{text}\\right{right}"
D_DEFAULT = {
"left": "(",
"right": ")",
"null": ".",
}
RAD = "\\sqrt[{deg}]{{{text}}}"
RAD_DEFAULT = "\\sqrt{{{text}}}"
ARR = "\\begin{{array}}{{c}}{text}\\end{{array}}"
LIM_FUNC = {
"lim": "\\lim_{{{lim}}}",
"max": "\\max_{{{lim}}}",
"min": "\\min_{{{lim}}}",
}
LIM_TO = ("\\rightarrow", "\\to")
LIM_UPP = "\\overset{{{lim}}}{{{text}}}"
M = "\\begin{{matrix}}{text}\\end{{matrix}}"
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/src/markitdown/converter_utils/docx/math/__init__.py | packages/markitdown/src/markitdown/converter_utils/docx/math/__init__.py | python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false | |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/_test_vectors.py | packages/markitdown/tests/_test_vectors.py | import dataclasses
from typing import List
@dataclasses.dataclass(frozen=True, kw_only=True)
class FileTestVector(object):
filename: str
mimetype: str | None
charset: str | None
url: str | None
must_include: List[str]
must_not_include: List[str]
GENERAL_TEST_VECTORS = [
FileTestVector(
filename="test.docx",
mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
charset=None,
url=None,
must_include=[
"314b0a30-5b04-470b-b9f7-eed2c2bec74a",
"49e168b7-d2ae-407f-a055-2167576f39a1",
"## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
"# Abstract",
"# Introduction",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"data:image/png;base64...",
],
must_not_include=[
"data:image/png;base64,iVBORw0KGgoAAAANSU",
],
),
FileTestVector(
filename="test.xlsx",
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
charset=None,
url=None,
must_include=[
"## 09060124-b5e7-4717-9d07-3c046eb",
"6ff4173b-42a5-4784-9b19-f49caff4d93d",
"affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
],
must_not_include=[],
),
FileTestVector(
filename="test.xls",
mimetype="application/vnd.ms-excel",
charset=None,
url=None,
must_include=[
"## 09060124-b5e7-4717-9d07-3c046eb",
"6ff4173b-42a5-4784-9b19-f49caff4d93d",
"affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
],
must_not_include=[],
),
FileTestVector(
filename="test.pptx",
mimetype="application/vnd.openxmlformats-officedocument.presentationml.presentation",
charset=None,
url=None,
must_include=[
"2cdda5c8-e50e-4db4-b5f0-9722a649f455",
"04191ea8-5c73-4215-a1d3-1cfb43aaaf12",
"44bf7d06-5e7a-4a40-a2e1-a2e42ef28c8a",
"1b92870d-e3b5-4e65-8153-919f4ff45592",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"a3f6004b-6f4f-4ea8-bee3-3741f4dc385f", # chart title
"2003", # chart value
"",
],
must_not_include=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQE"],
),
FileTestVector(
filename="test_outlook_msg.msg",
mimetype="application/vnd.ms-outlook",
charset=None,
url=None,
must_include=[
"# Email Message",
"**From:** test.sender@example.com",
"**To:** test.recipient@example.com",
"**Subject:** Test Email Message",
"## Content",
"This is the body of the test email message",
],
must_not_include=[],
),
FileTestVector(
filename="test.pdf",
mimetype="application/pdf",
charset=None,
url=None,
must_include=[
"While there is contemporaneous exploration of multi-agent approaches"
],
must_not_include=[],
),
FileTestVector(
filename="test_blog.html",
mimetype="text/html",
charset="utf-8",
url="https://microsoft.github.io/autogen/blog/2023/04/21/LLM-tuning-math",
must_include=[
"Large language models (LLMs) are powerful tools that can generate natural language texts for various applications, such as chatbots, summarization, translation, and more. GPT-4 is currently the state of the art LLM in the world. Is model selection irrelevant? What about inference parameters?",
"an example where high cost can easily prevent a generic complex",
],
must_not_include=[],
),
FileTestVector(
filename="test_wikipedia.html",
mimetype="text/html",
charset="utf-8",
url="https://en.wikipedia.org/wiki/Microsoft",
must_include=[
"Microsoft entered the operating system (OS) business in 1980 with its own version of [Unix]",
'Microsoft was founded by [Bill Gates](/wiki/Bill_Gates "Bill Gates")',
],
must_not_include=[
"You are encouraged to create an account and log in",
"154 languages",
"move to sidebar",
],
),
FileTestVector(
filename="test_serp.html",
mimetype="text/html",
charset="utf-8",
url="https://www.bing.com/search?q=microsoft+wikipedia",
must_include=[
"](https://en.wikipedia.org/wiki/Microsoft",
"Microsoft Corporation is **an American multinational corporation and technology company headquartered** in Redmond",
"1995β2007: Foray into the Web, Windows 95, Windows XP, and Xbox",
],
must_not_include=[
"https://www.bing.com/ck/a?!&&p=",
"data:image/svg+xml,%3Csvg%20width%3D",
],
),
FileTestVector(
filename="test_mskanji.csv",
mimetype="text/csv",
charset="cp932",
url=None,
must_include=[
"| εε | εΉ΄ι½’ | δ½ζ |",
"| --- | --- | --- |",
"| δ½θ€ε€ͺι | 30 | ζ±δΊ¬ |",
"| δΈζ¨θ±ε | 25 | ε€§ιͺ |",
"| ι«ζ©ζ·³ | 35 | εε€ε± |",
],
must_not_include=[],
),
FileTestVector(
filename="test.json",
mimetype="application/json",
charset="ascii",
url=None,
must_include=[
"5b64c88c-b3c3-4510-bcb8-da0b200602d8",
"9700dc99-6685-40b4-9a3a-5e406dcb37f3",
],
must_not_include=[],
),
FileTestVector(
filename="test_rss.xml",
mimetype="text/xml",
charset="utf-8",
url=None,
must_include=[
"# The Official Microsoft Blog",
"## Ignite 2024: Why nearly 70% of the Fortune 500 now use Microsoft 365 Copilot",
"In the case of AI, it is absolutely true that the industry is moving incredibly fast",
],
must_not_include=["<rss", "<feed"],
),
FileTestVector(
filename="test_notebook.ipynb",
mimetype="application/json",
charset="ascii",
url=None,
must_include=[
"# Test Notebook",
"```python",
'print("markitdown")',
"```",
"## Code Cell Below",
],
must_not_include=[
"nbformat",
"nbformat_minor",
],
),
FileTestVector(
filename="test_files.zip",
mimetype="application/zip",
charset=None,
url=None,
must_include=[
"314b0a30-5b04-470b-b9f7-eed2c2bec74a",
"49e168b7-d2ae-407f-a055-2167576f39a1",
"## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
"# Abstract",
"# Introduction",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"2cdda5c8-e50e-4db4-b5f0-9722a649f455",
"04191ea8-5c73-4215-a1d3-1cfb43aaaf12",
"44bf7d06-5e7a-4a40-a2e1-a2e42ef28c8a",
"1b92870d-e3b5-4e65-8153-919f4ff45592",
"## 09060124-b5e7-4717-9d07-3c046eb",
"6ff4173b-42a5-4784-9b19-f49caff4d93d",
"affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
"Microsoft entered the operating system (OS) business in 1980 with its own version of [Unix]",
'Microsoft was founded by [Bill Gates](/wiki/Bill_Gates "Bill Gates")',
],
must_not_include=[],
),
FileTestVector(
filename="test.epub",
mimetype="application/epub+zip",
charset=None,
url=None,
must_include=[
"**Authors:** Test Author",
"A test EPUB document for MarkItDown testing",
"# Chapter 1: Test Content",
"This is a **test** paragraph with some formatting",
"* A bullet point",
"* Another point",
"# Chapter 2: More Content",
"*different* style",
"> This is a blockquote for testing",
],
must_not_include=[],
),
]
DATA_URI_TEST_VECTORS = [
FileTestVector(
filename="test.docx",
mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
charset=None,
url=None,
must_include=[
"314b0a30-5b04-470b-b9f7-eed2c2bec74a",
"49e168b7-d2ae-407f-a055-2167576f39a1",
"## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
"# Abstract",
"# Introduction",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"data:image/png;base64,iVBORw0KGgoAAAANSU",
],
must_not_include=[
"data:image/png;base64...",
],
),
FileTestVector(
filename="test.pptx",
mimetype="application/vnd.openxmlformats-officedocument.presentationml.presentation",
charset=None,
url=None,
must_include=[
"2cdda5c8-e50e-4db4-b5f0-9722a649f455",
"04191ea8-5c73-4215-a1d3-1cfb43aaaf12",
"44bf7d06-5e7a-4a40-a2e1-a2e42ef28c8a",
"1b92870d-e3b5-4e65-8153-919f4ff45592",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"a3f6004b-6f4f-4ea8-bee3-3741f4dc385f", # chart title
"2003", # chart value
"![This phrase of the caption is Human-written.]", # image caption
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQE",
],
must_not_include=[
"",
],
),
]
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/test_docintel_html.py | packages/markitdown/tests/test_docintel_html.py | import io
from markitdown.converters._doc_intel_converter import (
DocumentIntelligenceConverter,
DocumentIntelligenceFileType,
)
from markitdown._stream_info import StreamInfo
def _make_converter(file_types):
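# Create an instance via __new__, bypassing __init__ (which would normally
# require a Document Intelligence endpoint/credentials), so that accepts()
# can be exercised in isolation.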
conv = DocumentIntelligenceConverter.__new__(DocumentIntelligenceConverter)
conv._file_types = file_types
return conv
def test_docintel_accepts_html_extension():
conv = _make_converter([DocumentIntelligenceFileType.HTML])
stream_info = StreamInfo(mimetype=None, extension=".html")
assert conv.accepts(io.BytesIO(b""), stream_info)
def test_docintel_accepts_html_mimetype():
conv = _make_converter([DocumentIntelligenceFileType.HTML])
stream_info = StreamInfo(mimetype="text/html", extension=None)
assert conv.accepts(io.BytesIO(b""), stream_info)
stream_info = StreamInfo(mimetype="application/xhtml+xml", extension=None)
assert conv.accepts(io.BytesIO(b""), stream_info)
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/test_cli_vectors.py | packages/markitdown/tests/test_cli_vectors.py | #!/usr/bin/env python3 -m pytest
import os
import time
import pytest
import subprocess
import locale
from typing import List
if __name__ == "__main__":
from _test_vectors import (
GENERAL_TEST_VECTORS,
DATA_URI_TEST_VECTORS,
FileTestVector,
)
else:
from ._test_vectors import (
GENERAL_TEST_VECTORS,
DATA_URI_TEST_VECTORS,
FileTestVector,
)
skip_remote = bool(os.environ.get("GITHUB_ACTIONS"))  # Don't run these tests in CI
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
TEST_FILES_URL = "https://raw.githubusercontent.com/microsoft/markitdown/refs/heads/main/packages/markitdown/tests/test_files"
# Prepare CLI test vectors (skip vectors that require mocking the URL)
CLI_TEST_VECTORS: List[FileTestVector] = []
for test_vector in GENERAL_TEST_VECTORS:
if test_vector.url is not None:
continue
CLI_TEST_VECTORS.append(test_vector)
@pytest.fixture(scope="session")
def shared_tmp_dir(tmp_path_factory):
return tmp_path_factory.mktemp("pytest_tmp")
@pytest.mark.parametrize("test_vector", CLI_TEST_VECTORS)
def test_output_to_stdout(shared_tmp_dir, test_vector) -> None:
"""Test that the CLI outputs to stdout correctly."""
result = subprocess.run(
[
"python",
"-m",
"markitdown",
os.path.join(TEST_FILES_DIR, test_vector.filename),
],
capture_output=True,
text=True,
)
assert result.returncode == 0, f"CLI exited with error: {result.stderr}"
for test_string in test_vector.must_include:
assert test_string in result.stdout
for test_string in test_vector.must_not_include:
assert test_string not in result.stdout
@pytest.mark.parametrize("test_vector", CLI_TEST_VECTORS)
def test_output_to_file(shared_tmp_dir, test_vector) -> None:
"""Test that the CLI outputs to a file correctly."""
output_file = os.path.join(shared_tmp_dir, test_vector.filename + ".output")
result = subprocess.run(
[
"python",
"-m",
"markitdown",
"-o",
output_file,
os.path.join(TEST_FILES_DIR, test_vector.filename),
],
capture_output=True,
text=True,
)
assert result.returncode == 0, f"CLI exited with error: {result.stderr}"
assert os.path.exists(output_file), f"Output file not created: {output_file}"
with open(output_file, "r") as f:
output_data = f.read()
for test_string in test_vector.must_include:
assert test_string in output_data
for test_string in test_vector.must_not_include:
assert test_string not in output_data
os.remove(output_file)
assert not os.path.exists(output_file), f"Output file not deleted: {output_file}"
@pytest.mark.parametrize("test_vector", CLI_TEST_VECTORS)
def test_input_from_stdin_without_hints(shared_tmp_dir, test_vector) -> None:
"""Test that the CLI readds from stdin correctly."""
test_input = b""
with open(os.path.join(TEST_FILES_DIR, test_vector.filename), "rb") as stream:
test_input = stream.read()
result = subprocess.run(
[
"python",
"-m",
"markitdown",
],
input=test_input,
capture_output=True,
text=False,
)
stdout = result.stdout.decode(locale.getpreferredencoding())
assert (
result.returncode == 0
), f"CLI exited with error: {result.stderr.decode('utf-8')}"
for test_string in test_vector.must_include:
assert test_string in stdout
for test_string in test_vector.must_not_include:
assert test_string not in stdout
@pytest.mark.skipif(
skip_remote,
reason="do not run tests that query external urls",
)
@pytest.mark.parametrize("test_vector", CLI_TEST_VECTORS)
def test_convert_url(shared_tmp_dir, test_vector):
"""Test the conversion of a stream with no stream info."""
# Note: tmp_dir is not used here, but is needed to match the signature
time.sleep(1) # Ensure we don't hit rate limits
result = subprocess.run(
["python", "-m", "markitdown", TEST_FILES_URL + "/" + test_vector.filename],
capture_output=True,
text=False,
)
stdout = result.stdout.decode(locale.getpreferredencoding())
assert result.returncode == 0, f"CLI exited with error: {result.stderr}"
for test_string in test_vector.must_include:
assert test_string in stdout
for test_string in test_vector.must_not_include:
assert test_string not in stdout
@pytest.mark.parametrize("test_vector", DATA_URI_TEST_VECTORS)
def test_output_to_file_with_data_uris(shared_tmp_dir, test_vector) -> None:
"""Test CLI functionality when keep_data_uris is enabled"""
output_file = os.path.join(shared_tmp_dir, test_vector.filename + ".output")
result = subprocess.run(
[
"python",
"-m",
"markitdown",
"--keep-data-uris",
"-o",
output_file,
os.path.join(TEST_FILES_DIR, test_vector.filename),
],
capture_output=True,
text=True,
)
assert result.returncode == 0, f"CLI exited with error: {result.stderr}"
assert os.path.exists(output_file), f"Output file not created: {output_file}"
with open(output_file, "r") as f:
output_data = f.read()
for test_string in test_vector.must_include:
assert test_string in output_data
for test_string in test_vector.must_not_include:
assert test_string not in output_data
os.remove(output_file)
assert not os.path.exists(output_file), f"Output file not deleted: {output_file}"
if __name__ == "__main__":
import tempfile
"""Runs this file's tests from the command line."""
with tempfile.TemporaryDirectory() as tmp_dir:
# General tests
for test_function in [
test_output_to_stdout,
test_output_to_file,
test_input_from_stdin_without_hints,
test_convert_url,
]:
for test_vector in CLI_TEST_VECTORS:
print(
f"Running {test_function.__name__} on {test_vector.filename}...",
end="",
)
test_function(tmp_dir, test_vector)
print("OK")
# Data URI tests
for test_function in [
test_output_to_file_with_data_uris,
]:
for test_vector in DATA_URI_TEST_VECTORS:
print(
f"Running {test_function.__name__} on {test_vector.filename}...",
end="",
)
test_function(tmp_dir, test_vector)
print("OK")
print("All tests passed!")
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/test_cli_misc.py | packages/markitdown/tests/test_cli_misc.py | #!/usr/bin/env python3 -m pytest
import subprocess
from markitdown import __version__
# This file contains CLI tests that are not directly tested by the FileTestVectors.
# This includes things like help messages, version numbers, and invalid flags.
def test_version() -> None:
result = subprocess.run(
["python", "-m", "markitdown", "--version"], capture_output=True, text=True
)
assert result.returncode == 0, f"CLI exited with error: {result.stderr}"
assert __version__ in result.stdout, f"Version not found in output: {result.stdout}"
def test_invalid_flag() -> None:
result = subprocess.run(
["python", "-m", "markitdown", "--foobar"], capture_output=True, text=True
)
assert result.returncode != 0, f"CLI exited with error: {result.stderr}"
assert (
"unrecognized arguments" in result.stderr
), "Expected 'unrecognized arguments' to appear in STDERR"
assert "SYNTAX" in result.stderr, "Expected 'SYNTAX' to appear in STDERR"
if __name__ == "__main__":
"""Runs this file's tests from the command line."""
test_version()
test_invalid_flag()
print("All tests passed!")
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/__init__.py | packages/markitdown/tests/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/test_module_vectors.py | packages/markitdown/tests/test_module_vectors.py | #!/usr/bin/env python3 -m pytest
import os
import time
import pytest
import base64
from pathlib import Path
if __name__ == "__main__":
from _test_vectors import GENERAL_TEST_VECTORS, DATA_URI_TEST_VECTORS
else:
from ._test_vectors import GENERAL_TEST_VECTORS, DATA_URI_TEST_VECTORS
from markitdown import (
MarkItDown,
StreamInfo,
)
skip_remote = bool(os.environ.get("GITHUB_ACTIONS"))  # Don't run these tests in CI
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
TEST_FILES_URL = "https://raw.githubusercontent.com/microsoft/markitdown/refs/heads/main/packages/markitdown/tests/test_files"
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_guess_stream_info(test_vector):
"""Test the ability to guess stream info."""
markitdown = MarkItDown()
local_path = os.path.join(TEST_FILES_DIR, test_vector.filename)
expected_extension = os.path.splitext(test_vector.filename)[1]
with open(local_path, "rb") as stream:
guesses = markitdown._get_stream_info_guesses(
stream,
base_guess=StreamInfo(
filename=os.path.basename(test_vector.filename),
local_path=local_path,
extension=expected_extension,
),
)
# For some limited exceptions, we can't guarantee the exact
# mimetype or extension, so we'll special-case them here.
if test_vector.filename in [
"test_outlook_msg.msg",
]:
return
assert guesses[0].mimetype == test_vector.mimetype
assert guesses[0].extension == expected_extension
assert guesses[0].charset == test_vector.charset
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_convert_local(test_vector):
"""Test the conversion of a local file."""
markitdown = MarkItDown()
result = markitdown.convert(
os.path.join(TEST_FILES_DIR, test_vector.filename), url=test_vector.url
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_convert_stream_with_hints(test_vector):
"""Test the conversion of a stream with full stream info."""
markitdown = MarkItDown()
stream_info = StreamInfo(
extension=os.path.splitext(test_vector.filename)[1],
mimetype=test_vector.mimetype,
charset=test_vector.charset,
)
with open(os.path.join(TEST_FILES_DIR, test_vector.filename), "rb") as stream:
result = markitdown.convert(
stream, stream_info=stream_info, url=test_vector.url
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_convert_stream_without_hints(test_vector):
"""Test the conversion of a stream with no stream info."""
markitdown = MarkItDown()
with open(os.path.join(TEST_FILES_DIR, test_vector.filename), "rb") as stream:
result = markitdown.convert(stream, url=test_vector.url)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.skipif(
skip_remote,
reason="do not run tests that query external urls",
)
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_convert_http_uri(test_vector):
"""Test the conversion of an HTTP:// or HTTPS:// URI."""
markitdown = MarkItDown()
time.sleep(1) # Ensure we don't hit rate limits
result = markitdown.convert(
TEST_FILES_URL + "/" + test_vector.filename,
url=test_vector.url, # Mock where this file would be found
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_convert_file_uri(test_vector):
"""Test the conversion of a file:// URI."""
markitdown = MarkItDown()
result = markitdown.convert(
Path(os.path.join(TEST_FILES_DIR, test_vector.filename)).as_uri(),
url=test_vector.url,
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.parametrize("test_vector", GENERAL_TEST_VECTORS)
def test_convert_data_uri(test_vector):
"""Test the conversion of a data URI."""
markitdown = MarkItDown()
data = ""
with open(os.path.join(TEST_FILES_DIR, test_vector.filename), "rb") as stream:
data = base64.b64encode(stream.read()).decode("utf-8")
mimetype = test_vector.mimetype
data_uri = f"data:{mimetype};base64,{data}"
result = markitdown.convert(
data_uri,
url=test_vector.url,
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.parametrize("test_vector", DATA_URI_TEST_VECTORS)
def test_convert_keep_data_uris(test_vector):
"""Test API functionality when keep_data_uris is enabled"""
markitdown = MarkItDown()
# Test local file conversion
result = markitdown.convert(
os.path.join(TEST_FILES_DIR, test_vector.filename),
keep_data_uris=True,
url=test_vector.url,
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
@pytest.mark.parametrize("test_vector", DATA_URI_TEST_VECTORS)
def test_convert_stream_keep_data_uris(test_vector):
"""Test the conversion of a stream with no stream info."""
markitdown = MarkItDown()
stream_info = StreamInfo(
extension=os.path.splitext(test_vector.filename)[1],
mimetype=test_vector.mimetype,
charset=test_vector.charset,
)
with open(os.path.join(TEST_FILES_DIR, test_vector.filename), "rb") as stream:
result = markitdown.convert(
stream, stream_info=stream_info, keep_data_uris=True, url=test_vector.url
)
for string in test_vector.must_include:
assert string in result.markdown
for string in test_vector.must_not_include:
assert string not in result.markdown
if __name__ == "__main__":
"""Runs this file's tests from the command line."""
# General tests
for test_function in [
test_guess_stream_info,
test_convert_local,
test_convert_stream_with_hints,
test_convert_stream_without_hints,
test_convert_http_uri,
test_convert_file_uri,
test_convert_data_uri,
]:
for test_vector in GENERAL_TEST_VECTORS:
print(
f"Running {test_function.__name__} on {test_vector.filename}...", end=""
)
test_function(test_vector)
print("OK")
# Data URI tests
for test_function in [
test_convert_keep_data_uris,
test_convert_stream_keep_data_uris,
]:
for test_vector in DATA_URI_TEST_VECTORS:
print(
f"Running {test_function.__name__} on {test_vector.filename}...", end=""
)
test_function(test_vector)
print("OK")
print("All tests passed!")
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown/tests/test_module_misc.py | packages/markitdown/tests/test_module_misc.py | #!/usr/bin/env python3 -m pytest
import io
import os
import re
import shutil
import pytest
from unittest.mock import MagicMock
from markitdown._uri_utils import parse_data_uri, file_uri_to_path
from markitdown import (
MarkItDown,
UnsupportedFormatException,
FileConversionException,
StreamInfo,
)
# This file contains module tests that are not directly tested by the FileTestVectors.
# This includes things like helper functions and runtime conversion options
# (e.g., LLM clients, exiftool path, transcription services, etc.)
skip_remote = bool(os.environ.get("GITHUB_ACTIONS"))  # Don't run these tests in CI
# Don't run the llm tests without a key and the client library
skip_llm = not os.environ.get("OPENAI_API_KEY")
try:
import openai
except ModuleNotFoundError:
skip_llm = True
# Skip exiftool tests if not installed
skip_exiftool = shutil.which("exiftool") is None
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
JPG_TEST_EXIFTOOL = {
"Author": "AutoGen Authors",
"Title": "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"Description": "AutoGen enables diverse LLM-based applications",
"ImageSize": "1615x1967",
"DateTimeOriginal": "2024:03:14 22:10:00",
}
MP3_TEST_EXIFTOOL = {
"Title": "f67a499e-a7d0-4ca3-a49b-358bd934ae3e",
"Artist": "Artist Name Test String",
"Album": "Album Name Test String",
"SampleRate": "48000",
}
PDF_TEST_URL = "https://arxiv.org/pdf/2308.08155v2.pdf"
PDF_TEST_STRINGS = [
"While there is contemporaneous exploration of multi-agent approaches"
]
YOUTUBE_TEST_URL = "https://www.youtube.com/watch?v=V2qZ_lgxTzg"
YOUTUBE_TEST_STRINGS = [
"## AutoGen FULL Tutorial with Python (Step-By-Step)",
"This is an intermediate tutorial for installing and using AutoGen locally",
"PT15M4S",
"the model we're going to be using today is GPT 3.5 turbo", # From the transcript
]
DOCX_COMMENT_TEST_STRINGS = [
"314b0a30-5b04-470b-b9f7-eed2c2bec74a",
"49e168b7-d2ae-407f-a055-2167576f39a1",
"## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
"# Abstract",
"# Introduction",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"This is a test comment. 12df-321a",
"Yet another comment in the doc. 55yiyi-asd09",
]
BLOG_TEST_URL = "https://microsoft.github.io/autogen/blog/2023/04/21/LLM-tuning-math"
BLOG_TEST_STRINGS = [
"Large language models (LLMs) are powerful tools that can generate natural language texts for various applications, such as chatbots, summarization, translation, and more. GPT-4 is currently the state of the art LLM in the world. Is model selection irrelevant? What about inference parameters?",
"an example where high cost can easily prevent a generic complex",
]
LLM_TEST_STRINGS = [
"5bda1dd6",
]
PPTX_TEST_STRINGS = [
"2cdda5c8-e50e-4db4-b5f0-9722a649f455",
"04191ea8-5c73-4215-a1d3-1cfb43aaaf12",
"44bf7d06-5e7a-4a40-a2e1-a2e42ef28c8a",
"1b92870d-e3b5-4e65-8153-919f4ff45592",
"AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
"a3f6004b-6f4f-4ea8-bee3-3741f4dc385f", # chart title
"2003", # chart value
]
# --- Helper Functions ---
def validate_strings(result, expected_strings, exclude_strings=None):
"""Validate presence or absence of specific strings."""
text_content = result.text_content.replace("\\", "")
for string in expected_strings:
assert string in text_content
if exclude_strings:
for string in exclude_strings:
assert string not in text_content
def test_stream_info_operations() -> None:
"""Test operations performed on StreamInfo objects."""
stream_info_original = StreamInfo(
mimetype="mimetype.1",
extension="extension.1",
charset="charset.1",
filename="filename.1",
local_path="local_path.1",
url="url.1",
)
# Check updating all attributes by keyword
keywords = ["mimetype", "extension", "charset", "filename", "local_path", "url"]
for keyword in keywords:
updated_stream_info = stream_info_original.copy_and_update(
**{keyword: f"{keyword}.2"}
)
# Make sure the targeted attribute is updated
assert getattr(updated_stream_info, keyword) == f"{keyword}.2"
# Make sure the other attributes are unchanged
for k in keywords:
if k != keyword:
assert getattr(stream_info_original, k) == getattr(
updated_stream_info, k
)
# Check updating all attributes by passing a new StreamInfo object
keywords = ["mimetype", "extension", "charset", "filename", "local_path", "url"]
for keyword in keywords:
updated_stream_info = stream_info_original.copy_and_update(
StreamInfo(**{keyword: f"{keyword}.2"})
)
# Make sure the targeted attribute is updated
assert getattr(updated_stream_info, keyword) == f"{keyword}.2"
# Make sure the other attributes are unchanged
for k in keywords:
if k != keyword:
assert getattr(stream_info_original, k) == getattr(
updated_stream_info, k
)
# Check mixing and matching
updated_stream_info = stream_info_original.copy_and_update(
StreamInfo(extension="extension.2", filename="filename.2"),
mimetype="mimetype.3",
charset="charset.3",
)
assert updated_stream_info.extension == "extension.2"
assert updated_stream_info.filename == "filename.2"
assert updated_stream_info.mimetype == "mimetype.3"
assert updated_stream_info.charset == "charset.3"
assert updated_stream_info.local_path == "local_path.1"
assert updated_stream_info.url == "url.1"
# Check multiple StreamInfo objects
updated_stream_info = stream_info_original.copy_and_update(
StreamInfo(extension="extension.4", filename="filename.5"),
StreamInfo(mimetype="mimetype.6", charset="charset.7"),
)
assert updated_stream_info.extension == "extension.4"
assert updated_stream_info.filename == "filename.5"
assert updated_stream_info.mimetype == "mimetype.6"
assert updated_stream_info.charset == "charset.7"
assert updated_stream_info.local_path == "local_path.1"
assert updated_stream_info.url == "url.1"
def test_data_uris() -> None:
# Test basic parsing of data URIs
data_uri = "data:text/plain;base64,SGVsbG8sIFdvcmxkIQ=="
mime_type, attributes, data = parse_data_uri(data_uri)
assert mime_type == "text/plain"
assert len(attributes) == 0
assert data == b"Hello, World!"
data_uri = "data:base64,SGVsbG8sIFdvcmxkIQ=="
mime_type, attributes, data = parse_data_uri(data_uri)
assert mime_type is None
assert len(attributes) == 0
assert data == b"Hello, World!"
data_uri = "data:text/plain;charset=utf-8;base64,SGVsbG8sIFdvcmxkIQ=="
mime_type, attributes, data = parse_data_uri(data_uri)
assert mime_type == "text/plain"
assert len(attributes) == 1
assert attributes["charset"] == "utf-8"
assert data == b"Hello, World!"
data_uri = "data:,Hello%2C%20World%21"
mime_type, attributes, data = parse_data_uri(data_uri)
assert mime_type is None
assert len(attributes) == 0
assert data == b"Hello, World!"
data_uri = "data:text/plain,Hello%2C%20World%21"
mime_type, attributes, data = parse_data_uri(data_uri)
assert mime_type == "text/plain"
assert len(attributes) == 0
assert data == b"Hello, World!"
data_uri = "data:text/plain;charset=utf-8,Hello%2C%20World%21"
mime_type, attributes, data = parse_data_uri(data_uri)
assert mime_type == "text/plain"
assert len(attributes) == 1
assert attributes["charset"] == "utf-8"
assert data == b"Hello, World!"
def test_file_uris() -> None:
# Test file URI with an empty host
file_uri = "file:///path/to/file.txt"
netloc, path = file_uri_to_path(file_uri)
assert netloc is None
assert path == "/path/to/file.txt"
# Test file URI with no host
file_uri = "file:/path/to/file.txt"
netloc, path = file_uri_to_path(file_uri)
assert netloc is None
assert path == "/path/to/file.txt"
# Test file URI with localhost
file_uri = "file://localhost/path/to/file.txt"
netloc, path = file_uri_to_path(file_uri)
assert netloc == "localhost"
assert path == "/path/to/file.txt"
# Test file URI with query parameters
file_uri = "file:///path/to/file.txt?param=value"
netloc, path = file_uri_to_path(file_uri)
assert netloc is None
assert path == "/path/to/file.txt"
# Test file URI with fragment
file_uri = "file:///path/to/file.txt#fragment"
netloc, path = file_uri_to_path(file_uri)
assert netloc is None
assert path == "/path/to/file.txt"
def test_docx_comments() -> None:
# Test DOCX processing, with comments and setting style_map on init
markitdown_with_style_map = MarkItDown(style_map="comment-reference => ")
result = markitdown_with_style_map.convert(
os.path.join(TEST_FILES_DIR, "test_with_comment.docx")
)
validate_strings(result, DOCX_COMMENT_TEST_STRINGS)
def test_docx_equations() -> None:
markitdown = MarkItDown()
docx_file = os.path.join(TEST_FILES_DIR, "equations.docx")
result = markitdown.convert(docx_file)
# Check for inline equation m=1 (wrapped with single $) is present
assert "$m=1$" in result.text_content, "Inline equation $m=1$ not found"
# Find block equations wrapped with double $$ and check if they are present
block_equations = re.findall(r"\$\$(.+?)\$\$", result.text_content)
assert block_equations, "No block equations found in the document."
def test_input_as_strings() -> None:
markitdown = MarkItDown()
# Test input from a stream
input_data = b"<html><body><h1>Test</h1></body></html>"
result = markitdown.convert_stream(io.BytesIO(input_data))
assert "# Test" in result.text_content
# Test input with leading blank characters
input_data = b" \n\n\n<html><body><h1>Test</h1></body></html>"
result = markitdown.convert_stream(io.BytesIO(input_data))
assert "# Test" in result.text_content
def test_doc_rlink() -> None:
# Test for: CVE-2025-11849
markitdown = MarkItDown()
# Document with rlink
docx_file = os.path.join(TEST_FILES_DIR, "rlink.docx")
# Directory containing the target rlink file
rlink_tmp_dir = os.path.abspath(os.sep + "tmp")
# Ensure the tmp directory exists
if not os.path.exists(rlink_tmp_dir):
pytest.skip(f"Skipping rlink test; {rlink_tmp_dir} directory does not exist.")
rlink_file_path = os.path.join(rlink_tmp_dir, "test_rlink.txt")
rlink_content = "de658225-569e-4e3d-9ed2-cfb6abf927fc"
b64_prefix = (
"ZGU2NTgyMjUtNTY5ZS00ZTNkLTllZDItY2ZiNmFiZjk" # base64 prefix of rlink_content
)
if os.path.exists(rlink_file_path):
with open(rlink_file_path, "r", encoding="utf-8") as f:
existing_content = f.read()
if existing_content != rlink_content:
raise ValueError(
f"Existing {rlink_file_path} content does not match expected content."
)
else:
with open(rlink_file_path, "w", encoding="utf-8") as f:
f.write(rlink_content)
try:
result = markitdown.convert(docx_file, keep_data_uris=True).text_content
assert (
b64_prefix not in result
) # Make sure the target file was NOT embedded in the output
finally:
os.remove(rlink_file_path)
@pytest.mark.skipif(
skip_remote,
reason="do not run tests that query external urls",
)
def test_markitdown_remote() -> None:
markitdown = MarkItDown()
# By URL
result = markitdown.convert(PDF_TEST_URL)
for test_string in PDF_TEST_STRINGS:
assert test_string in result.text_content
# Youtube
# result = markitdown.convert(YOUTUBE_TEST_URL)
# for test_string in YOUTUBE_TEST_STRINGS:
# assert test_string in result.text_content
@pytest.mark.skipif(
skip_remote,
reason="do not run remotely run speech transcription tests",
)
def test_speech_transcription() -> None:
markitdown = MarkItDown()
# Test WAV files, MP3 and M4A files
for file_name in ["test.wav", "test.mp3", "test.m4a"]:
result = markitdown.convert(os.path.join(TEST_FILES_DIR, file_name))
result_lower = result.text_content.lower()
assert (
("1" in result_lower or "one" in result_lower)
and ("2" in result_lower or "two" in result_lower)
and ("3" in result_lower or "three" in result_lower)
and ("4" in result_lower or "four" in result_lower)
and ("5" in result_lower or "five" in result_lower)
)
def test_exceptions() -> None:
# Check that an exception is raised when trying to convert an unsupported format
markitdown = MarkItDown()
with pytest.raises(UnsupportedFormatException):
markitdown.convert(os.path.join(TEST_FILES_DIR, "random.bin"))
# Check that an exception is raised when trying to convert a file that is corrupted
with pytest.raises(FileConversionException) as exc_info:
markitdown.convert(
os.path.join(TEST_FILES_DIR, "random.bin"), file_extension=".pptx"
)
assert len(exc_info.value.attempts) == 1
assert type(exc_info.value.attempts[0].converter).__name__ == "PptxConverter"
@pytest.mark.skipif(
skip_exiftool,
reason="do not run if exiftool is not installed",
)
def test_markitdown_exiftool() -> None:
which_exiftool = shutil.which("exiftool")
assert which_exiftool is not None
# Test explicitly setting the location of exiftool
markitdown = MarkItDown(exiftool_path=which_exiftool)
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
for key in JPG_TEST_EXIFTOOL:
target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
assert target in result.text_content
# Test setting the exiftool path through an environment variable
os.environ["EXIFTOOL_PATH"] = which_exiftool
markitdown = MarkItDown()
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
for key in JPG_TEST_EXIFTOOL:
target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
assert target in result.text_content
# Test some other media types
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.mp3"))
for key in MP3_TEST_EXIFTOOL:
target = f"{key}: {MP3_TEST_EXIFTOOL[key]}"
assert target in result.text_content
def test_markitdown_llm_parameters() -> None:
"""Test that LLM parameters are correctly passed to the client."""
mock_client = MagicMock()
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="Test caption with red circle and blue square 5bda1dd6"
)
)
]
mock_client.chat.completions.create.return_value = mock_response
test_prompt = "You are a professional test prompt."
markitdown = MarkItDown(
llm_client=mock_client, llm_model="gpt-4o", llm_prompt=test_prompt
)
# Test image file
markitdown.convert(os.path.join(TEST_FILES_DIR, "test_llm.jpg"))
# Verify the prompt was passed to the OpenAI API
assert mock_client.chat.completions.create.called
call_args = mock_client.chat.completions.create.call_args
messages = call_args[1]["messages"]
assert len(messages) == 1
assert messages[0]["content"][0]["text"] == test_prompt
# Reset the mock for the next test
mock_client.chat.completions.create.reset_mock()
# TODO: may only use one test after the llm caption method duplicate has been removed:
# https://github.com/microsoft/markitdown/pull/1254
# Test PPTX file
markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
# Verify the prompt was passed to the OpenAI API for PPTX images too
assert mock_client.chat.completions.create.called
call_args = mock_client.chat.completions.create.call_args
messages = call_args[1]["messages"]
assert len(messages) == 1
assert messages[0]["content"][0]["text"] == test_prompt
@pytest.mark.skipif(
skip_llm,
reason="do not run llm tests without a key",
)
def test_markitdown_llm() -> None:
client = openai.OpenAI()
markitdown = MarkItDown(llm_client=client, llm_model="gpt-4o")
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_llm.jpg"))
for test_string in LLM_TEST_STRINGS:
assert test_string in result.text_content
# This is not super precise. It would also accept "red square", "blue circle",
# "the square is not blue", etc. But it's sufficient for this test.
for test_string in ["red", "circle", "blue", "square"]:
assert test_string in result.text_content.lower()
# Images embedded in PPTX files
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
# LLM Captions are included
for test_string in LLM_TEST_STRINGS:
assert test_string in result.text_content
# Standard alt text is included
validate_strings(result, PPTX_TEST_STRINGS)
if __name__ == "__main__":
"""Runs this file's tests from the command line."""
for test in [
test_stream_info_operations,
test_data_uris,
test_file_uris,
test_docx_comments,
test_input_as_strings,
test_markitdown_remote,
test_speech_transcription,
test_exceptions,
test_doc_rlink,
test_markitdown_exiftool,
test_markitdown_llm_parameters,
test_markitdown_llm,
]:
print(f"Running {test.__name__}...", end="")
test()
print("OK")
print("All tests passed!")
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-sample-plugin/src/markitdown_sample_plugin/_plugin.py | packages/markitdown-sample-plugin/src/markitdown_sample_plugin/_plugin.py | import locale
from typing import BinaryIO, Any
from striprtf.striprtf import rtf_to_text
from markitdown import (
MarkItDown,
DocumentConverter,
DocumentConverterResult,
StreamInfo,
)
__plugin_interface_version__ = (
1 # The version of the plugin interface that this plugin uses
)
ACCEPTED_MIME_TYPE_PREFIXES = [
"text/rtf",
"application/rtf",
]
ACCEPTED_FILE_EXTENSIONS = [".rtf"]
def register_converters(markitdown: MarkItDown, **kwargs):
"""
Called during construction of MarkItDown instances to register converters provided by plugins.
"""
# Simply create and attach an RtfConverter instance
markitdown.register_converter(RtfConverter())
class RtfConverter(DocumentConverter):
"""
Converts an RTF file to Markdown in the simplest possible way.
"""
def accepts(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any,
) -> bool:
mimetype = (stream_info.mimetype or "").lower()
extension = (stream_info.extension or "").lower()
if extension in ACCEPTED_FILE_EXTENSIONS:
return True
for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
if mimetype.startswith(prefix):
return True
return False
def convert(
self,
file_stream: BinaryIO,
stream_info: StreamInfo,
**kwargs: Any,
) -> DocumentConverterResult:
# Read the file stream into a str using the provided charset encoding, or the system default
encoding = stream_info.charset or locale.getpreferredencoding()
stream_data = file_stream.read().decode(encoding)
# Return the result
return DocumentConverterResult(
title=None,
markdown=rtf_to_text(stream_data),
)
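# Minimal direct-use sketch (assumes a local "example.rtf"; MarkItDown itself
# drives the converter through the same accepts()/convert() protocol):
#
#   stream = open("example.rtf", "rb")
#   info = StreamInfo(mimetype="text/rtf", extension=".rtf", filename="example.rtf")
#   converter = RtfConverter()
#   if converter.accepts(stream, info):
#       print(converter.convert(stream, info).markdown)
#   stream.close()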
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-sample-plugin/src/markitdown_sample_plugin/__about__.py | packages/markitdown-sample-plugin/src/markitdown_sample_plugin/__about__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
__version__ = "0.1.0a1"
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-sample-plugin/src/markitdown_sample_plugin/__init__.py | packages/markitdown-sample-plugin/src/markitdown_sample_plugin/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
from ._plugin import __plugin_interface_version__, register_converters, RtfConverter
from .__about__ import __version__
__all__ = [
"__version__",
"__plugin_interface_version__",
"register_converters",
"RtfConverter",
]
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-sample-plugin/tests/__init__.py | packages/markitdown-sample-plugin/tests/__init__.py | # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
#
# SPDX-License-Identifier: MIT
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
microsoft/markitdown | https://github.com/microsoft/markitdown/blob/dde250a456d178fe344fce17ef10d00fe929f680/packages/markitdown-sample-plugin/tests/test_sample_plugin.py | packages/markitdown-sample-plugin/tests/test_sample_plugin.py | #!/usr/bin/env python3 -m pytest
import os
from markitdown import MarkItDown, StreamInfo
from markitdown_sample_plugin import RtfConverter
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
RTF_TEST_STRINGS = {
"This is a Sample RTF File",
"It is included to test if the MarkItDown sample plugin can correctly convert RTF files.",
}
def test_converter() -> None:
"""Tests the RTF converter dirctly."""
with open(os.path.join(TEST_FILES_DIR, "test.rtf"), "rb") as file_stream:
converter = RtfConverter()
result = converter.convert(
file_stream=file_stream,
stream_info=StreamInfo(
mimetype="text/rtf", extension=".rtf", filename="test.rtf"
),
)
for test_string in RTF_TEST_STRINGS:
assert test_string in result.text_content
def test_markitdown() -> None:
"""Tests that MarkItDown correctly loads the plugin."""
md = MarkItDown(enable_plugins=True)
result = md.convert(os.path.join(TEST_FILES_DIR, "test.rtf"))
for test_string in RTF_TEST_STRINGS:
assert test_string in result.text_content
if __name__ == "__main__":
"""Runs this file's tests from the command line."""
test_converter()
test_markitdown()
print("All tests passed.")
| python | MIT | dde250a456d178fe344fce17ef10d00fe929f680 | 2026-01-04T14:38:15.496810Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/fastentrypoints.py | fastentrypoints.py | # Copyright (c) 2016, Aaron Christianson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Monkey patch setuptools to write faster console_scripts with this format:
import sys
from mymodule import entry_function
sys.exit(entry_function())
This is better: it avoids importing pkg_resources, which dominates script startup time.
(c) 2016, Aaron Christianson
http://github.com/ninjaaron/fast-entry_points
'''
from setuptools.command import easy_install
import re
TEMPLATE = r'''\
# -*- coding: utf-8 -*-
# EASY-INSTALL-ENTRY-SCRIPT: '{3}','{4}','{5}'
__requires__ = '{3}'
import re
import sys
from {0} import {1}
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit({2}())'''
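# For a hypothetical entry point 'mycli = mypkg.cli:main' in 'mypkg==1.0',
# TEMPLATE renders to the following script -- note there is no pkg_resources
# import, which is what makes these scripts start quickly:
#
# # -*- coding: utf-8 -*-
# # EASY-INSTALL-ENTRY-SCRIPT: 'mypkg==1.0','console_scripts','mycli'
# __requires__ = 'mypkg==1.0'
# import re
# import sys
# from mypkg.cli import main
# if __name__ == '__main__':
#     sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
#     sys.exit(main())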
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
# ensure_safe_name
if re.search(r'[\\/]', name):
raise ValueError("Path separators not allowed in script names")
script_text = TEMPLATE.format(
ep.module_name, ep.attrs[0], '.'.join(ep.attrs),
spec, group, name)
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
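# Monkey-patch: from here on, every console/gui script setuptools writes uses
# the fast TEMPLATE above instead of the stock pkg_resources-based template.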
easy_install.ScriptWriter.get_args = get_args
def main():
import os
import re
import shutil
import sys
dests = sys.argv[1:] or ['.']
filename = re.sub(r'\.pyc$', '.py', __file__)
for dst in dests:
shutil.copy(filename, dst)
manifest_path = os.path.join(dst, 'MANIFEST.in')
setup_path = os.path.join(dst, 'setup.py')
# Insert the include statement to MANIFEST.in if not present
with open(manifest_path, 'a+') as manifest:
manifest.seek(0)
manifest_content = manifest.read()
if 'include fastentrypoints.py' not in manifest_content:
manifest.write(('\n' if manifest_content else '')
+ 'include fastentrypoints.py')
# Insert the import statement to setup.py if not present
with open(setup_path, 'a+') as setup:
setup.seek(0)
setup_content = setup.read()
if 'import fastentrypoints' not in setup_content:
setup.seek(0)
setup.truncate()
setup.write('import fastentrypoints\n' + setup_content)
print(__name__)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
import pkg_resources
import sys
import os
import fastentrypoints
try:
if int(pkg_resources.get_distribution("pip").version.split('.')[0]) < 6:
print('pip older than 6.0 not supported, please upgrade pip with:\n\n'
' pip install -U pip')
sys.exit(-1)
except pkg_resources.DistributionNotFound:
pass
if os.environ.get('CONVERT_README'):
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
else:
long_description = ''
version = sys.version_info[:2]
if version < (2, 7):
print('thefuck requires Python version 2.7 or later' +
' ({}.{} detected).'.format(*version))
sys.exit(-1)
elif (3, 0) <= version < (3, 5):
print('thefuck requires Python version 3.5 or later' +
' ({}.{} detected).'.format(*version))
sys.exit(-1)
VERSION = '3.32'
install_requires = ['psutil', 'colorama', 'six']
extras_require = {':python_version<"3.4"': ['pathlib2'],
':python_version<"3.3"': ['backports.shutil_get_terminal_size'],
':python_version<="2.7"': ['decorator<5', 'pyte<0.8.1'],
':python_version>"2.7"': ['decorator', 'pyte'],
":sys_platform=='win32'": ['win_unicode_console']}
if sys.platform == "win32":
scripts = ['scripts\\fuck.bat', 'scripts\\fuck.ps1']
entry_points = {'console_scripts': [
'thefuck = thefuck.entrypoints.main:main',
'thefuck_firstuse = thefuck.entrypoints.not_configured:main']}
else:
scripts = []
entry_points = {'console_scripts': [
'thefuck = thefuck.entrypoints.main:main',
'fuck = thefuck.entrypoints.not_configured:main']}
setup(name='thefuck',
version=VERSION,
description="Magnificent app which corrects your previous console command",
long_description=long_description,
author='Vladimir Iakovlev',
author_email='nvbn.rm@gmail.com',
url='https://github.com/nvbn/thefuck',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples',
'tests', 'tests.*', 'release']),
include_package_data=True,
zip_safe=False,
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
install_requires=install_requires,
extras_require=extras_require,
scripts=scripts,
entry_points=entry_points)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/release.py | release.py | #!/usr/bin/env python
from subprocess import call
import os
import re
version = None
def get_new_setup_py_lines():
global version
with open('setup.py', 'r') as sf:
current_setup = sf.readlines()
for line in current_setup:
if line.startswith('VERSION = '):
major, minor = re.findall(r"VERSION = '(\d+)\.(\d+)'", line)[0]
version = "{}.{}".format(major, int(minor) + 1)
yield "VERSION = '{}'\n".format(version)
else:
yield line
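# e.g. the line "VERSION = '3.32'" is rewritten to "VERSION = '3.33'", and the
# bumped number is stashed in the module-global `version` for the git
# commit/tag commands below.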
lines = list(get_new_setup_py_lines())
with open('setup.py', 'w') as sf:
sf.writelines(lines)
call('git pull', shell=True)
call('git commit -am "Bump to {}"'.format(version), shell=True)
call('git tag {}'.format(version), shell=True)
call('git push', shell=True)
call('git push --tags', shell=True)
env = os.environ
env['CONVERT_README'] = 'true'
call('rm -rf dist/*', shell=True, env=env)
call('python setup.py sdist bdist_wheel', shell=True, env=env)
call('twine upload dist/*', shell=True, env=env)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_ui.py | tests/test_ui.py | # -*- encoding: utf-8 -*-
import pytest
from itertools import islice
from thefuck import ui
from thefuck.types import CorrectedCommand
from thefuck import const
@pytest.fixture
def patch_get_key(monkeypatch):
def patch(vals):
vals = iter(vals)
monkeypatch.setattr('thefuck.ui.get_key', lambda: next(vals))
return patch
def test_read_actions(patch_get_key):
patch_get_key([
# Enter:
'\n',
# Enter:
'\r',
# Ignored:
'x', 'y',
# Up:
const.KEY_UP, 'k',
# Down:
const.KEY_DOWN, 'j',
# Ctrl+C:
const.KEY_CTRL_C, 'q'])
assert (list(islice(ui.read_actions(), 8))
== [const.ACTION_SELECT, const.ACTION_SELECT,
const.ACTION_PREVIOUS, const.ACTION_PREVIOUS,
const.ACTION_NEXT, const.ACTION_NEXT,
const.ACTION_ABORT, const.ACTION_ABORT])
def test_command_selector():
selector = ui.CommandSelector(iter([1, 2, 3]))
assert selector.value == 1
selector.next()
assert selector.value == 2
selector.next()
assert selector.value == 3
selector.next()
assert selector.value == 1
selector.previous()
assert selector.value == 3
@pytest.mark.usefixtures('no_colors')
class TestSelectCommand(object):
@pytest.fixture
def commands_with_side_effect(self):
return [CorrectedCommand('ls', lambda *_: None, 100),
CorrectedCommand('cd', lambda *_: None, 100)]
@pytest.fixture
def commands(self):
return [CorrectedCommand('ls', None, 100),
CorrectedCommand('cd', None, 100)]
def test_without_commands(self, capsys):
assert ui.select_command(iter([])) is None
assert capsys.readouterr() == ('', 'No fucks given\n')
def test_without_confirmation(self, capsys, commands, settings):
settings.require_confirmation = False
assert ui.select_command(iter(commands)) == commands[0]
assert capsys.readouterr() == ('', const.USER_COMMAND_MARK + 'ls\n')
def test_without_confirmation_with_side_effects(
self, capsys, commands_with_side_effect, settings):
settings.require_confirmation = False
assert (ui.select_command(iter(commands_with_side_effect))
== commands_with_side_effect[0])
assert capsys.readouterr() == ('', const.USER_COMMAND_MARK + 'ls (+side effect)\n')
def test_with_confirmation(self, capsys, patch_get_key, commands):
patch_get_key(['\n'])
assert ui.select_command(iter(commands)) == commands[0]
assert capsys.readouterr() == (
'', const.USER_COMMAND_MARK + u'\x1b[1K\rls [enter/↑/↓/ctrl+c]\n')
def test_with_confirmation_abort(self, capsys, patch_get_key, commands):
patch_get_key([const.KEY_CTRL_C])
assert ui.select_command(iter(commands)) is None
assert capsys.readouterr() == (
'', const.USER_COMMAND_MARK + u'\x1b[1K\rls [enter/↑/↓/ctrl+c]\nAborted\n')
def test_with_confirmation_with_side_effect(self, capsys, patch_get_key,
commands_with_side_effect):
patch_get_key(['\n'])
assert (ui.select_command(iter(commands_with_side_effect))
== commands_with_side_effect[0])
assert capsys.readouterr() == (
'', const.USER_COMMAND_MARK + u'\x1b[1K\rls (+side effect) [enter/↑/↓/ctrl+c]\n')
def test_with_confirmation_select_second(self, capsys, patch_get_key, commands):
patch_get_key([const.KEY_DOWN, '\n'])
assert ui.select_command(iter(commands)) == commands[1]
stderr = (
u'{mark}\x1b[1K\rls [enter/↑/↓/ctrl+c]'
u'{mark}\x1b[1K\rcd [enter/↑/↓/ctrl+c]\n'
).format(mark=const.USER_COMMAND_MARK)
assert capsys.readouterr() == ('', stderr)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_readme.py | tests/test_readme.py | def test_readme(source_root):
with source_root.joinpath('README.md').open() as f:
readme = f.read()
bundled = source_root.joinpath('thefuck') \
.joinpath('rules') \
.glob('*.py')
for rule in bundled:
if rule.stem != '__init__':
assert rule.stem in readme, \
'Missing rule "{}" in README.md'.format(rule.stem)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_utils.py | tests/test_utils.py | # -*- coding: utf-8 -*-
import pytest
import warnings
from mock import Mock, call, patch
from thefuck.utils import default_settings, \
memoize, get_closest, get_all_executables, replace_argument, \
get_all_matched_commands, is_app, for_app, cache, \
get_valid_history_without_current, _cache, get_close_matches
from thefuck.types import Command
@pytest.mark.parametrize('override, old, new', [
({'key': 'val'}, {}, {'key': 'val'}),
({'key': 'new-val'}, {'key': 'val'}, {'key': 'val'}),
({'key': 'new-val', 'unset': 'unset'}, {'key': 'val'}, {'key': 'val', 'unset': 'unset'})])
def test_default_settings(settings, override, old, new):
settings.clear()
settings.update(old)
default_settings(override)(lambda _: _)(None)
assert settings == new
def test_memoize():
fn = Mock(__name__='fn')
memoized = memoize(fn)
memoized()
memoized()
fn.assert_called_once_with()
@pytest.mark.usefixtures('no_memoize')
def test_no_memoize():
fn = Mock(__name__='fn')
memoized = memoize(fn)
memoized()
memoized()
assert fn.call_count == 2
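

# Illustrative sketch (not part of the original test file): both tests above
# assume `memoize` exposes a `disabled` flag (upstream it is monkeypatched as
# an attribute of the decorator itself). A minimal decorator with that shape,
# assuming hashable positional arguments only, could be:
def _memoize_sketch(fn):
    results = {}

    def wrapper(*args):
        if wrapper.disabled:  # bypass the cache entirely when disabled
            return fn(*args)
        if args not in results:
            results[args] = fn(*args)
        return results[args]
    wrapper.disabled = False
    return wrapper
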
class TestGetClosest(object):
def test_when_can_match(self):
assert 'branch' == get_closest('brnch', ['branch', 'status'])
def test_when_cant_match(self):
assert 'status' == get_closest('st', ['status', 'reset'])
def test_without_fallback(self):
assert get_closest('st', ['status', 'reset'],
fallback_to_first=False) is None
class TestGetCloseMatches(object):
@patch('thefuck.utils.difflib_get_close_matches')
def test_call_with_n(self, difflib_mock):
get_close_matches('', [], 1)
assert difflib_mock.call_args[0][2] == 1
@patch('thefuck.utils.difflib_get_close_matches')
def test_call_without_n(self, difflib_mock, settings):
get_close_matches('', [])
assert difflib_mock.call_args[0][2] == settings.get('num_close_matches')
@pytest.fixture
def get_aliases(mocker):
mocker.patch('thefuck.shells.shell.get_aliases',
return_value=['vim', 'apt-get', 'fsck', 'fuck'])
@pytest.mark.usefixtures('no_memoize', 'get_aliases')
def test_get_all_executables():
all_callables = get_all_executables()
assert 'vim' in all_callables
assert 'fsck' in all_callables
assert 'fuck' not in all_callables
@pytest.fixture
def os_environ_pathsep(monkeypatch, path, pathsep):
env = {'PATH': path}
monkeypatch.setattr('os.environ', env)
monkeypatch.setattr('os.pathsep', pathsep)
return env
@pytest.mark.usefixtures('no_memoize', 'os_environ_pathsep')
@pytest.mark.parametrize('path, pathsep', [
('/foo:/bar:/baz:/foo/bar', ':'),
(r'C:\\foo;C:\\bar;C:\\baz;C:\\foo\\bar', ';')])
def test_get_all_executables_pathsep(path, pathsep):
with patch('thefuck.utils.Path') as Path_mock:
get_all_executables()
Path_mock.assert_has_calls([call(p) for p in path.split(pathsep)], True)
@pytest.mark.usefixtures('no_memoize', 'os_environ_pathsep')
@pytest.mark.parametrize('path, pathsep, excluded', [
('/foo:/bar:/baz:/foo/bar:/mnt/foo', ':', '/mnt/foo'),
(r'C:\\foo;C:\\bar;C:\\baz;C:\\foo\\bar;Z:\\foo', ';', r'Z:\\foo')])
def test_get_all_executables_exclude_paths(path, pathsep, excluded, settings):
settings.init()
settings.excluded_search_path_prefixes = [excluded]
with patch('thefuck.utils.Path') as Path_mock:
get_all_executables()
path_list = path.split(pathsep)
assert call(path_list[-1]) not in Path_mock.mock_calls
assert all(call(p) in Path_mock.mock_calls for p in path_list[:-1])
@pytest.mark.parametrize('args, result', [
(('apt-get instol vim', 'instol', 'install'), 'apt-get install vim'),
(('git brnch', 'brnch', 'branch'), 'git branch')])
def test_replace_argument(args, result):
assert replace_argument(*args) == result
@pytest.mark.parametrize('stderr, result', [
(("git: 'cone' is not a git command. See 'git --help'.\n"
'\n'
'Did you mean one of these?\n'
'\tclone'), ['clone']),
(("git: 're' is not a git command. See 'git --help'.\n"
'\n'
'Did you mean one of these?\n'
'\trebase\n'
'\treset\n'
'\tgrep\n'
'\trm'), ['rebase', 'reset', 'grep', 'rm']),
(('tsuru: "target" is not a tsuru command. See "tsuru help".\n'
'\n'
'Did you mean one of these?\n'
'\tservice-add\n'
'\tservice-bind\n'
'\tservice-doc\n'
'\tservice-info\n'
'\tservice-list\n'
'\tservice-remove\n'
'\tservice-status\n'
'\tservice-unbind'), ['service-add', 'service-bind', 'service-doc',
'service-info', 'service-list', 'service-remove',
'service-status', 'service-unbind'])])
def test_get_all_matched_commands(stderr, result):
assert list(get_all_matched_commands(stderr)) == result
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('script, names, result', [
('/usr/bin/git diff', ['git', 'hub'], True),
('/bin/hdfs dfs -rm foo', ['hdfs'], True),
('git diff', ['git', 'hub'], True),
('hub diff', ['git', 'hub'], True),
('hg diff', ['git', 'hub'], False)])
def test_is_app(script, names, result):
assert is_app(Command(script, ''), *names) == result
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('script, names, result', [
('/usr/bin/git diff', ['git', 'hub'], True),
('/bin/hdfs dfs -rm foo', ['hdfs'], True),
('git diff', ['git', 'hub'], True),
('hub diff', ['git', 'hub'], True),
('hg diff', ['git', 'hub'], False)])
def test_for_app(script, names, result):
@for_app(*names)
def match(command):
return True
assert match(Command(script, '')) == result
class TestCache(object):
@pytest.fixture
def shelve(self, mocker):
value = {}
class _Shelve(object):
def __init__(self, path):
pass
def __setitem__(self, k, v):
value[k] = v
def __getitem__(self, k):
return value[k]
def get(self, k, v=None):
return value.get(k, v)
def close(self):
return
mocker.patch('thefuck.utils.shelve.open', new_callable=lambda: _Shelve)
return value
@pytest.fixture(autouse=True)
def enable_cache(self, monkeypatch, shelve):
monkeypatch.setattr('thefuck.utils.cache.disabled', False)
_cache._init_db()
@pytest.fixture(autouse=True)
def mtime(self, mocker):
mocker.patch('thefuck.utils.os.path.getmtime', return_value=0)
@pytest.fixture
def fn(self):
@cache('~/.bashrc')
def fn():
return 'test'
return fn
@pytest.fixture
def key(self, monkeypatch):
monkeypatch.setattr('thefuck.utils.Cache._get_key',
lambda *_: 'key')
return 'key'
def test_with_blank_cache(self, shelve, fn, key):
assert shelve == {}
assert fn() == 'test'
assert shelve == {key: {'etag': '0', 'value': 'test'}}
def test_with_filled_cache(self, shelve, fn, key):
cache_value = {key: {'etag': '0', 'value': 'new-value'}}
shelve.update(cache_value)
assert fn() == 'new-value'
assert shelve == cache_value
def test_when_etag_changed(self, shelve, fn, key):
shelve.update({key: {'etag': '-1', 'value': 'old-value'}})
assert fn() == 'test'
assert shelve == {key: {'etag': '0', 'value': 'test'}}
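

# Illustrative sketch (not part of the original test file): the cache tests
# above pin down one invariant -- a stored value is reused only while its
# recorded etag (here the watched file's mtime) still matches. With a
# dict-like store, the core decision reduces to:
def _cached_value_sketch(store, key, etag, compute):
    entry = store.get(key)
    if entry and entry['etag'] == etag:
        return entry['value']  # etag unchanged: reuse the cached value
    value = compute()
    store[key] = {'etag': etag, 'value': value}  # refresh stale/missing entry
    return value
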
class TestGetValidHistoryWithoutCurrent(object):
@pytest.fixture(autouse=True)
def fail_on_warning(self):
warnings.simplefilter('error')
yield
warnings.resetwarnings()
@pytest.fixture(autouse=True)
def history(self, mocker):
mock = mocker.patch('thefuck.shells.shell.get_history')
# Passing as an argument causes `UnicodeDecodeError`
# with newer pytest and python 2.7
mock.return_value = ['le cat', 'fuck', 'ls cat',
                             'diff x', 'nocommand x', u'café ô']
return mock
@pytest.fixture(autouse=True)
def alias(self, mocker):
return mocker.patch('thefuck.utils.get_alias',
return_value='fuck')
@pytest.fixture(autouse=True)
def bins(self, mocker):
callables = list()
        for name in ['diff', 'ls', 'café']:
bin_mock = mocker.Mock(name=name)
bin_mock.configure_mock(name=name, is_dir=lambda: False)
callables.append(bin_mock)
path_mock = mocker.Mock(iterdir=mocker.Mock(return_value=callables))
return mocker.patch('thefuck.utils.Path', return_value=path_mock)
@pytest.mark.parametrize('script, result', [
        ('le cat', ['ls cat', 'diff x', u'café ô']),
        ('diff x', ['ls cat', u'café ô']),
        ('fuck', ['ls cat', 'diff x', u'café ô']),
        (u'cafe ô', ['ls cat', 'diff x', u'café ô']),
])
def test_get_valid_history_without_current(self, script, result):
command = Command(script, '')
assert get_valid_history_without_current(command) == result
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_argument_parser.py | tests/test_argument_parser.py | import pytest
from thefuck.argument_parser import Parser
from thefuck.const import ARGUMENT_PLACEHOLDER
def _args(**override):
args = {'alias': None, 'command': [], 'yes': False,
'help': False, 'version': False, 'debug': False,
'force_command': None, 'repeat': False,
'enable_experimental_instant_mode': False,
'shell_logger': None}
args.update(override)
return args
@pytest.mark.parametrize('argv, result', [
(['thefuck'], _args()),
(['thefuck', '-a'], _args(alias='fuck')),
(['thefuck', '--alias', '--enable-experimental-instant-mode'],
_args(alias='fuck', enable_experimental_instant_mode=True)),
(['thefuck', '-a', 'fix'], _args(alias='fix')),
(['thefuck', 'git', 'branch', ARGUMENT_PLACEHOLDER, '-y'],
_args(command=['git', 'branch'], yes=True)),
(['thefuck', 'git', 'branch', '-a', ARGUMENT_PLACEHOLDER, '-y'],
_args(command=['git', 'branch', '-a'], yes=True)),
(['thefuck', ARGUMENT_PLACEHOLDER, '-v'], _args(version=True)),
(['thefuck', ARGUMENT_PLACEHOLDER, '--help'], _args(help=True)),
(['thefuck', 'git', 'branch', '-a', ARGUMENT_PLACEHOLDER, '-y', '-d'],
_args(command=['git', 'branch', '-a'], yes=True, debug=True)),
(['thefuck', 'git', 'branch', '-a', ARGUMENT_PLACEHOLDER, '-r', '-d'],
_args(command=['git', 'branch', '-a'], repeat=True, debug=True)),
(['thefuck', '-l', '/tmp/log'], _args(shell_logger='/tmp/log')),
(['thefuck', '--shell-logger', '/tmp/log'],
_args(shell_logger='/tmp/log'))])
def test_parse(argv, result):
assert vars(Parser().parse(argv)) == result
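

# Illustrative sketch (not part of the original test file): every case above
# relies on ARGUMENT_PLACEHOLDER splitting argv -- tokens before it form the
# failed command, tokens after it are thefuck's own flags. The real Parser
# also rearranges the pieces for argparse; this sketch, an assumption rather
# than the actual implementation, shows only the split:
def _split_on_placeholder_sketch(argv, placeholder=ARGUMENT_PLACEHOLDER):
    if placeholder not in argv:
        return [], argv[1:]  # no marker: everything after argv[0] is flags
    index = argv.index(placeholder)
    return argv[1:index], argv[index + 1:]  # argv[0] is the program name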
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_types.py | tests/test_types.py | # -*- coding: utf-8 -*-
import os
from subprocess import PIPE, STDOUT
from mock import Mock
import pytest
from tests.utils import CorrectedCommand, Rule
from thefuck import const
from thefuck.exceptions import EmptyCommand
from thefuck.system import Path
from thefuck.types import Command
class TestCorrectedCommand(object):
def test_equality(self):
assert (CorrectedCommand('ls', None, 100) ==
CorrectedCommand('ls', None, 200))
assert (CorrectedCommand('ls', None, 100) !=
CorrectedCommand('ls', lambda *_: _, 100))
def test_hashable(self):
assert {CorrectedCommand('ls', None, 100),
CorrectedCommand('ls', None, 200)} == {CorrectedCommand('ls')}
def test_representable(self):
assert '{}'.format(CorrectedCommand('ls', None, 100)) == \
'CorrectedCommand(script=ls, side_effect=None, priority=100)'
        assert u'{}'.format(CorrectedCommand(u'echo café', None, 100)) == \
            u'CorrectedCommand(script=echo café, side_effect=None, priority=100)'
@pytest.mark.parametrize('script, printed, override_settings', [
('git branch', 'git branch', {'repeat': False, 'debug': False}),
('git brunch',
"git brunch || fuck --repeat --force-command 'git brunch'",
{'repeat': True, 'debug': False}),
('git brunch',
"git brunch || fuck --repeat --debug --force-command 'git brunch'",
{'repeat': True, 'debug': True})])
def test_run(self, capsys, settings, script, printed, override_settings):
settings.update(override_settings)
CorrectedCommand(script, None, 1000).run(Command(script, ''))
out, _ = capsys.readouterr()
assert out == printed
class TestRule(object):
def test_from_path_rule_exception(self, mocker):
load_source = mocker.patch('thefuck.types.load_source',
side_effect=ImportError("No module named foo..."))
assert Rule.from_path(Path('git.py')) is None
load_source.assert_called_once_with('git', 'git.py')
def test_from_path(self, mocker):
match = object()
get_new_command = object()
load_source = mocker.patch(
'thefuck.types.load_source',
return_value=Mock(match=match,
get_new_command=get_new_command,
enabled_by_default=True,
priority=900,
requires_output=True))
rule_path = os.path.join(os.sep, 'rules', 'bash.py')
assert (Rule.from_path(Path(rule_path))
== Rule('bash', match, get_new_command, priority=900))
load_source.assert_called_once_with('bash', rule_path)
def test_from_path_excluded_rule(self, mocker, settings):
load_source = mocker.patch('thefuck.types.load_source')
settings.update(exclude_rules=['git'])
rule_path = os.path.join(os.sep, 'rules', 'git.py')
assert Rule.from_path(Path(rule_path)) is None
assert not load_source.called
@pytest.mark.parametrize('rules, rule, is_enabled', [
(const.DEFAULT_RULES, Rule('git', enabled_by_default=True), True),
(const.DEFAULT_RULES, Rule('git', enabled_by_default=False), False),
([], Rule('git', enabled_by_default=False), False),
([], Rule('git', enabled_by_default=True), False),
(const.DEFAULT_RULES + ['git'], Rule('git', enabled_by_default=False), True),
(['git'], Rule('git', enabled_by_default=False), True)])
def test_is_enabled(self, settings, rules, rule, is_enabled):
settings.update(rules=rules)
assert rule.is_enabled == is_enabled
def test_isnt_match(self):
assert not Rule('', lambda _: False).is_match(
Command('ls', ''))
def test_is_match(self):
rule = Rule('', lambda x: x.script == 'cd ..')
assert rule.is_match(Command('cd ..', ''))
@pytest.mark.usefixtures('no_colors')
def test_isnt_match_when_rule_failed(self, capsys):
rule = Rule('test', Mock(side_effect=OSError('Denied')),
requires_output=False)
assert not rule.is_match(Command('ls', ''))
assert capsys.readouterr()[1].split('\n')[0] == '[WARN] Rule test:'
def test_get_corrected_commands_with_rule_returns_list(self):
rule = Rule(get_new_command=lambda x: [x.script + '!', x.script + '@'],
priority=100)
assert (list(rule.get_corrected_commands(Command('test', '')))
== [CorrectedCommand(script='test!', priority=100),
CorrectedCommand(script='test@', priority=200)])
def test_get_corrected_commands_with_rule_returns_command(self):
rule = Rule(get_new_command=lambda x: x.script + '!',
priority=100)
assert (list(rule.get_corrected_commands(Command('test', '')))
== [CorrectedCommand(script='test!', priority=100)])
class TestCommand(object):
@pytest.fixture(autouse=True)
def Popen(self, monkeypatch):
Popen = Mock()
Popen.return_value.stdout.read.return_value = b'output'
monkeypatch.setattr('thefuck.output_readers.rerun.Popen', Popen)
return Popen
@pytest.fixture(autouse=True)
def prepare(self, monkeypatch):
monkeypatch.setattr('thefuck.output_readers.rerun._wait_output',
lambda *_: True)
def test_from_script_calls(self, Popen, settings, os_environ):
settings.env = {}
assert Command.from_raw_script(
['apt-get', 'search', 'vim']) == Command(
'apt-get search vim', 'output')
Popen.assert_called_once_with('apt-get search vim',
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=STDOUT,
env=os_environ)
@pytest.mark.parametrize('script, result', [
([], None),
([''], None),
(['', ''], None),
(['ls', '-la'], 'ls -la'),
(['ls'], 'ls'),
(['echo \\ '], 'echo \\ '),
(['echo \\\n'], 'echo \\\n')])
def test_from_script(self, script, result):
if result:
assert Command.from_raw_script(script).script == result
else:
with pytest.raises(EmptyCommand):
Command.from_raw_script(script)
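

# Illustrative sketch (not part of the original test file): the cases above
# imply that raw parts are joined with spaces, only leading whitespace is
# stripped (the trailing space in 'echo \ ' survives), and an all-whitespace
# result raises EmptyCommand. Assuming that contract:
def _join_raw_script_sketch(raw_parts):
    script = ' '.join(raw_parts).lstrip()
    if not script:
        raise EmptyCommand
    return script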
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/conftest.py | tests/conftest.py | import os
import pytest
from thefuck import shells
from thefuck import conf, const
from thefuck.system import Path
shells.shell = shells.Generic()
def pytest_configure(config):
config.addinivalue_line("markers", "functional: mark test as functional")
def pytest_addoption(parser):
"""Adds `--enable-functional` argument."""
group = parser.getgroup("thefuck")
group.addoption('--enable-functional', action="store_true", default=False,
help="Enable functional tests")
@pytest.fixture
def no_memoize(monkeypatch):
monkeypatch.setattr('thefuck.utils.memoize.disabled', True)
@pytest.fixture(autouse=True)
def settings(request):
def _reset_settings():
conf.settings.clear()
conf.settings.update(const.DEFAULT_SETTINGS)
request.addfinalizer(_reset_settings)
conf.settings.user_dir = Path('~/.thefuck')
return conf.settings
@pytest.fixture
def no_colors(settings):
settings.no_colors = True
@pytest.fixture(autouse=True)
def no_cache(monkeypatch):
monkeypatch.setattr('thefuck.utils.cache.disabled', True)
@pytest.fixture(autouse=True)
def functional(request):
if request.node.get_closest_marker('functional') \
and not request.config.getoption('enable_functional'):
pytest.skip('functional tests are disabled')
@pytest.fixture
def source_root():
return Path(__file__).parent.parent.resolve()
@pytest.fixture
def set_shell(monkeypatch):
def _set(cls):
shell = cls()
monkeypatch.setattr('thefuck.shells.shell', shell)
return shell
return _set
@pytest.fixture(autouse=True)
def os_environ(monkeypatch):
env = {'PATH': os.environ['PATH']}
monkeypatch.setattr('os.environ', env)
return env
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/utils.py | tests/utils.py | from thefuck import types
from thefuck.const import DEFAULT_PRIORITY
class Rule(types.Rule):
def __init__(self, name='', match=lambda *_: True,
get_new_command=lambda *_: '',
enabled_by_default=True,
side_effect=None,
priority=DEFAULT_PRIORITY,
requires_output=True):
super(Rule, self).__init__(name, match, get_new_command,
enabled_by_default, side_effect,
priority, requires_output)
class CorrectedCommand(types.CorrectedCommand):
def __init__(self, script='', side_effect=None, priority=DEFAULT_PRIORITY):
super(CorrectedCommand, self).__init__(
script, side_effect, priority)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_conf.py | tests/test_conf.py | import pytest
import six
import os
from mock import Mock
from thefuck import const
@pytest.fixture
def load_source(mocker):
return mocker.patch('thefuck.conf.load_source')
def test_settings_defaults(load_source, settings):
load_source.return_value = object()
settings.init()
for key, val in const.DEFAULT_SETTINGS.items():
assert getattr(settings, key) == val
class TestSettingsFromFile(object):
def test_from_file(self, load_source, settings):
load_source.return_value = Mock(rules=['test'],
wait_command=10,
require_confirmation=True,
no_colors=True,
priority={'vim': 100},
exclude_rules=['git'])
settings.init()
assert settings.rules == ['test']
assert settings.wait_command == 10
assert settings.require_confirmation is True
assert settings.no_colors is True
assert settings.priority == {'vim': 100}
assert settings.exclude_rules == ['git']
def test_from_file_with_DEFAULT(self, load_source, settings):
load_source.return_value = Mock(rules=const.DEFAULT_RULES + ['test'],
wait_command=10,
exclude_rules=[],
require_confirmation=True,
no_colors=True)
settings.init()
assert settings.rules == const.DEFAULT_RULES + ['test']
@pytest.mark.usefixtures('load_source')
class TestSettingsFromEnv(object):
def test_from_env(self, os_environ, settings):
os_environ.update({'THEFUCK_RULES': 'bash:lisp',
'THEFUCK_EXCLUDE_RULES': 'git:vim',
'THEFUCK_WAIT_COMMAND': '55',
'THEFUCK_REQUIRE_CONFIRMATION': 'true',
'THEFUCK_NO_COLORS': 'false',
'THEFUCK_PRIORITY': 'bash=10:lisp=wrong:vim=15',
'THEFUCK_WAIT_SLOW_COMMAND': '999',
'THEFUCK_SLOW_COMMANDS': 'lein:react-native:./gradlew',
'THEFUCK_NUM_CLOSE_MATCHES': '359',
'THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES': '/media/:/mnt/'})
settings.init()
assert settings.rules == ['bash', 'lisp']
assert settings.exclude_rules == ['git', 'vim']
assert settings.wait_command == 55
assert settings.require_confirmation is True
assert settings.no_colors is False
assert settings.priority == {'bash': 10, 'vim': 15}
assert settings.wait_slow_command == 999
assert settings.slow_commands == ['lein', 'react-native', './gradlew']
assert settings.num_close_matches == 359
assert settings.excluded_search_path_prefixes == ['/media/', '/mnt/']
def test_from_env_with_DEFAULT(self, os_environ, settings):
os_environ.update({'THEFUCK_RULES': 'DEFAULT_RULES:bash:lisp'})
settings.init()
assert settings.rules == const.DEFAULT_RULES + ['bash', 'lisp']
def test_settings_from_args(settings):
settings.init(Mock(yes=True, debug=True, repeat=True))
assert not settings.require_confirmation
assert settings.debug
assert settings.repeat
class TestInitializeSettingsFile(object):
def test_ignore_if_exists(self, settings):
settings_path_mock = Mock(is_file=Mock(return_value=True), open=Mock())
settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock))
settings._init_settings_file()
assert settings_path_mock.is_file.call_count == 1
assert not settings_path_mock.open.called
def test_create_if_doesnt_exists(self, settings):
settings_file = six.StringIO()
settings_path_mock = Mock(
is_file=Mock(return_value=False),
open=Mock(return_value=Mock(
__exit__=lambda *args: None, __enter__=lambda *args: settings_file)))
settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock))
settings._init_settings_file()
settings_file_contents = settings_file.getvalue()
assert settings_path_mock.is_file.call_count == 1
assert settings_path_mock.open.call_count == 1
assert const.SETTINGS_HEADER in settings_file_contents
for setting in const.DEFAULT_SETTINGS.items():
assert '# {} = {}\n'.format(*setting) in settings_file_contents
settings_file.close()
@pytest.mark.parametrize('legacy_dir_exists, xdg_config_home, result', [
(False, '~/.config', '~/.config/thefuck'),
(False, '/user/test/config/', '/user/test/config/thefuck'),
(True, '~/.config', '~/.thefuck'),
(True, '/user/test/config/', '~/.thefuck')])
def test_get_user_dir_path(mocker, os_environ, settings, legacy_dir_exists,
xdg_config_home, result):
mocker.patch('thefuck.conf.Path.is_dir',
return_value=legacy_dir_exists)
if xdg_config_home is not None:
os_environ['XDG_CONFIG_HOME'] = xdg_config_home
else:
os_environ.pop('XDG_CONFIG_HOME', None)
path = settings._get_user_dir_path().as_posix()
assert path == os.path.expanduser(result)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_corrector.py | tests/test_corrector.py | # -*- coding: utf-8 -*-
import pytest
from tests.utils import Rule, CorrectedCommand
from thefuck import corrector, const
from thefuck.system import Path
from thefuck.types import Command
from thefuck.corrector import get_corrected_commands, organize_commands
@pytest.fixture
def glob(mocker):
results = {}
mocker.patch('thefuck.system.Path.glob',
new_callable=lambda: lambda *_: results.pop('value', []))
return lambda value: results.update({'value': value})
class TestGetRules(object):
@pytest.fixture(autouse=True)
def load_source(self, monkeypatch):
monkeypatch.setattr('thefuck.types.load_source',
lambda x, _: Rule(x))
def _compare_names(self, rules, names):
assert {r.name for r in rules} == set(names)
@pytest.mark.parametrize('paths, conf_rules, exclude_rules, loaded_rules', [
(['git.py', 'bash.py'], const.DEFAULT_RULES, [], ['git', 'bash']),
(['git.py', 'bash.py'], ['git'], [], ['git']),
(['git.py', 'bash.py'], const.DEFAULT_RULES, ['git'], ['bash']),
(['git.py', 'bash.py'], ['git'], ['git'], [])])
def test_get_rules(self, glob, settings, paths, conf_rules, exclude_rules,
loaded_rules):
glob([Path(path) for path in paths])
settings.update(rules=conf_rules,
priority={},
exclude_rules=exclude_rules)
rules = corrector.get_rules()
self._compare_names(rules, loaded_rules)
def test_get_rules_rule_exception(mocker, glob):
load_source = mocker.patch('thefuck.types.load_source',
side_effect=ImportError("No module named foo..."))
glob([Path('git.py')])
assert not corrector.get_rules()
load_source.assert_called_once_with('git', 'git.py')
def test_get_corrected_commands(mocker):
command = Command('test', 'test')
rules = [Rule(match=lambda _: False),
Rule(match=lambda _: True,
get_new_command=lambda x: x.script + '!', priority=100),
Rule(match=lambda _: True,
get_new_command=lambda x: [x.script + '@', x.script + ';'],
priority=60)]
mocker.patch('thefuck.corrector.get_rules', return_value=rules)
assert ([cmd.script for cmd in get_corrected_commands(command)]
== ['test!', 'test@', 'test;'])
def test_organize_commands():
"""Ensures that the function removes duplicates and sorts commands."""
commands = [CorrectedCommand('ls'), CorrectedCommand('ls -la', priority=9000),
CorrectedCommand('ls -lh', priority=100),
                CorrectedCommand(u'echo café', priority=200),
CorrectedCommand('ls -lh', priority=9999)]
assert list(organize_commands(iter(commands))) \
== [CorrectedCommand('ls'), CorrectedCommand('ls -lh', priority=100),
            CorrectedCommand(u'echo café', priority=200),
CorrectedCommand('ls -la', priority=9000)]
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/__init__.py | tests/__init__.py |  | python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/test_logs.py | tests/test_logs.py | import pytest
from thefuck import logs
def test_color(settings):
settings.no_colors = False
assert logs.color('red') == 'red'
settings.no_colors = True
assert logs.color('red') == ''
@pytest.mark.usefixtures('no_colors')
@pytest.mark.parametrize('debug, stderr', [
(True, 'DEBUG: test\n'),
(False, '')])
def test_debug(capsys, settings, debug, stderr):
settings.debug = debug
logs.debug('test')
assert capsys.readouterr() == ('', stderr)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/entrypoints/test_alias.py | tests/entrypoints/test_alias.py | from mock import Mock
import pytest
from thefuck.entrypoints.alias import _get_alias, print_alias
@pytest.mark.parametrize(
'py2, enable_experimental_instant_mode, which, is_instant', [
(False, True, True, True),
(False, False, True, False),
(False, True, False, False),
(True, True, True, False),
(True, True, False, False),
(True, False, True, False)])
def test_get_alias(monkeypatch, mocker, py2,
enable_experimental_instant_mode,
which, is_instant):
monkeypatch.setattr('six.PY2', py2)
args = Mock(
enable_experimental_instant_mode=enable_experimental_instant_mode,
alias='fuck', )
mocker.patch('thefuck.entrypoints.alias.which', return_value=which)
shell = Mock(app_alias=lambda _: 'app_alias',
instant_mode_alias=lambda _: 'instant_mode_alias')
monkeypatch.setattr('thefuck.entrypoints.alias.shell', shell)
alias = _get_alias(args)
if is_instant:
assert alias == 'instant_mode_alias'
else:
assert alias == 'app_alias'
def test_print_alias(mocker):
settings_mock = mocker.patch('thefuck.entrypoints.alias.settings')
_get_alias_mock = mocker.patch('thefuck.entrypoints.alias._get_alias')
known_args = Mock()
print_alias(known_args)
settings_mock.init.assert_called_once_with(known_args)
_get_alias_mock.assert_called_once_with(known_args)
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/entrypoints/test_not_configured.py | tests/entrypoints/test_not_configured.py | import pytest
import json
from six import StringIO
from mock import MagicMock
from thefuck.shells.generic import ShellConfiguration
from thefuck.entrypoints.not_configured import main
@pytest.fixture(autouse=True)
def usage_tracker(mocker):
return mocker.patch(
'thefuck.entrypoints.not_configured._get_not_configured_usage_tracker_path',
new_callable=MagicMock)
@pytest.fixture(autouse=True)
def usage_tracker_io(usage_tracker):
io = StringIO()
usage_tracker.return_value \
.open.return_value \
.__enter__.return_value = io
return io
@pytest.fixture(autouse=True)
def usage_tracker_exists(usage_tracker):
usage_tracker.return_value \
.exists.return_value = True
return usage_tracker.return_value.exists
def _assert_tracker_updated(usage_tracker_io, pid):
usage_tracker_io.seek(0)
info = json.load(usage_tracker_io)
assert info['pid'] == pid
def _change_tracker(usage_tracker_io, pid):
usage_tracker_io.truncate(0)
info = {'pid': pid, 'time': 0}
json.dump(info, usage_tracker_io)
usage_tracker_io.seek(0)
@pytest.fixture(autouse=True)
def shell_pid(mocker):
return mocker.patch('thefuck.entrypoints.not_configured._get_shell_pid',
new_callable=MagicMock)
@pytest.fixture(autouse=True)
def shell(mocker):
shell = mocker.patch('thefuck.entrypoints.not_configured.shell',
new_callable=MagicMock)
shell.get_history.return_value = []
shell.how_to_configure.return_value = ShellConfiguration(
content='eval $(thefuck --alias)',
path='/tmp/.bashrc',
reload='bash',
can_configure_automatically=True)
return shell
@pytest.fixture(autouse=True)
def shell_config(mocker):
path_mock = mocker.patch('thefuck.entrypoints.not_configured.Path',
new_callable=MagicMock)
return path_mock.return_value \
.expanduser.return_value \
.open.return_value \
.__enter__.return_value
@pytest.fixture(autouse=True)
def logs(mocker):
return mocker.patch('thefuck.entrypoints.not_configured.logs',
new_callable=MagicMock)
def test_for_generic_shell(shell, logs):
shell.how_to_configure.return_value = None
main()
logs.how_to_configure_alias.assert_called_once()
def test_on_first_run(usage_tracker_io, usage_tracker_exists, shell_pid, logs):
shell_pid.return_value = 12
main()
usage_tracker_exists.return_value = False
_assert_tracker_updated(usage_tracker_io, 12)
logs.how_to_configure_alias.assert_called_once()
def test_on_run_after_other_commands(usage_tracker_io, shell_pid, shell, logs):
shell_pid.return_value = 12
shell.get_history.return_value = ['fuck', 'ls']
_change_tracker(usage_tracker_io, 12)
main()
logs.how_to_configure_alias.assert_called_once()
def test_on_first_run_from_current_shell(usage_tracker_io, shell_pid,
shell, logs):
shell.get_history.return_value = ['fuck']
shell_pid.return_value = 12
main()
_assert_tracker_updated(usage_tracker_io, 12)
logs.how_to_configure_alias.assert_called_once()
def test_when_cant_configure_automatically(shell_pid, shell, logs):
shell_pid.return_value = 12
shell.how_to_configure.return_value = ShellConfiguration(
content='eval $(thefuck --alias)',
path='/tmp/.bashrc',
reload='bash',
can_configure_automatically=False)
main()
logs.how_to_configure_alias.assert_called_once()
def test_when_already_configured(usage_tracker_io, shell_pid,
shell, shell_config, logs):
shell.get_history.return_value = ['fuck']
shell_pid.return_value = 12
_change_tracker(usage_tracker_io, 12)
shell_config.read.return_value = 'eval $(thefuck --alias)'
main()
logs.already_configured.assert_called_once()
def test_when_successfully_configured(usage_tracker_io, shell_pid,
shell, shell_config, logs):
shell.get_history.return_value = ['fuck']
shell_pid.return_value = 12
_change_tracker(usage_tracker_io, 12)
shell_config.read.return_value = ''
main()
shell_config.write.assert_any_call('eval $(thefuck --alias)')
logs.configured_successfully.assert_called_once()
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/entrypoints/__init__.py | tests/entrypoints/__init__.py |  | python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/entrypoints/test_fix_command.py | tests/entrypoints/test_fix_command.py | import pytest
from mock import Mock
from thefuck.entrypoints.fix_command import _get_raw_command
class TestGetRawCommand(object):
def test_from_force_command_argument(self):
known_args = Mock(force_command='git brunch')
assert _get_raw_command(known_args) == ['git brunch']
def test_from_command_argument(self, os_environ):
os_environ['TF_HISTORY'] = None
known_args = Mock(force_command=None,
command=['sl'])
assert _get_raw_command(known_args) == ['sl']
@pytest.mark.parametrize('history, result', [
('git br', 'git br'),
('git br\nfcuk', 'git br'),
('git br\nfcuk\nls', 'ls'),
('git br\nfcuk\nls\nfuk', 'ls')])
def test_from_history(self, os_environ, history, result):
os_environ['TF_HISTORY'] = history
known_args = Mock(force_command=None,
command=None)
assert _get_raw_command(known_args) == [result]
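

# Illustrative sketch (not part of the original test file): the history cases
# above take the newest entry unless it looks like a mistyped invocation of
# the alias itself ('fcuk', 'fuk'). One way to get that behaviour -- assuming
# the alias is 'fuck' and a difflib similarity cutoff of 0.5, both of which
# are assumptions here -- is:
from difflib import SequenceMatcher


def _last_real_command_sketch(history, alias='fuck', cutoff=0.5):
    for entry in reversed(history.split('\n')):
        # skip entries that are probably a typo of the alias itself
        if SequenceMatcher(a=alias, b=entry).ratio() < cutoff:
            return entry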
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_workon_doesnt_exists.py | tests/rules/test_workon_doesnt_exists.py | import pytest
from thefuck.rules.workon_doesnt_exists import match, get_new_command
from thefuck.types import Command
@pytest.fixture(autouse=True)
def envs(mocker):
return mocker.patch(
'thefuck.rules.workon_doesnt_exists._get_all_environments',
return_value=['thefuck', 'code_view'])
@pytest.mark.parametrize('script', [
'workon tehfuck', 'workon code-view', 'workon new-env'])
def test_match(script):
assert match(Command(script, ''))
@pytest.mark.parametrize('script', [
'workon thefuck', 'workon code_view', 'work on tehfuck'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, result', [
('workon tehfuck', 'workon thefuck'),
('workon code-view', 'workon code_view'),
('workon zzzz', 'mkvirtualenv zzzz')])
def test_get_new_command(script, result):
assert get_new_command(Command(script, ''))[0] == result
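

# Illustrative sketch (not part of the original test file): the expectations
# above combine a fuzzy match against existing environments ('tehfuck' ->
# 'thefuck') with a create-it fallback for unknown names. Assuming difflib
# for the fuzzy part:
from difflib import get_close_matches


def _workon_fix_sketch(misspelled, envs):
    fixes = ['workon ' + env for env in get_close_matches(misspelled, envs)]
    fixes.append('mkvirtualenv ' + misspelled)  # fallback: create the env
    return fixes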
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_go_run.py | tests/rules/test_go_run.py | import pytest
from thefuck.rules.go_run import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
Command('go run foo', ''),
Command('go run bar', '')])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('go run foo', ''), 'go run foo.go'),
(Command('go run bar', ''), 'go run bar.go')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_dry.py | tests/rules/test_dry.py | import pytest
from thefuck.rules.dry import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
Command('cd cd foo', ''),
Command('git git push origin/master', '')])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('cd cd foo', ''), 'cd foo'),
(Command('git git push origin/master', ''), 'git push origin/master')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_ls_lah.py | tests/rules/test_ls_lah.py | from thefuck.rules.ls_lah import match, get_new_command
from thefuck.types import Command
def test_match():
assert match(Command('ls', ''))
assert match(Command('ls file.py', ''))
assert match(Command('ls /opt', ''))
assert not match(Command('ls -lah /opt', ''))
assert not match(Command('pacman -S binutils', ''))
assert not match(Command('lsof', ''))
def test_get_new_command():
assert get_new_command(Command('ls file.py', '')) == 'ls -lah file.py'
assert get_new_command(Command('ls', '')) == 'ls -lah'
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_yarn_command_not_found.py | tests/rules/test_yarn_command_not_found.py | # -*- encoding: utf-8 -*-
from io import BytesIO
import pytest
from thefuck.types import Command
from thefuck.rules.yarn_command_not_found import match, get_new_command
output = '''
error Command "{}" not found.
'''.format
yarn_help_stdout = b'''
Usage: yarn [command] [flags]
Options:
-h, --help output usage information
-V, --version output the version number
--verbose output verbose messages on internal operations
--offline trigger an error if any required dependencies are not available in local cache
--prefer-offline use network only if dependencies are not available in local cache
--strict-semver
--json
--ignore-scripts don't run lifecycle scripts
--har save HAR output of network traffic
--ignore-platform ignore platform checks
--ignore-engines ignore engines check
--ignore-optional ignore optional dependencies
--force ignore all caches
--no-bin-links don't generate bin links when setting up packages
--flat only allow one version of a package
--prod, --production [prod]
--no-lockfile don't read or generate a lockfile
--pure-lockfile don't generate a lockfile
--frozen-lockfile don't generate a lockfile and fail if an update is needed
--link-duplicates create hardlinks to the repeated modules in node_modules
--global-folder <path>
--modules-folder <path> rather than installing modules into the node_modules folder relative to the cwd, output them here
--cache-folder <path> specify a custom folder to store the yarn cache
--mutex <type>[:specifier] use a mutex to ensure only one yarn instance is executing
--no-emoji disable emoji in output
--proxy <host>
--https-proxy <host>
--no-progress disable progress bar
--network-concurrency <number> maximum number of concurrent network requests
Commands:
- access
- add
- bin
- cache
- check
- clean
- config
- generate-lock-entry
- global
- import
- info
- init
- install
- licenses
- link
- list
- login
- logout
- outdated
- owner
- pack
- publish
- remove
- run
- tag
- team
- unlink
- upgrade
- upgrade-interactive
- version
- versions
- why
Run `yarn help COMMAND` for more information on specific commands.
Visit https://yarnpkg.com/en/docs/cli/ to learn more about Yarn.
''' # noqa
@pytest.fixture(autouse=True)
def yarn_help(mocker):
patch = mocker.patch('thefuck.rules.yarn_command_not_found.Popen')
patch.return_value.stdout = BytesIO(yarn_help_stdout)
return patch
@pytest.mark.parametrize('command', [
Command('yarn whyy webpack', output('whyy'))])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('npm nuild', output('nuild')),
Command('yarn install', '')])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, result', [
(Command('yarn whyy webpack', output('whyy')),
'yarn why webpack'),
(Command('yarn require lodash', output('require')),
'yarn add lodash')])
def test_get_new_command(command, result):
fixed_command = get_new_command(command)
if isinstance(fixed_command, list):
fixed_command = fixed_command[0]
assert fixed_command == result
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_brew_install.py | tests/rules/test_brew_install.py | import pytest
from thefuck.rules.brew_install import match, get_new_command, _get_suggestions
from thefuck.types import Command
@pytest.fixture
def brew_no_available_formula_one():
return '''Warning: No available formula with the name "giss". Did you mean gist?'''
@pytest.fixture
def brew_no_available_formula_two():
return '''Warning: No available formula with the name "elasticserar". Did you mean elasticsearch or elasticsearch@6?'''
@pytest.fixture
def brew_no_available_formula_three():
return '''Warning: No available formula with the name "gitt". Did you mean git, gitg or gist?'''
@pytest.fixture
def brew_install_no_argument():
return '''Install a formula or cask. Additional options specific to a formula may be'''
@pytest.fixture
def brew_already_installed():
return '''Warning: git-2.3.5 already installed'''
def test_suggestions():
assert _get_suggestions("one") == ['one']
assert _get_suggestions("one or two") == ['one', 'two']
assert _get_suggestions("one, two or three") == ['one', 'two', 'three']
def test_match(brew_no_available_formula_one, brew_no_available_formula_two,
brew_no_available_formula_three, brew_already_installed,
brew_install_no_argument):
assert match(Command('brew install giss',
brew_no_available_formula_one))
assert match(Command('brew install elasticserar',
brew_no_available_formula_two))
assert match(Command('brew install gitt',
brew_no_available_formula_three))
assert not match(Command('brew install git',
brew_already_installed))
assert not match(Command('brew install', brew_install_no_argument))
def test_get_new_command(brew_no_available_formula_one, brew_no_available_formula_two,
brew_no_available_formula_three):
assert get_new_command(Command('brew install giss',
brew_no_available_formula_one))\
== ['brew install gist']
assert get_new_command(Command('brew install elasticsear',
brew_no_available_formula_two))\
== ['brew install elasticsearch', 'brew install elasticsearch@6']
assert get_new_command(Command('brew install gitt',
brew_no_available_formula_three))\
== ['brew install git', 'brew install gitg', 'brew install gist']
assert get_new_command(Command('brew install aa',
brew_no_available_formula_one))\
!= 'brew install aha'
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_brew_update_formula.py | tests/rules/test_brew_update_formula.py | import pytest
from thefuck.types import Command
from thefuck.rules.brew_update_formula import get_new_command, match
output = ("Error: This command updates brew itself, and does not take formula"
" names.\nUse `brew upgrade thefuck`.")
def test_match():
command = Command('brew update thefuck', output)
assert match(command)
@pytest.mark.parametrize('script', [
'brew upgrade foo',
'brew update'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, formula', [
('brew update foo', 'foo'),
('brew update bar zap', 'bar zap')])
def test_get_new_command(script, formula):
command = Command(script, output)
new_command = 'brew upgrade {}'.format(formula)
assert get_new_command(command) == new_command
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_git_rebase_no_changes.py | tests/rules/test_git_rebase_no_changes.py | import pytest
from thefuck.rules.git_rebase_no_changes import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def output():
return '''Applying: Test commit
No changes - did you forget to use 'git add'?
If there is nothing left to stage, chances are that something else
already introduced the same changes; you might want to skip this patch.
When you have resolved this problem, run "git rebase --continue".
If you prefer to skip this patch, run "git rebase --skip" instead.
To check out the original branch and stop rebasing, run "git rebase --abort".
'''
def test_match(output):
assert match(Command('git rebase --continue', output))
assert not match(Command('git rebase --continue', ''))
assert not match(Command('git rebase --skip', ''))
def test_get_new_command(output):
assert (get_new_command(Command('git rebase --continue', output)) ==
'git rebase --skip')
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_rm_root.py | tests/rules/test_rm_root.py | import pytest
from thefuck.rules.rm_root import match, get_new_command
from thefuck.types import Command
def test_match():
assert match(Command('rm -rf /', 'add --no-preserve-root'))
@pytest.mark.parametrize('command', [
Command('ls', 'add --no-preserve-root'),
Command('rm --no-preserve-root /', 'add --no-preserve-root'),
Command('rm -rf /', '')])
def test_not_match(command):
assert not match(command)
def test_get_new_command():
assert (get_new_command(Command('rm -rf /', ''))
== 'rm -rf / --no-preserve-root')
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_aws_cli.py | tests/rules/test_aws_cli.py | import pytest
from thefuck.rules.aws_cli import match, get_new_command
from thefuck.types import Command
no_suggestions = '''\
usage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]
To see help text, you can run:
aws help
aws <command> help
aws <command> <subcommand> help
aws: error: argument command: Invalid choice, valid choices are:
dynamodb | dynamodbstreams
ec2 | ecr
'''
misspelled_command = '''\
usage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]
To see help text, you can run:
aws help
aws <command> help
aws <command> <subcommand> help
aws: error: argument command: Invalid choice, valid choices are:
dynamodb | dynamodbstreams
ec2 | ecr
Invalid choice: 'dynamdb', maybe you meant:
* dynamodb
'''
misspelled_subcommand = '''\
usage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]
To see help text, you can run:
aws help
aws <command> help
aws <command> <subcommand> help
aws: error: argument operation: Invalid choice, valid choices are:
query | scan
update-item | update-table
Invalid choice: 'scn', maybe you meant:
* scan
'''
misspelled_subcommand_with_multiple_options = '''\
usage: aws [options] <command> <subcommand> [<subcommand> ...] [parameters]
To see help text, you can run:
aws help
aws <command> help
aws <command> <subcommand> help
aws: error: argument operation: Invalid choice, valid choices are:
describe-table | get-item
list-tables | put-item
Invalid choice: 't-item', maybe you meant:
* put-item
* get-item
'''
@pytest.mark.parametrize('command', [
Command('aws dynamdb scan', misspelled_command),
Command('aws dynamodb scn', misspelled_subcommand),
Command('aws dynamodb t-item',
misspelled_subcommand_with_multiple_options)])
def test_match(command):
assert match(command)
def test_not_match():
assert not match(Command('aws dynamodb invalid', no_suggestions))
@pytest.mark.parametrize('command, result', [
(Command('aws dynamdb scan', misspelled_command),
['aws dynamodb scan']),
(Command('aws dynamodb scn', misspelled_subcommand),
['aws dynamodb scan']),
(Command('aws dynamodb t-item',
misspelled_subcommand_with_multiple_options),
['aws dynamodb put-item', 'aws dynamodb get-item'])])
def test_get_new_command(command, result):
assert get_new_command(command) == result
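

# Illustrative sketch (not part of the original test file): the fixtures
# above show aws printing "Invalid choice: '...', maybe you meant:" followed
# by '* candidate' lines. Assuming that layout, both pieces fall out of two
# regexes:
import re


def _parse_aws_suggestions_sketch(output):
    mistake = re.search(r"Invalid choice: '(.*)', maybe you meant:", output)
    if not mistake:
        return None, []
    candidates = re.findall(r'^\s*\*\s*(\S+)', output, re.MULTILINE)
    return mistake.group(1), candidates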
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_fix_file.py | tests/rules/test_fix_file.py | # -*- coding: utf-8 -*-
import pytest
import os
from collections import namedtuple
from thefuck.rules.fix_file import match, get_new_command
from thefuck.types import Command
FixFileTest = namedtuple('FixFileTest', ['script', 'file', 'line', 'col', 'output'])
tests = (
FixFileTest('gcc a.c', 'a.c', 3, 1, """
a.c: In function 'main':
a.c:3:1: error: expected expression before '}' token
}
^
"""),
FixFileTest('clang a.c', 'a.c', 3, 1, """
a.c:3:1: error: expected expression
}
^
"""),
FixFileTest('perl a.pl', 'a.pl', 3, None, """
syntax error at a.pl line 3, at EOF
Execution of a.pl aborted due to compilation errors.
"""),
FixFileTest('perl a.pl', 'a.pl', 2, None, """
Search pattern not terminated at a.pl line 2.
"""),
FixFileTest('sh a.sh', 'a.sh', 2, None, """
a.sh: line 2: foo: command not found
"""),
FixFileTest('zsh a.sh', 'a.sh', 2, None, """
a.sh:2: command not found: foo
"""),
FixFileTest('bash a.sh', 'a.sh', 2, None, """
a.sh: line 2: foo: command not found
"""),
FixFileTest('rustc a.rs', 'a.rs', 2, 5, """
a.rs:2:5: 2:6 error: unexpected token: `+`
a.rs:2 +
^
"""),
FixFileTest('cargo build', 'src/lib.rs', 3, 5, """
Compiling test v0.1.0 (file:///tmp/fix-error/test)
src/lib.rs:3:5: 3:6 error: unexpected token: `+`
src/lib.rs:3 +
^
Could not compile `test`.
To learn more, run the command again with --verbose.
"""),
FixFileTest('python a.py', 'a.py', 2, None, """
File "a.py", line 2
+
^
SyntaxError: invalid syntax
"""),
FixFileTest('python a.py', 'a.py', 8, None, """
Traceback (most recent call last):
File "a.py", line 8, in <module>
match("foo")
File "a.py", line 5, in match
m = re.search(None, command)
File "/usr/lib/python3.4/re.py", line 170, in search
return _compile(pattern, flags).search(string)
File "/usr/lib/python3.4/re.py", line 293, in _compile
raise TypeError("first argument must be string or compiled pattern")
TypeError: first argument must be string or compiled pattern
"""),
FixFileTest(u'python café.py', u'café.py', 8, None, u"""
Traceback (most recent call last):
File "café.py", line 8, in <module>
match("foo")
File "café.py", line 5, in match
m = re.search(None, command)
File "/usr/lib/python3.4/re.py", line 170, in search
return _compile(pattern, flags).search(string)
File "/usr/lib/python3.4/re.py", line 293, in _compile
raise TypeError("first argument must be string or compiled pattern")
TypeError: first argument must be string or compiled pattern
"""),
FixFileTest('ruby a.rb', 'a.rb', 3, None, """
a.rb:3: syntax error, unexpected keyword_end
"""),
FixFileTest('lua a.lua', 'a.lua', 2, None, """
lua: a.lua:2: unexpected symbol near '+'
"""),
FixFileTest('fish a.sh', '/tmp/fix-error/a.sh', 2, None, """
fish: Unknown command 'foo'
/tmp/fix-error/a.sh (line 2): foo
^
"""),
FixFileTest('./a', './a', 2, None, """
awk: ./a:2: BEGIN { print "Hello, world!" + }
awk: ./a:2: ^ syntax error
"""),
FixFileTest('llc a.ll', 'a.ll', 1, 2, """
llc: a.ll:1:2: error: expected top-level entity
+
^
"""),
FixFileTest('go build a.go', 'a.go', 1, 2, """
can't load package:
a.go:1:2: expected 'package', found '+'
"""),
FixFileTest('make', 'Makefile', 2, None, """
bidule
make: bidule: Command not found
Makefile:2: recipe for target 'target' failed
make: *** [target] Error 127
"""),
FixFileTest('git st', '/home/martin/.config/git/config', 1, None, """
fatal: bad config file line 1 in /home/martin/.config/git/config
"""),
FixFileTest('node fuck.js asdf qwer', '/Users/pablo/Workspace/barebones/fuck.js', '2', 5, """
/Users/pablo/Workspace/barebones/fuck.js:2
conole.log(arg); // this should read console.log(arg);
^
ReferenceError: conole is not defined
at /Users/pablo/Workspace/barebones/fuck.js:2:5
at Array.forEach (native)
at Object.<anonymous> (/Users/pablo/Workspace/barebones/fuck.js:1:85)
at Module._compile (module.js:460:26)
at Object.Module._extensions..js (module.js:478:10)
at Module.load (module.js:355:32)
at Function.Module._load (module.js:310:12)
at Function.Module.runMain (module.js:501:10)
at startup (node.js:129:16)
at node.js:814:3
"""),
FixFileTest('pep8', './tests/rules/test_systemctl.py', 17, 80, """
./tests/rules/test_systemctl.py:17:80: E501 line too long (93 > 79 characters)
./tests/rules/test_systemctl.py:18:80: E501 line too long (103 > 79 characters)
./tests/rules/test_whois.py:20:80: E501 line too long (89 > 79 characters)
./tests/rules/test_whois.py:22:80: E501 line too long (83 > 79 characters)
"""),
FixFileTest('pytest', '/home/thefuck/tests/rules/test_fix_file.py', 218, None, """
monkeypatch = <_pytest.monkeypatch.monkeypatch object at 0x7fdb76a25b38>
test = ('fish a.sh', '/tmp/fix-error/a.sh', 2, None, '', "\\nfish: Unknown command 'foo'\\n/tmp/fix-error/a.sh (line 2): foo\\n ^\\n")
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_get_new_command(monkeypatch, test):
> mocker.patch('os.path.isfile', return_value=True)
E NameError: name 'mocker' is not defined
/home/thefuck/tests/rules/test_fix_file.py:218: NameError
"""),
)
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_match(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=True)
monkeypatch.setenv('EDITOR', 'dummy_editor')
assert match(Command('', test.output))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_no_editor(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=True)
if 'EDITOR' in os.environ:
monkeypatch.delenv('EDITOR')
assert not match(Command('', test.output))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_not_file(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=False)
monkeypatch.setenv('EDITOR', 'dummy_editor')
assert not match(Command('', test.output))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_get_new_command(mocker, monkeypatch, test):
    mocker.patch('os.path.isfile', return_value=True)
    monkeypatch.setenv('EDITOR', 'dummy_editor')
    # assumes the default fixlinecmd format '{editor} {file} +{line}'
    assert (get_new_command(Command(test.script, test.output)) ==
            u'dummy_editor {} +{} && {}'.format(test.file, test.line,
                                                test.script))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_get_new_command_with_settings(mocker, monkeypatch, test, settings):
mocker.patch('os.path.isfile', return_value=True)
monkeypatch.setenv('EDITOR', 'dummy_editor')
cmd = Command(test.script, test.output)
settings.fixcolcmd = '{editor} {file} +{line}:{col}'
if test.col:
assert (get_new_command(cmd) ==
u'dummy_editor {} +{}:{} && {}'.format(test.file, test.line, test.col, test.script))
else:
assert (get_new_command(cmd) ==
u'dummy_editor {} +{} && {}'.format(test.file, test.line, test.script))
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_sed_unterminated_s.py | tests/rules/test_sed_unterminated_s.py | import pytest
from thefuck.rules.sed_unterminated_s import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def sed_unterminated_s():
return "sed: -e expression #1, char 9: unterminated `s' command"
def test_match(sed_unterminated_s):
assert match(Command('sed -e s/foo/bar', sed_unterminated_s))
assert match(Command('sed -es/foo/bar', sed_unterminated_s))
assert match(Command('sed -e s/foo/bar -e s/baz/quz', sed_unterminated_s))
assert not match(Command('sed -e s/foo/bar', ''))
assert not match(Command('sed -es/foo/bar', ''))
assert not match(Command('sed -e s/foo/bar -e s/baz/quz', ''))
def test_get_new_command(sed_unterminated_s):
assert (get_new_command(Command('sed -e s/foo/bar', sed_unterminated_s))
== 'sed -e s/foo/bar/')
assert (get_new_command(Command('sed -es/foo/bar', sed_unterminated_s))
== 'sed -es/foo/bar/')
assert (get_new_command(Command(r"sed -e 's/\/foo/bar'", sed_unterminated_s))
== r"sed -e 's/\/foo/bar/'")
assert (get_new_command(Command(r"sed -e s/foo/bar -es/baz/quz", sed_unterminated_s))
== r"sed -e s/foo/bar/ -es/baz/quz/")
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_cargo_no_command.py | tests/rules/test_cargo_no_command.py | import pytest
from thefuck.rules.cargo_no_command import match, get_new_command
from thefuck.types import Command
no_such_subcommand_old = """No such subcommand
Did you mean `build`?
"""
no_such_subcommand = """error: no such subcommand
\tDid you mean `build`?
"""
@pytest.mark.parametrize('command', [
Command('cargo buid', no_such_subcommand_old),
Command('cargo buils', no_such_subcommand)])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('cargo buid', no_such_subcommand_old), 'cargo build'),
(Command('cargo buils', no_such_subcommand), 'cargo build')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_git_add_force.py | tests/rules/test_git_add_force.py | import pytest
from thefuck.rules.git_add_force import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def output():
return ('The following paths are ignored by one of your .gitignore files:\n'
'dist/app.js\n'
'dist/background.js\n'
'dist/options.js\n'
'Use -f if you really want to add them.\n')
def test_match(output):
assert match(Command('git add dist/*.js', output))
assert not match(Command('git add dist/*.js', ''))
def test_get_new_command(output):
assert (get_new_command(Command('git add dist/*.js', output))
== "git add --force dist/*.js")
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
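The expected rewrite is small enough to sketch in full; note that the project wires its git rules through shared git-specific helpers, which this standalone sketch skips.

def match(command):
    return ('git add' in command.script
            and 'Use -f if you really want to add them.' in command.output)

def get_new_command(command):
    # Re-run the add with --force so ignored paths are staged anyway.
    return command.script.replace('git add', 'git add --force', 1)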
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_pip_install.py | tests/rules/test_pip_install.py | # -*- coding: UTF-8 -*-
from thefuck.rules.pip_install import match, get_new_command
from thefuck.types import Command
def test_match():
response1 = """
Could not install packages due to an EnvironmentError: [Errno 13] Permission denied: '/Library/Python/2.7/site-packages/entrypoints.pyc'
Consider using the `--user` option or check the permissions.
"""
assert match(Command('pip install -r requirements.txt', response1))
response2 = """
Collecting bacon
Downloading https://files.pythonhosted.org/packages/b2/81/19fb79139ee71c8bc4e5a444546f318e2b87253b8939ec8a7e10d63b7341/bacon-0.3.1.zip (11.0MB)
100% |████████████████████████████████| 11.0MB 3.0MB/s
Installing collected packages: bacon
Running setup.py install for bacon ... done
Successfully installed bacon-0.3.1
"""
assert not match(Command('pip install bacon', response2))
def test_get_new_command():
assert get_new_command(Command('pip install -r requirements.txt', '')) == 'pip install --user -r requirements.txt'
assert get_new_command(Command('pip install bacon', '')) == 'pip install --user bacon'
assert get_new_command(Command('pip install --user -r requirements.txt', '')) == 'sudo pip install -r requirements.txt'
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
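The three assertions encode a two-step escalation, sketched below with plain substring checks (an assumption): first retry with --user, and if --user was already present and still failed, drop it and fall back to sudo.

def match(command):
    return ('pip install' in command.script
            and 'Permission denied' in command.output)

def get_new_command(command):
    if '--user' not in command.script:
        # First escalation: try a per-user site-packages install.
        return command.script.replace('pip install', 'pip install --user', 1)
    # --user already failed, so drop it and go system-wide with sudo.
    return 'sudo ' + command.script.replace(' --user', '', 1)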
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_npm_wrong_command.py | tests/rules/test_npm_wrong_command.py | import pytest
from thefuck.rules.npm_wrong_command import match, get_new_command
from thefuck.types import Command
output = '''
Usage: npm <command>
where <command> is one of:
access, add-user, adduser, apihelp, author, bin, bugs, c,
cache, completion, config, ddp, dedupe, deprecate, dist-tag,
dist-tags, docs, edit, explore, faq, find, find-dupes, get,
help, help-search, home, i, info, init, install, issues, la,
link, list, ll, ln, login, logout, ls, outdated, owner,
pack, ping, prefix, prune, publish, r, rb, rebuild, remove,
repo, restart, rm, root, run-script, s, se, search, set,
show, shrinkwrap, star, stars, start, stop, t, tag, team,
test, tst, un, uninstall, unlink, unpublish, unstar, up,
update, upgrade, v, verison, version, view, whoami
npm <cmd> -h quick help on <cmd>
npm -l display full usage info
npm faq commonly asked questions
npm help <term> search for help on <term>
npm help npm involved overview
Specify configs in the ini-formatted file:
/home/nvbn/.npmrc
or on the command line via: npm <command> --key value
Config info can be viewed via: npm help config
npm@2.14.7 /opt/node/lib/node_modules/npm
'''
@pytest.mark.parametrize('script', [
'npm urgrdae',
'npm urgrade -g',
'npm -f urgrade -g',
'npm urg'])
def test_match(script):
assert match(Command(script, output))
@pytest.mark.parametrize('script, output', [
('npm urgrade', ''),
('npm', output),
('test urgrade', output),
('npm -e', output)])
def test_not_match(script, output):
assert not match(Command(script, output))
@pytest.mark.parametrize('script, result', [
('npm urgrade', 'npm upgrade'),
('npm -g isntall gulp', 'npm -g install gulp'),
('npm isntall -g gulp', 'npm install -g gulp')])
def test_get_new_command(script, result):
assert get_new_command(Command(script, output)) == result
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
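A sketch of the fuzzy correction these tests describe: harvest the subcommand list from npm's usage output, detect that the first non-flag argument is not in it, and swap in the closest candidate. The parsing helpers and the difflib cutoff are assumptions.

import difflib
import re

def _get_available_commands(output):
    # Harvest candidate subcommands from the "where <command> is one of:" block.
    if 'where <command> is one of:' not in output:
        return []
    section = output.split('where <command> is one of:')[1]
    section = section.split('npm <cmd>')[0]
    return re.findall(r'[\w-]+', section)

def _get_wrong_command(parts):
    # The first non-flag argument after `npm` is treated as the subcommand.
    for part in parts[1:]:
        if not part.startswith('-'):
            return part

def match(command):
    parts = command.script.split()
    if not parts or parts[0] != 'npm':
        return False
    available = _get_available_commands(command.output)
    wrong = _get_wrong_command(parts)
    return bool(available) and wrong is not None and wrong not in available

def get_new_command(command):
    parts = command.script.split()
    wrong = _get_wrong_command(parts)
    available = _get_available_commands(command.output)
    fixed = difflib.get_close_matches(wrong, available, n=1, cutoff=0.1)[0]
    return ' '.join(fixed if part == wrong else part for part in parts)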
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_git_pull_uncommitted_changes.py | tests/rules/test_git_pull_uncommitted_changes.py | import pytest
from thefuck.rules.git_pull_uncommitted_changes import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def output():
return '''error: Cannot pull with rebase: You have unstaged changes.'''
def test_match(output):
assert match(Command('git pull', output))
assert not match(Command('git pull', ''))
assert not match(Command('ls', output))
def test_get_new_command(output):
assert (get_new_command(Command('git pull', output))
== "git stash && git pull && git stash pop")
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
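The fix here is a fixed pipeline wrapped around the original script; a minimal sketch:

def match(command):
    return ('git pull' in command.script
            and 'You have unstaged changes' in command.output)

def get_new_command(command):
    # Park the dirty worktree, pull, then restore it.
    return 'git stash && {} && git stash pop'.format(command.script)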
nvbn/thefuck | https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/tests/rules/test_npm_run_script.py | tests/rules/test_npm_run_script.py | import pytest
from io import BytesIO
from thefuck.rules.npm_run_script import match, get_new_command
from thefuck.types import Command
output = '''
Usage: npm <command>
where <command> is one of:
access, add-user, adduser, apihelp, author, bin, bugs, c,
cache, completion, config, ddp, dedupe, deprecate, dist-tag,
dist-tags, docs, edit, explore, faq, find, find-dupes, get,
help, help-search, home, i, info, init, install, issues, la,
link, list, ll, ln, login, logout, ls, outdated, owner,
pack, ping, prefix, prune, publish, r, rb, rebuild, remove,
repo, restart, rm, root, run-script, s, se, search, set,
show, shrinkwrap, star, stars, start, stop, t, tag, team,
test, tst, un, uninstall, unlink, unpublish, unstar, up,
update, upgrade, v, version, view, whoami
npm <cmd> -h quick help on <cmd>
npm -l display full usage info
npm faq commonly asked questions
npm help <term> search for help on <term>
npm help npm involved overview
Specify configs in the ini-formatted file:
/home/nvbn/.npmrc
or on the command line via: npm <command> --key value
Config info can be viewed via: npm help config
'''
run_script_stdout = b'''
Lifecycle scripts included in code-view-web:
test
jest
available via `npm run-script`:
build
cp node_modules/ace-builds/src-min/ -a resources/ace/ && webpack --progress --colors -p --config ./webpack.production.config.js
develop
cp node_modules/ace-builds/src/ -a resources/ace/ && webpack-dev-server --progress --colors
watch-test
jest --verbose --watch
'''
@pytest.fixture(autouse=True)
def run_script(mocker):
patch = mocker.patch('thefuck.specific.npm.Popen')
patch.return_value.stdout = BytesIO(run_script_stdout)
return patch.return_value
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('script', [
'npm watch-test', 'npm develop'])
def test_match(script):
command = Command(script, output)
assert match(command)
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('command, run_script_out', [
(Command('npm test', 'TEST FAIL'), run_script_stdout),
(Command('npm watch-test', 'TEST FAIL'), run_script_stdout),
(Command('npm test', output), run_script_stdout),
(Command('vim watch-test', output), run_script_stdout)])
def test_not_match(run_script, command, run_script_out):
run_script.stdout = BytesIO(run_script_out)
assert not match(command)
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('script, result', [
('npm watch-test', 'npm run-script watch-test'),
('npm -i develop', 'npm run-script -i develop'),
('npm -i watch-script --path ..',
'npm run-script -i watch-script --path ..')])
def test_get_new_command(script, result):
command = Command(script, output)
assert get_new_command(command) == result
| python | MIT | c7e7e1d884d3bb241ea6448f72a989434c2a35ec | 2026-01-04T14:38:15.457096Z | false |
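These tests patch thefuck.specific.npm.Popen, so the script listing lives in a shared helper module; the sketch below inlines an assumed version of it. The two-space-indent heuristic for picking script names out of `npm run-script` output is an assumption about npm's layout.

import re
from subprocess import PIPE, Popen

def _get_scripts():
    # Ask npm which custom scripts exist and parse the listing.
    proc = Popen(['npm', 'run-script'], stdout=PIPE)
    in_custom_section = False
    for line in proc.stdout.readlines():
        line = line.decode()
        if 'available via `npm run-script`' in line:
            in_custom_section = True
            continue
        # Script names are assumed to sit at two-space indentation;
        # their command bodies are indented further and are skipped.
        if in_custom_section and re.match(r'^  [^ ]', line):
            yield line.strip()

def match(command):
    if not command.script.startswith('npm '):
        return False
    # Only fire when npm printed its generic usage (i.e. an unknown command).
    if 'Usage: npm <command>' not in command.output:
        return False
    scripts = list(_get_scripts())
    return any(part in scripts for part in command.script.split()[1:])

def get_new_command(command):
    # Keep any flags in place and splice `run-script` in after `npm`.
    return command.script.replace('npm ', 'npm run-script ', 1)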