| sample_id | text | metadata | category |
|---|---|---|---|
frappe/erpnext:erpnext/patches/v16_0/add_new_stock_entry_types.py | import frappe
def execute():
for stock_entry_type in [
"Receive from Customer",
"Return Raw Material to Customer",
"Subcontracting Delivery",
"Subcontracting Return",
]:
if not frappe.db.exists("Stock Entry Type", stock_entry_type):
frappe.new_doc("Stock Entry Type", purpose=stock_entry_type, is_standard=1).insert(
set_name=stock_entry_type, ignore_permissions=True
)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v16_0/add_new_stock_entry_types.py",
"license": "GNU General Public License v3.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
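This patch leans on an existence check to stay idempotent when migrations rerun. A minimal sketch of the same guard pattern factored into a helper; the helper name is illustrative, everything else mirrors the patch:

```python
import frappe


def ensure_stock_entry_types(purposes: list[str]) -> None:
	# Sketch of the idempotent-create pattern from the patch above;
	# the helper name is illustrative, not part of ERPNext.
	for purpose in purposes:
		# frappe.db.exists returns a truthy value when the record is present,
		# so rerunning the patch is a safe no-op.
		if not frappe.db.exists("Stock Entry Type", purpose):
			frappe.new_doc("Stock Entry Type", purpose=purpose, is_standard=1).insert(
				set_name=purpose, ignore_permissions=True
			)
```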
frappe/erpnext:erpnext/patches/v16_0/rename_subcontracted_quantity.py | import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
if frappe.db.has_column("Purchase Order Item", "subcontracted_quantity"):
rename_field("Purchase Order Item", "subcontracted_quantity", "subcontracted_qty")
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v16_0/rename_subcontracted_quantity.py",
"license": "GNU General Public License v3.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
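`rename_field` assumes the old column is present, so schema patches gate it on `frappe.db.has_column`. A hedged sketch generalizing that guard to a batch of renames; only the `subcontracted_quantity` pair comes from the patch, the helper itself is illustrative:

```python
import frappe
from frappe.model.utils.rename_field import rename_field


def rename_fields_if_present(doctype: str, renames: dict[str, str]) -> None:
	# rename_field fails if the old column is absent (e.g. on fresh sites),
	# so each rename is gated on frappe.db.has_column.
	for old_name, new_name in renames.items():
		if frappe.db.has_column(doctype, old_name):
			rename_field(doctype, old_name, new_name)


# Usage mirroring the patch above:
# rename_fields_if_present("Purchase Order Item", {"subcontracted_quantity": "subcontracted_qty"})
```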
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order/subcontracting_inward_order.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import comma_and, flt, get_link_to_form
from erpnext.buying.utils import check_on_hold_or_closed_status
from erpnext.controllers.subcontracting_controller import SubcontractingController
class SubcontractingInwardOrder(SubcontractingController):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
from erpnext.subcontracting.doctype.subcontracting_inward_order_item.subcontracting_inward_order_item import (
SubcontractingInwardOrderItem,
)
from erpnext.subcontracting.doctype.subcontracting_inward_order_received_item.subcontracting_inward_order_received_item import (
SubcontractingInwardOrderReceivedItem,
)
from erpnext.subcontracting.doctype.subcontracting_inward_order_scrap_item.subcontracting_inward_order_scrap_item import (
SubcontractingInwardOrderScrapItem,
)
from erpnext.subcontracting.doctype.subcontracting_inward_order_service_item.subcontracting_inward_order_service_item import (
SubcontractingInwardOrderServiceItem,
)
amended_from: DF.Link | None
company: DF.Link
currency: DF.Link | None
customer: DF.Link
customer_name: DF.Data
customer_warehouse: DF.Link
items: DF.Table[SubcontractingInwardOrderItem]
naming_series: DF.Literal["SCI-ORD-.YYYY.-"]
per_delivered: DF.Percent
per_process_loss: DF.Percent
per_produced: DF.Percent
per_raw_material_received: DF.Percent
per_raw_material_returned: DF.Percent
per_returned: DF.Percent
received_items: DF.Table[SubcontractingInwardOrderReceivedItem]
sales_order: DF.Link
scrap_items: DF.Table[SubcontractingInwardOrderScrapItem]
service_items: DF.Table[SubcontractingInwardOrderServiceItem]
set_delivery_warehouse: DF.Link | None
status: DF.Literal[
"Draft", "Open", "Ongoing", "Produced", "Delivered", "Returned", "Cancelled", "Closed"
]
title: DF.Data | None
transaction_date: DF.Date
# end: auto-generated types
pass
def validate(self):
super().validate()
self.set_is_customer_provided_item()
self.validate_customer_provided_items()
self.validate_customer_warehouse()
self.validate_service_items()
self.set_missing_values()
def on_submit(self):
self.update_status()
self.update_subcontracted_quantity_in_so()
def on_cancel(self):
self.update_status()
self.update_subcontracted_quantity_in_so()
def update_status(self, status=None, update_modified=True):
if self.status == "Closed" and self.status != status:
check_on_hold_or_closed_status("Sales Order", self.sales_order)
total_to_be_received = total_received = total_rm_returned = 0
for rm in self.get("received_items"):
if rm.get("is_customer_provided_item"):
total_to_be_received += flt(rm.required_qty)
total_received += flt(rm.received_qty)
total_rm_returned += flt(rm.returned_qty)
total_to_be_produced = total_produced = total_process_loss = total_delivered = total_fg_returned = 0
for item in self.get("items"):
total_to_be_produced += flt(item.qty)
total_produced += flt(item.produced_qty)
total_process_loss += flt(item.process_loss_qty)
total_delivered += flt(item.delivered_qty)
total_fg_returned += flt(item.returned_qty)
per_raw_material_received = flt(total_received / total_to_be_received * 100, 2)
per_raw_material_returned = flt(total_rm_returned / total_received * 100, 2) if total_received else 0
per_produced = flt(total_produced / total_to_be_produced * 100, 2)
per_process_loss = flt(total_process_loss / total_produced * 100, 2) if total_produced else 0
per_delivered = flt(total_delivered / total_to_be_produced * 100, 2)
per_returned = flt(total_fg_returned / total_delivered * 100, 2) if total_delivered else 0
self.db_set("per_raw_material_received", per_raw_material_received, update_modified=update_modified)
self.db_set("per_raw_material_returned", per_raw_material_returned, update_modified=update_modified)
self.db_set("per_produced", per_produced, update_modified=update_modified)
self.db_set("per_process_loss", per_process_loss, update_modified=update_modified)
self.db_set("per_delivered", per_delivered, update_modified=update_modified)
self.db_set("per_returned", per_returned, update_modified=update_modified)
if self.docstatus >= 1 and not status:
if self.docstatus == 1:
if self.status == "Draft":
status = "Open"
elif self.per_returned == 100:
status = "Returned"
elif self.per_delivered == 100:
status = "Delivered"
elif self.per_produced == 100:
status = "Produced"
elif self.per_raw_material_received > 0:
status = "Ongoing"
else:
status = "Open"
elif self.docstatus == 2:
status = "Cancelled"
if status and self.status != status:
self.db_set("status", status, update_modified=update_modified)
def update_subcontracted_quantity_in_so(self):
for service_item in self.service_items:
doc = frappe.get_doc("Sales Order Item", service_item.sales_order_item)
doc.subcontracted_qty = (
(doc.subcontracted_qty + service_item.qty)
if self._action == "submit"
else (doc.subcontracted_qty - service_item.qty)
)
doc.save()
def validate_customer_warehouse(self):
if frappe.get_cached_value("Warehouse", self.customer_warehouse, "customer") != self.customer:
frappe.throw(
_("Customer Warehouse {0} does not belong to Customer {1}.").format(
frappe.bold(self.customer_warehouse), frappe.bold(self.customer)
)
)
def validate_service_items(self):
sales_order_items = [item.sales_order_item for item in self.items]
self.service_items = [
service_item
for service_item in self.service_items
if service_item.sales_order_item in sales_order_items
]
for service_item in self.service_items:
item = next(item for item in self.items if item.sales_order_item == service_item.sales_order_item)
service_item.qty = item.qty * item.subcontracting_conversion_factor
service_item.fg_item_qty = item.qty
service_item.amount = service_item.qty * service_item.rate
def populate_items_table(self):
items = []
for si in self.service_items:
if si.fg_item:
item = frappe.get_doc("Item", si.fg_item)
so_item = frappe.get_doc("Sales Order Item", si.sales_order_item)
available_qty = so_item.stock_qty - so_item.subcontracted_qty
if available_qty == 0:
continue
si.required_qty = available_qty
conversion_factor = so_item.stock_qty / so_item.fg_item_qty
si.fg_item_qty = flt(
available_qty / conversion_factor, frappe.get_precision("Sales Order Item", "qty")
)
si.amount = available_qty * si.rate
bom = (
frappe.db.get_value(
"Subcontracting BOM",
{"finished_good": item.name, "is_active": 1},
"finished_good_bom",
)
or item.default_bom
)
items.append(
{
"item_code": item.name,
"item_name": item.item_name,
"expected_delivery_date": frappe.get_cached_value(
"Sales Order Item", si.sales_order_item, "delivery_date"
),
"description": item.description,
"qty": si.fg_item_qty,
"subcontracting_conversion_factor": conversion_factor,
"stock_uom": item.stock_uom,
"bom": bom,
"sales_order_item": si.sales_order_item,
}
)
else:
frappe.throw(
_("Please select Finished Good Item for Service Item {0}").format(
si.item_name or si.item_code
)
)
if items:
for item in items:
self.append("items", item)
def validate_customer_provided_items(self):
"""Check if atleast one raw material is customer provided"""
for item in self.get("items"):
raw_materials = [rm for rm in self.get("received_items") if rm.main_item_code == item.item_code]
if not any([rm.is_customer_provided_item for rm in raw_materials]):
frappe.throw(
_(
"Atleast one raw material for Finished Good Item {0} should be customer provided."
).format(frappe.bold(item.item_code))
)
def set_is_customer_provided_item(self):
for item in self.get("received_items"):
item.is_customer_provided_item = frappe.get_cached_value(
"Item", item.rm_item_code, "is_customer_provided_item"
)
@frappe.whitelist()
def make_work_order(self):
"""Create Work Order from Subcontracting Inward Order."""
wo_list = []
for item in self.get_production_items():
work_order = self.create_work_order(item)
if work_order:
wo_list.append(work_order)
self.show_list_created_message("Work Order", wo_list)
if not wo_list:
frappe.msgprint(_("No Work Orders were created"))
return wo_list
def get_production_items(self):
item_list = []
for d in self.items:
if d.produced_qty >= d.qty:
continue
item_details = {
"production_item": d.item_code,
"use_multi_level_bom": d.include_exploded_items,
"subcontracting_inward_order": self.name,
"bom_no": d.bom,
"stock_uom": d.stock_uom,
"company": self.company,
"project": frappe.get_cached_value("Sales Order", self.sales_order, "project"),
"source_warehouse": self.customer_warehouse,
"subcontracting_inward_order_item": d.name,
"reserve_stock": 1,
"fg_warehouse": d.delivery_warehouse,
}
qty = min(
[
flt(
(item.received_qty - item.returned_qty - item.work_order_qty)
/ flt(item.required_qty / d.qty, d.precision("qty")),
d.precision("qty"),
)
for item in self.get("received_items")
if item.reference_name == d.name and item.is_customer_provided_item and item.required_qty
]
)
qty = min(
int(qty) if frappe.get_cached_value("UOM", d.stock_uom, "must_be_whole_number") else qty,
d.qty - d.produced_qty,
)
item_details.update({"qty": qty, "max_producible_qty": qty})
item_list.append(item_details)
return item_list
def create_work_order(self, item):
from erpnext.manufacturing.doctype.work_order.work_order import OverProductionError
if flt(item.get("qty")) <= 0:
return
wo = frappe.new_doc("Work Order")
wo.update(item)
wo.set_work_order_operations()
wo.set_required_items()
try:
wo.flags.ignore_mandatory = True
wo.flags.ignore_validate = True
wo.insert()
return wo.name
except OverProductionError:
pass
def show_list_created_message(self, doctype, doc_list=None):
if not doc_list:
return
frappe.flags.mute_messages = False
doc_list = [get_link_to_form(doctype, p) for p in doc_list]
frappe.msgprint(_("{0} created").format(comma_and(doc_list)))
@frappe.whitelist()
def make_rm_stock_entry_inward(self, target_doc: Document | str | None = None):
def calculate_qty_as_per_bom(rm_item):
data = frappe.get_value(
"Subcontracting Inward Order Item",
{"name": rm_item.reference_name},
["process_loss_qty", "include_exploded_items"],
as_dict=True,
)
stock_qty = frappe.get_value(
"BOM Explosion Item" if data.include_exploded_items else "BOM Item",
{"name": rm_item.bom_detail_no},
"stock_qty",
)
qty = flt(
stock_qty * data.process_loss_qty,
frappe.get_precision("Subcontracting Inward Order Received Item", "required_qty"),
)
return rm_item.required_qty - rm_item.received_qty + rm_item.returned_qty + qty
if target_doc and target_doc.get("items"):
target_doc.items = []
stock_entry = get_mapped_doc(
"Subcontracting Inward Order",
self.name,
{
"Subcontracting Inward Order": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1],
},
},
},
target_doc,
ignore_child_tables=True,
)
stock_entry.purpose = "Receive from Customer"
stock_entry.subcontracting_inward_order = self.name
stock_entry.set_stock_entry_type()
for rm_item in self.received_items:
if not rm_item.required_qty or not rm_item.is_customer_provided_item:
continue
items_dict = {
rm_item.get("rm_item_code"): {
"scio_detail": rm_item.get("name"),
"qty": calculate_qty_as_per_bom(rm_item),
"to_warehouse": rm_item.get("warehouse"),
"stock_uom": rm_item.get("stock_uom"),
}
}
stock_entry.add_to_stock_entry_detail(items_dict)
if target_doc:
return stock_entry
else:
return stock_entry.as_dict()
@frappe.whitelist()
def make_rm_return(self, target_doc: Document | str | None = None):
if target_doc and target_doc.get("items"):
target_doc.items = []
stock_entry = get_mapped_doc(
"Subcontracting Inward Order",
self.name,
{
"Subcontracting Inward Order": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1],
},
},
},
target_doc,
ignore_child_tables=True,
)
stock_entry.purpose = "Return Raw Material to Customer"
stock_entry.set_stock_entry_type()
stock_entry.subcontracting_inward_order = self.name
for rm_item in self.received_items:
items_dict = {
rm_item.get("rm_item_code"): {
"scio_detail": rm_item.get("name"),
"qty": rm_item.received_qty - rm_item.work_order_qty - rm_item.returned_qty,
"from_warehouse": rm_item.get("warehouse"),
"stock_uom": rm_item.get("stock_uom"),
}
}
stock_entry.add_to_stock_entry_detail(items_dict)
if target_doc:
return stock_entry
else:
return stock_entry.as_dict()
@frappe.whitelist()
def make_subcontracting_delivery(self, target_doc: Document | str | None = None):
if target_doc and target_doc.get("items"):
target_doc.items = []
stock_entry = get_mapped_doc(
"Subcontracting Inward Order",
self.name,
{
"Subcontracting Inward Order": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1],
},
},
},
target_doc,
ignore_child_tables=True,
)
stock_entry.purpose = "Subcontracting Delivery"
stock_entry.set_stock_entry_type()
stock_entry.subcontracting_inward_order = self.name
scio_details = []
allow_over = frappe.get_single_value("Selling Settings", "allow_delivery_of_overproduced_qty")
for fg_item in self.items:
qty = (
fg_item.produced_qty
if allow_over
else min(fg_item.qty, fg_item.produced_qty) - fg_item.delivered_qty
)
if qty < 0:
continue
scio_details.append(fg_item.name)
items_dict = {
fg_item.item_code: {
"qty": qty,
"from_warehouse": fg_item.delivery_warehouse,
"stock_uom": fg_item.stock_uom,
"scio_detail": fg_item.name,
"is_finished_item": 1,
}
}
stock_entry.add_to_stock_entry_detail(items_dict)
if (
frappe.get_single_value("Selling Settings", "deliver_scrap_items")
and self.scrap_items
and scio_details
):
scrap_items = [
scrap_item for scrap_item in self.scrap_items if scrap_item.reference_name in scio_details
]
for scrap_item in scrap_items:
qty = scrap_item.produced_qty - scrap_item.delivered_qty
if qty > 0:
items_dict = {
scrap_item.item_code: {
"qty": scrap_item.produced_qty - scrap_item.delivered_qty,
"from_warehouse": scrap_item.warehouse,
"stock_uom": scrap_item.stock_uom,
"scio_detail": scrap_item.name,
"is_scrap_item": 1,
}
}
stock_entry.add_to_stock_entry_detail(items_dict)
if target_doc:
return stock_entry
else:
return stock_entry.as_dict()
@frappe.whitelist()
def make_subcontracting_return(self, target_doc: Document | str | None = None):
if target_doc and target_doc.get("items"):
target_doc.items = []
stock_entry = get_mapped_doc(
"Subcontracting Inward Order",
self.name,
{
"Subcontracting Inward Order": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1],
},
"field_map": {"name": "subcontracting_inward_order"},
},
},
target_doc,
ignore_child_tables=True,
)
stock_entry.purpose = "Subcontracting Return"
stock_entry.set_stock_entry_type()
for fg_item in self.items:
qty = fg_item.delivered_qty - fg_item.returned_qty
if qty < 0:
continue
items_dict = {
fg_item.item_code: {
"qty": qty,
"stock_uom": fg_item.stock_uom,
"scio_detail": fg_item.name,
"is_finished_item": 1,
}
}
stock_entry.add_to_stock_entry_detail(items_dict)
if target_doc:
return stock_entry
else:
return stock_entry.as_dict()
@frappe.whitelist()
def update_subcontracting_inward_order_status(scio: str | Document, status: str | None = None):
if isinstance(scio, str):
scio = frappe.get_doc("Subcontracting Inward Order", scio)
scio.update_status(status)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order/subcontracting_inward_order.py",
"license": "GNU General Public License v3.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
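The whitelisted `make_*` methods above return mapped Stock Entry payloads rather than saved documents, so callers wrap them in `frappe.new_doc(...).update(...)` before submitting, as the test file later in this dump does. A sketch of the happy-path flow, assuming a site where the named order exists (the order name is a placeholder):

```python
import frappe

# Minimal sketch of the document flow implemented above, assuming a site
# where "SCI-ORD-2025-00001" exists; the name is a placeholder.
scio = frappe.get_doc("Subcontracting Inward Order", "SCI-ORD-2025-00001")

# 1. Receive customer-provided raw material ("Receive from Customer" entry).
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in.submit()
scio.reload()

# 2. Manufacture against the auto-created Work Orders.
for wo_name in scio.make_work_order():
	print("created", wo_name)

# 3. Deliver finished goods back to the customer ("Subcontracting Delivery").
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
delivery.submit()
```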
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order/subcontracting_inward_order_dashboard.py | from frappe import _
def get_data():
return {
"fieldname": "subcontracting_inward_order",
"transactions": [
{
"label": _("Transactions"),
"items": ["Stock Entry"],
},
{
"label": _("Manufacturing"),
"items": ["Work Order"],
},
],
}
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order/subcontracting_inward_order_dashboard.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
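The dashboard config above wires related documents to the order through a single link field. As a hedged sketch, extending it is just another entry in `transactions`; the `Accounting` group and `Sales Invoice` item below are hypothetical, not part of the shipped dashboard:

```python
from frappe import _


def get_data():
	# "fieldname" names the link field on each listed doctype that points
	# back to Subcontracting Inward Order; the dashboard groups documents
	# by matching on it. The "Accounting" group is a hypothetical addition.
	return {
		"fieldname": "subcontracting_inward_order",
		"transactions": [
			{"label": _("Transactions"), "items": ["Stock Entry"]},
			{"label": _("Manufacturing"), "items": ["Work Order"]},
			{"label": _("Accounting"), "items": ["Sales Invoice"]},  # hypothetical
		],
	}
```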
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order/test_subcontracting_inward_order.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded.
# Use these module variables to add/remove to/from that list.
EXTRA_TEST_RECORD_DEPENDENCIES = []  # e.g. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = []  # e.g. ["User"]
from erpnext.manufacturing.doctype.work_order.work_order import make_stock_entry as make_stock_entry_from_wo
from erpnext.selling.doctype.sales_order.sales_order import make_subcontracting_inward_order
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
from erpnext.stock.doctype.item.test_item import make_item
from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
from erpnext.stock.doctype.warehouse.test_warehouse import create_warehouse
class IntegrationTestSubcontractingInwardOrder(IntegrationTestCase):
"""
Integration tests for SubcontractingInwardOrder.
Use this class for testing interactions between multiple components.
"""
def setUp(self):
create_test_data()
make_stock_entry(
item_code="Self RM", qty=100, to_warehouse="Stores - _TC", purpose="Material Receipt"
)
return super().setUp()
def test_customer_provided_item_cost_field(self):
so, scio = create_so_scio()
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in.save()
for item in rm_in.get("items"):
item.basic_rate = 10
rm_in.append(
"additional_costs",
{
"expense_account": "Freight and Forwarding Charges - _TC",
"description": "Test",
"amount": 100,
},
)
rm_in.submit()
for item in rm_in.get("items"):
self.assertEqual(item.customer_provided_item_cost, 15)
def test_customer_provided_item_cost_with_multiple_receipts(self):
"""
Validate that rate is calculated correctly (Weighted Average) when multiple receipts
occur for the same SCIO Received Item.
"""
so, scio = create_so_scio()
rm_item = "Basic RM"
# Receipt 1: 5 Qty @ Unit Cost 10
rm_in_1 = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in_1.items = [item for item in rm_in_1.items if item.item_code == rm_item]
rm_in_1.items[0].qty = 5
rm_in_1.items[0].basic_rate = 10
rm_in_1.items[0].transfer_qty = 5
rm_in_1.submit()
scio.reload()
received_item = next(item for item in scio.received_items if item.rm_item_code == rm_item)
self.assertEqual(received_item.rate, 10)
# Receipt 2: 5 Qty @ Unit Cost 20
rm_in_2 = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in_2.items = [item for item in rm_in_2.items if item.item_code == rm_item]
rm_in_2.items[0].qty = 5
rm_in_2.items[0].basic_rate = 20
rm_in_2.items[0].transfer_qty = 5
rm_in_2.save()
rm_in_2.submit()
# Check 2: Rate should be Weighted Average
# (5 * 10 + 5 * 20) / 10 = 150 / 10 = 15
scio.reload()
received_item = next(item for item in scio.received_items if item.rm_item_code == rm_item)
self.assertEqual(received_item.rate, 15)
# Cancel Receipt 2: Rate should revert to original
# (15 * 10 - 20 * 5) / 5 = 50 / 5 = 10
rm_in_2.cancel()
scio.reload()
received_item = next(item for item in scio.received_items if item.rm_item_code == rm_item)
self.assertEqual(received_item.received_qty, 5)
self.assertEqual(received_item.rate, 10)
def test_add_extra_customer_provided_item(self):
so, scio = create_so_scio()
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in.save()
rm_in.append(
"items",
{
"item_code": "Basic RM 2",
"qty": 5,
"t_warehouse": rm_in.items[0].t_warehouse,
"basic_rate": 10,
"transfer_qty": 5,
"uom": "Nos",
"conversion_factor": 1,
"against_fg": scio.items[0].name,
},
)
rm_in.submit()
scio.reload()
self.assertTrue(
next((item for item in scio.received_items if item.rm_item_code == "Basic RM 2"), None)
)
def test_add_extra_item_during_manufacture(self):
make_stock_entry(
item_code="Self RM 2", qty=5, to_warehouse="Stores - _TC", purpose="Material Receipt"
)
so, scio = create_so_scio()
frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward()).submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
next(
item for item in wo.required_items if item.item_code == "Self RM"
).source_warehouse = "Stores - _TC"
wo.submit()
manufacture = frappe.new_doc("Stock Entry").update(make_stock_entry_from_wo(wo.name, "Manufacture"))
manufacture.save()
frappe.new_doc(
"Stock Entry Detail",
parent=manufacture.name,
parenttype="Stock Entry",
parentfield="items",
idx=6,
item_code="Self RM 2",
qty=5,
s_warehouse="Stores - _TC",
basic_rate=10,
transfer_qty=5,
uom="Nos",
conversion_factor=1,
cost_center="Main - _TC",
).insert()
manufacture.reload()
manufacture.submit()
scio.reload()
self.assertTrue(
next((item for item in scio.received_items if item.rm_item_code == "Self RM 2"), None)
)
def test_work_order_creation_qty(self):
new_bom = frappe.copy_doc(frappe.get_doc("BOM", "BOM-Basic FG Item-001"))
new_bom.items = new_bom.items[:3]
new_bom.items[1].qty = 2
new_bom.items[2].qty = 3
new_bom.submit()
sc_bom = frappe.get_doc("Subcontracting BOM", "SB-0001")
sc_bom.finished_good_bom = new_bom.name
sc_bom.save()
so, scio = create_so_scio()
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in.items[0].qty = 3
rm_in.items[1].qty = 5
rm_in.items[2].qty = 12
rm_in.submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
self.assertEqual(wo.qty, 2)
def test_rm_return(self):
from erpnext.stock.serial_batch_bundle import get_batch_nos, get_serial_nos
so, scio = create_so_scio()
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in.items[3].qty = 2
rm_in.submit()
serial_nos = get_serial_nos(rm_in.items[3].serial_and_batch_bundle)
batch_nos = list(get_batch_nos(rm_in.items[3].serial_and_batch_bundle).keys())
scio.reload()
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
backup = rm_in.items[-1]
rm_in.items.clear()
rm_in.items.append(backup)
rm_in.items[0].qty = 1
rm_in.submit()
serial_nos += get_serial_nos(rm_in.items[0].serial_and_batch_bundle)
batch_nos += list(get_batch_nos(rm_in.items[0].serial_and_batch_bundle).keys())
scio.reload()
rm_return = frappe.new_doc("Stock Entry").update(scio.make_rm_return())
rm_return.submit()
self.assertEqual(
sorted(get_serial_nos(rm_return.items[-1].serial_and_batch_bundle)), sorted(serial_nos)
)
self.assertEqual(
sorted(list(get_batch_nos(rm_return.items[-1].serial_and_batch_bundle).keys())), sorted(batch_nos)
)
def test_subcontracting_delivery(self):
from erpnext.stock.serial_batch_bundle import get_serial_batch_list_from_item
extra_serial, _ = get_serial_batch_list_from_item(
make_stock_entry(
item_code="FG Item with Serial",
qty=1,
to_warehouse="Stores - _TC",
purpose="Material Receipt",
).items[0]
)
so, scio = create_so_scio(service_item="Service Item 2", fg_item="FG Item with Serial")
frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward()).submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.submit()
manufacture = frappe.new_doc("Stock Entry").update(make_stock_entry_from_wo(wo.name, "Manufacture"))
manufacture.submit()
serial_list, _ = get_serial_batch_list_from_item(
next(item for item in manufacture.items if item.is_finished_item)
)
scio.reload()
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
delivery.items[0].use_serial_batch_fields = 1
delivery.save()
delivery_serial_list, _ = get_serial_batch_list_from_item(delivery.items[0])
self.assertEqual(sorted(serial_list), sorted(delivery_serial_list))
delivery_serial_list[-1] = extra_serial[0]
delivery.items[0].serial_no = "\n".join(delivery_serial_list)
self.assertRaises(frappe.ValidationError, delivery.submit)
def test_fg_item_fields(self):
so, scio = create_so_scio()
frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward()).submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.submit()
manufacture = frappe.new_doc("Stock Entry").update(make_stock_entry_from_wo(wo.name, "Manufacture"))
manufacture.save()
manufacture.fg_completed_qty = 5
manufacture.process_loss_qty = 1
manufacture.items[-1].qty = 4
manufacture.submit()
scio.reload()
self.assertEqual(scio.items[0].qty, 5)
self.assertEqual(scio.items[0].process_loss_qty, 1)
self.assertEqual(scio.items[0].produced_qty, 4)
rm_in = scio.make_rm_stock_entry_inward()
for item in rm_in.get("items"):
self.assertEqual(item.qty, 1)
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
delivery.items[0].qty = 5
self.assertRaises(frappe.ValidationError, delivery.submit)
delivery.items[0].qty = 2
delivery.submit()
scio.reload()
fg_return = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_return())
self.assertEqual(fg_return.items[0].qty, 2)
fg_return.items[0].qty = 1
fg_return.items[0].t_warehouse = "Stores - _TC"
fg_return.submit()
scio.reload()
self.assertEqual(scio.items[0].delivered_qty, 2)
self.assertEqual(scio.items[0].returned_qty, 1)
@IntegrationTestCase.change_settings("Selling Settings", {"allow_delivery_of_overproduced_qty": 1})
@IntegrationTestCase.change_settings(
"Manufacturing Settings", {"overproduction_percentage_for_work_order": 20}
)
def test_over_production_delivery(self):
so, scio = create_so_scio()
frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward()).submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.submit()
manufacture = frappe.new_doc("Stock Entry").update(make_stock_entry_from_wo(wo.name, "Manufacture"))
manufacture.items[-1].qty = 6
manufacture.fg_completed_qty = 6
manufacture.submit()
scio.reload()
self.assertEqual(scio.items[0].produced_qty, 6)
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
self.assertEqual(delivery.items[0].qty, 6)
delivery.submit()
frappe.db.set_single_value("Selling Settings", "allow_delivery_of_overproduced_qty", 0)
delivery.cancel()
scio.reload()
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
self.assertEqual(delivery.items[0].qty, 5)
delivery.items[0].qty = 6
self.assertRaises(frappe.ValidationError, delivery.submit)
@IntegrationTestCase.change_settings("Selling Settings", {"deliver_scrap_items": 1})
def test_scrap_delivery(self):
new_bom = frappe.copy_doc(frappe.get_doc("BOM", "BOM-Basic FG Item-001"))
new_bom.scrap_items.append(frappe.new_doc("BOM Scrap Item", item_code="Basic RM 2", qty=1))
new_bom.submit()
sc_bom = frappe.get_doc("Subcontracting BOM", "SB-0001")
sc_bom.finished_good_bom = new_bom.name
sc_bom.save()
so, scio = create_so_scio()
frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward()).submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.submit()
frappe.new_doc("Stock Entry").update(make_stock_entry_from_wo(wo.name, "Manufacture")).submit()
scio.reload()
self.assertEqual(scio.scrap_items[0].item_code, "Basic RM 2")
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
self.assertEqual(delivery.items[-1].item_code, "Basic RM 2")
frappe.db.set_single_value("Selling Settings", "deliver_scrap_items", 0)
delivery = frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery())
self.assertNotEqual(delivery.items[-1].item_code, "Basic RM 2")
def test_self_rm_billed_qty(self):
so, scio = create_so_scio()
frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward()).submit()
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.submit()
frappe.new_doc("Stock Entry").update(make_stock_entry_from_wo(wo.name, "Manufacture")).submit()
scio.reload()
frappe.new_doc("Stock Entry").update(scio.make_subcontracting_delivery()).submit()
scio.reload()
from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
si = make_sales_invoice(so.name)
self.assertEqual(si.items[-1].item_code, "Self RM")
self.assertEqual(si.items[-1].qty, 5)
si.items[-1].qty = 3
si.submit()
scio.reload()
self.assertEqual(scio.received_items[-1].billed_qty, 3)
si = make_sales_invoice(so.name)
self.assertEqual(si.items[-1].qty, 2)
si.submit()
scio.reload()
self.assertEqual(scio.received_items[-1].billed_qty, 5)
scio.reload()
si = make_sales_invoice(so.name)
self.assertEqual(len(si.items), 1)
def test_extra_items_reservation_transfer(self):
so, scio = create_so_scio()
rm_in = frappe.new_doc("Stock Entry").update(scio.make_rm_stock_entry_inward())
rm_in.items[-2].qty = 7
rm_in.submit()
wo_list = []
scio.reload()
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.qty = 3
wo.submit()
wo_list.append(wo.name)
self.assertEqual(wo.required_items[-2].stock_reserved_qty, 3)
scio.reload()
self.assertEqual(scio.received_items[-2].work_order_qty, 3)
wo = frappe.get_doc("Work Order", scio.make_work_order()[0])
wo.skip_transfer = 1
wo.required_items[-1].source_warehouse = "Stores - _TC"
wo.qty = 2
wo.submit()
wo_list.append(wo.name)
from frappe.query_builder.functions import Sum
table = frappe.qb.DocType("Stock Reservation Entry")
query = (
frappe.qb.from_(table)
.select(Sum(table.reserved_qty))
.where(
(table.voucher_type == "Work Order")
& (table.item_code == rm_in.items[-2].item_code)
& (table.voucher_no.isin(wo_list))
)
)
reserved_qty = query.run()[0][0]
self.assertEqual(reserved_qty, 7)
def create_so_scio(service_item="Service Item 1", fg_item="Basic FG Item"):
item_list = [{"item_code": service_item, "qty": 5, "fg_item": fg_item, "fg_item_qty": 5}]
so = make_sales_order(is_subcontracted=1, item_list=item_list)
scio = make_subcontracting_inward_order(so.name)
scio.items[0].delivery_warehouse = "_Test Warehouse - _TC"
scio.submit()
scio.reload()
return so, scio
def create_test_data():
make_subcontracted_items()
make_raw_materials()
make_service_items()
make_bom_for_subcontracted_items()
make_subcontracting_boms()
create_warehouse("_Test Customer Warehouse - _TC", {"customer": "_Test Customer"})
def make_subcontracted_items():
sub_contracted_items = {
"Basic FG Item": {},
"FG Item with Serial": {
"has_serial_no": 1,
"serial_no_series": "FGS.####",
},
"FG Item with Batch": {
"has_batch_no": 1,
"create_new_batch": 1,
"batch_series": "FGB.####",
},
"FG Item with Serial and Batch": {
"has_serial_no": 1,
"serial_no_series": "FGS.####",
"has_batch_no": 1,
"create_new_batch": 1,
"batch_series": "FGB.####",
},
}
for item, properties in sub_contracted_items.items():
if not frappe.db.exists("Item", item):
properties.update({"is_stock_item": 1, "is_sub_contracted_item": 1})
make_item(item, properties)
def make_raw_materials():
customer_provided_raw_materials = {
"Basic RM": {},
"Basic RM 2": {},
"RM with Serial": {"has_serial_no": 1, "serial_no_series": "RMS.####"},
"RM with Batch": {
"has_batch_no": 1,
"create_new_batch": 1,
"batch_number_series": "RMB.####",
},
"RM with Serial and Batch": {
"has_serial_no": 1,
"serial_no_series": "RMS.####",
"has_batch_no": 1,
"create_new_batch": 1,
"batch_number_series": "RMB.####",
},
}
for item, properties in customer_provided_raw_materials.items():
if not frappe.db.exists("Item", item):
properties.update({"is_stock_item": 1, "is_purchase_item": 0, "is_customer_provided_item": 1})
make_item(item, properties)
self_raw_materials = {
"Self RM": {},
"Self RM 2": {},
}
for item, properties in self_raw_materials.items():
if not frappe.db.exists("Item", item):
properties.update({"is_stock_item": 1, "valuation_rate": 10})
make_item(item, properties)
def make_service_items():
from erpnext.controllers.tests.test_subcontracting_controller import make_service_item
service_items = {
"Service Item 1": {},
"Service Item 2": {},
"Service Item 3": {},
"Service Item 4": {},
}
for item, properties in service_items.items():
make_service_item(item, properties)
def make_bom_for_subcontracted_items():
from erpnext.manufacturing.doctype.production_plan.test_production_plan import make_bom
boms = {
"Basic FG Item": [
"Basic RM",
"RM with Serial",
"RM with Batch",
"RM with Serial and Batch",
"Self RM",
],
"FG Item with Serial": [
"Basic RM",
"RM with Serial",
"RM with Batch",
"RM with Serial and Batch",
"Self RM",
],
"FG Item with Batch": [
"Basic RM",
"RM with Serial",
"RM with Batch",
"RM with Serial and Batch",
"Self RM",
],
"FG Item with Serial and Batch": [
"Basic RM",
"RM with Serial",
"RM with Batch",
"RM with Serial and Batch",
"Self RM",
],
}
for item_code, raw_materials in boms.items():
if not frappe.db.exists("BOM", {"item": item_code}):
make_bom(
item=item_code, raw_materials=raw_materials, rate=100, currency="INR", set_as_default_bom=1
)
def make_subcontracting_boms():
subcontracting_boms = [
{
"finished_good": "Basic FG Item",
"service_item": "Service Item 1",
},
{
"finished_good": "FG Item with Serial",
"service_item": "Service Item 2",
},
{
"finished_good": "FG Item with Batch",
"service_item": "Service Item 3",
},
{
"finished_good": "FG Item with Serial and Batch",
"service_item": "Service Item 4",
},
]
for subcontracting_bom in subcontracting_boms:
if not frappe.db.exists("Subcontracting BOM", {"finished_good": subcontracting_bom["finished_good"]}):
doc = frappe.get_doc(
{
"doctype": "Subcontracting BOM",
"finished_good": subcontracting_bom["finished_good"],
"service_item": subcontracting_bom["service_item"],
"is_active": 1,
}
)
doc.insert()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order/test_subcontracting_inward_order.py",
"license": "GNU General Public License v3.0",
"lines": 509,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order_item/subcontracting_inward_order_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.query_builder.functions import Sum
class SubcontractingInwardOrderItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
bom: DF.Link
conversion_factor: DF.Float
delivered_qty: DF.Float
delivery_warehouse: DF.Link
include_exploded_items: DF.Check
item_code: DF.Link
item_name: DF.Data
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
process_loss_qty: DF.Float
produced_qty: DF.Float
qty: DF.Float
returned_qty: DF.Float
sales_order_item: DF.Data | None
stock_uom: DF.Link
subcontracting_conversion_factor: DF.Float
# end: auto-generated types
pass
def update_manufacturing_qty_fields(self):
table = frappe.qb.DocType("Work Order")
query = (
frappe.qb.from_(table)
.select(
Sum(table.produced_qty).as_("produced_qty"),
Sum(table.process_loss_qty).as_("process_loss_qty"),
)
.where((table.subcontracting_inward_order_item == self.name) & (table.docstatus == 1))
)
result = query.run(as_dict=True)[0]
self.db_set("produced_qty", result.produced_qty)
self.db_set("process_loss_qty", result.process_loss_qty)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order_item/subcontracting_inward_order_item.py",
"license": "GNU General Public License v3.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
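`update_manufacturing_qty_fields` aggregates submitted Work Orders with the Frappe query builder. The same `Sum` pattern in isolation, with a placeholder child-row name:

```python
import frappe
from frappe.query_builder.functions import Sum

# Hedged sketch of the aggregation used in update_manufacturing_qty_fields:
# sum columns across submitted (docstatus == 1) Work Orders linked to one
# Subcontracting Inward Order Item row. "SCIO-ITEM-0001" is a placeholder.
wo = frappe.qb.DocType("Work Order")
result = (
	frappe.qb.from_(wo)
	.select(
		Sum(wo.produced_qty).as_("produced_qty"),
		Sum(wo.process_loss_qty).as_("process_loss_qty"),
	)
	.where((wo.subcontracting_inward_order_item == "SCIO-ITEM-0001") & (wo.docstatus == 1))
).run(as_dict=True)[0]

print(result.produced_qty, result.process_loss_qty)
```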
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order_received_item/subcontracting_inward_order_received_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class SubcontractingInwardOrderReceivedItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
billed_qty: DF.Float
bom_detail_no: DF.Data | None
consumed_qty: DF.Float
is_additional_item: DF.Check
is_customer_provided_item: DF.Check
main_item_code: DF.Link
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
rate: DF.Currency
received_qty: DF.Float
reference_name: DF.Data
required_qty: DF.Float
returned_qty: DF.Float
rm_item_code: DF.Link
stock_uom: DF.Link
warehouse: DF.Link | None
work_order_qty: DF.Float
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order_received_item/subcontracting_inward_order_received_item.py",
"license": "GNU General Public License v3.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order_scrap_item/subcontracting_inward_order_scrap_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class SubcontractingInwardOrderScrapItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
delivered_qty: DF.Float
fg_item_code: DF.Link
item_code: DF.Link
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
produced_qty: DF.Float
reference_name: DF.Data
stock_uom: DF.Link
warehouse: DF.Link
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order_scrap_item/subcontracting_inward_order_scrap_item.py",
"license": "GNU General Public License v3.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/subcontracting/doctype/subcontracting_inward_order_service_item/subcontracting_inward_order_service_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class SubcontractingInwardOrderServiceItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
amount: DF.Currency
fg_item: DF.Link
fg_item_qty: DF.Float
item_code: DF.Link
item_name: DF.Data
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
qty: DF.Float
rate: DF.Currency
sales_order_item: DF.Data | None
uom: DF.Link
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/subcontracting/doctype/subcontracting_inward_order_service_item/subcontracting_inward_order_service_item.py",
"license": "GNU General Public License v3.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/accounts/report/consolidated_trial_balance/consolidated_trial_balance.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.utils import flt, getdate, now_datetime, nowdate
import erpnext
from erpnext.accounts.doctype.account.account import get_root_company
from erpnext.accounts.report.financial_statements import (
filter_accounts,
filter_out_zero_value_rows,
set_gl_entries_by_account,
)
from erpnext.accounts.report.trial_balance.trial_balance import (
accumulate_values_into_parents,
calculate_total_row,
calculate_values,
get_opening_balances,
hide_group_accounts,
prepare_opening_closing,
value_fields,
)
from erpnext.accounts.report.trial_balance.trial_balance import (
validate_filters as tb_validate_filters,
)
from erpnext.accounts.report.utils import get_rate_as_at
from erpnext.accounts.utils import get_zero_cutoff
from erpnext.setup.utils import get_exchange_rate
def execute(filters: dict | None = None):
"""Return columns and data for the report.
This is the main entry point for the report. It accepts the filters as a
dictionary and should return columns and data. It is called by the framework
every time the report is refreshed or a filter is updated.
"""
validate_filters(filters=filters)
columns = get_columns()
data = get_data(filters)
return columns, data
def validate_filters(filters):
validate_companies(filters)
tb_validate_filters(filters)
def validate_companies(filters):
if not filters.company:
return
root_company = get_root_company(filters.company[0])
root_company = root_company[0] if root_company else filters.company[0]
lft, rgt = frappe.db.get_value("Company", root_company, fieldname=["lft", "rgt"])
company_subtree = frappe.db.get_all(
"Company",
{"lft": [">=", lft], "rgt": ["<=", rgt]},
"name",
order_by="lft",
pluck="name",
)
for company in filters.company:
if company not in company_subtree:
frappe.throw(
_("Consolidated Trial Balance can be generated for Companies having same root Company.")
)
sort_companies(filters)
def sort_companies(filters):
companies = frappe.db.get_all(
"Company", {"name": ["in", filters.company]}, "name", order_by="lft", pluck="name"
)
filters.company = companies
def get_data(filters) -> list[list]:
"""Return data for the report.
The report data is a list of rows, with each row being a list of cell values.
"""
data = []
if filters.company:
reporting_currency, ignore_reporting_currency = get_reporting_currency(filters)
else:
return data
for company in filters.company:
company_filter = frappe._dict(filters)
company_filter.company = company
tb_data = get_company_wise_tb_data(company_filter, reporting_currency, ignore_reporting_currency)
consolidate_trial_balance_data(data, tb_data)
if filters.get("show_net_values"):
prepare_opening_closing_for_ctb(data)
if not filters.get("show_group_accounts"):
data = hide_group_accounts(data)
total_row = calculate_total_row(
data, reporting_currency, show_group_accounts=filters.get("show_group_accounts")
)
calculate_foreign_currency_translation_reserve(total_row, data, filters=filters)
data.extend([total_row])
if filters.get("presentation_currency"):
update_to_presentation_currency(
data,
reporting_currency,
filters.get("presentation_currency"),
filters.get("to_date"),
ignore_reporting_currency,
)
return data
def get_company_wise_tb_data(filters, reporting_currency, ignore_reporting_currency):
accounts = frappe.db.sql(
"""select name, account_number, parent_account, account_name, root_type, report_type, account_type, is_group, lft, rgt
from `tabAccount` where company=%s order by lft""",
filters.company,
as_dict=True,
)
ignore_is_opening = frappe.get_single_value("Accounts Settings", "ignore_is_opening_check_for_reporting")
default_currency = erpnext.get_company_currency(filters.company)
opening_exchange_rate = get_exchange_rate(
default_currency,
reporting_currency,
filters.get("from_date"),
)
current_date = (
filters.get("to_date") if getdate(filters.get("to_date")) <= now_datetime().date() else nowdate()
)
closing_exchange_rate = get_exchange_rate(
default_currency,
reporting_currency,
current_date,
)
if not (opening_exchange_rate and closing_exchange_rate):
frappe.throw(
_(
"Consolidated Trial balance could not be generated as Exchange Rate from {0} to {1} is not available for {2}.",
).format(default_currency, reporting_currency, current_date)
)
if not accounts:
return []
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
gl_entries_by_account = {}
opening_balances = get_opening_balances(
filters,
ignore_is_opening,
exchange_rate=opening_exchange_rate,
ignore_reporting_currency=ignore_reporting_currency,
)
set_gl_entries_by_account(
filters.company,
filters.from_date,
filters.to_date,
filters,
gl_entries_by_account,
root_lft=None,
root_rgt=None,
ignore_closing_entries=not flt(filters.with_period_closing_entry_for_current_period),
ignore_opening_entries=True,
group_by_account=True,
ignore_reporting_currency=ignore_reporting_currency,
)
calculate_values(
accounts,
gl_entries_by_account,
opening_balances,
filters.get("show_net_values"),
ignore_is_opening=ignore_is_opening,
exchange_rate=closing_exchange_rate,
ignore_reporting_currency=ignore_reporting_currency,
)
accumulate_values_into_parents(accounts, accounts_by_name)
data = prepare_companywise_tb_data(accounts, filters, parent_children_map, reporting_currency)
data = filter_out_zero_value_rows(
data, parent_children_map, show_zero_values=filters.get("show_zero_values")
)
return data
def prepare_companywise_tb_data(accounts, filters, parent_children_map, reporting_currency):
data = []
for d in accounts:
has_value = False
row = {
"account": d.name,
"parent_account": d.parent_account,
"indent": d.indent,
"from_date": filters.from_date,
"to_date": filters.to_date,
"currency": reporting_currency,
"is_group_account": d.is_group,
"acc_name": d.account_name,
"acc_number": d.account_number,
"account_name": (
f"{d.account_number} - {d.account_name}" if d.account_number else d.account_name
),
"root_type": d.root_type,
"account_type": d.account_type,
}
for key in value_fields:
row[key] = flt(d.get(key, 0.0), 3)
if abs(row[key]) >= get_zero_cutoff(reporting_currency):
# ignore zero values
has_value = True
row["has_value"] = has_value
data.append(row)
return data
def calculate_foreign_currency_translation_reserve(total_row, data, filters):
if not data or not total_row:
return
opening_dr_cr_diff = total_row["opening_debit"] - total_row["opening_credit"]
dr_cr_diff = total_row["debit"] - total_row["credit"]
idx = get_fctr_root_row_index(data)
fctr_row = {
"account": _("Foreign Currency Translation Reserve"),
"account_name": _("Foreign Currency Translation Reserve"),
"warn_if_negative": True,
"opening_debit": abs(opening_dr_cr_diff) if opening_dr_cr_diff < 0 else 0.0,
"opening_credit": abs(opening_dr_cr_diff) if opening_dr_cr_diff > 0 else 0.0,
"debit": abs(dr_cr_diff) if dr_cr_diff < 0 else 0.0,
"credit": abs(dr_cr_diff) if dr_cr_diff > 0 else 0.0,
"closing_debit": 0.0,
"closing_credit": 0.0,
"root_type": data[idx].get("root_type"),
"account_type": "Equity",
"parent_account": data[idx].get("account"),
"indent": data[idx].get("indent") + 1 if filters.get("show_group_accounts") else 0,
"has_value": True,
"currency": total_row.get("currency"),
}
fctr_row["closing_debit"] = fctr_row["opening_debit"] + fctr_row["debit"]
fctr_row["closing_credit"] = fctr_row["opening_credit"] + fctr_row["credit"]
if filters.get("show_net_values"):
prepare_opening_closing(fctr_row)
data.insert(idx + 1, fctr_row)
for field in value_fields:
total_row[field] += fctr_row[field]
def get_fctr_root_row_index(data):
"""
Returns: index, root_type, parent_account
"""
liabilities_idx, equity_idx = -1, -1
for tmp_idx, d in enumerate(data):
if liabilities_idx == -1 and d.get("root_type") == "Liability":
liabilities_idx = tmp_idx
if equity_idx == -1 and d.get("root_type") == "Equity":
equity_idx = tmp_idx
if equity_idx == -1:
return liabilities_idx
return equity_idx
def consolidate_trial_balance_data(data, tb_data):
if not data:
data.extend(list(tb_data))
return
for entry in tb_data:
if entry:
consolidate_gle_data(data, entry, tb_data)
def get_reporting_currency(filters):
reporting_currency = frappe.get_cached_value("Company", filters.company[0], "reporting_currency")
default_currency = None
for company in filters.company:
company_default_currency = erpnext.get_company_currency(company)
if not default_currency:
default_currency = company_default_currency
if company_default_currency != default_currency:
return (reporting_currency, False)
return (default_currency, True)
def consolidate_gle_data(data, entry, tb_data):
entry_gle_exists = False
for gle in data:
if gle and gle["account_name"] == entry["account_name"]:
entry_gle_exists = True
gle["closing_credit"] += entry["closing_credit"]
gle["closing_debit"] += entry["closing_debit"]
gle["credit"] += entry["credit"]
gle["debit"] += entry["debit"]
gle["opening_credit"] += entry["opening_credit"]
gle["opening_debit"] += entry["opening_debit"]
gle["has_value"] = 1
if not entry_gle_exists:
entry_parent_account = next(
(d for d in tb_data if d.get("account") == entry.get("parent_account")), None
)
parent_account_in_data = None
if entry_parent_account:
parent_account_in_data = next(
(d for d in data if d and d.get("account_name") == entry_parent_account.get("account_name")),
None,
)
if parent_account_in_data:
entry["parent_account"] = parent_account_in_data.get("account")
entry["indent"] = (parent_account_in_data.get("indent") or 0) + 1
data.insert(data.index(parent_account_in_data) + 1, entry)
else:
entry["parent_account"] = None
entry["indent"] = 0
data.append(entry)
def update_to_presentation_currency(data, from_currency, to_currency, date, ignore_reporting_currency):
if from_currency == to_currency:
return
exchange_rate = get_rate_as_at(date, from_currency, to_currency)
for d in data:
if not ignore_reporting_currency:
for field in value_fields:
if d.get(field):
d[field] = d[field] * flt(exchange_rate)
d.update(currency=to_currency)
def prepare_opening_closing_for_ctb(data):
for d in data:
prepare_opening_closing(d)
def get_columns():
return [
{
"fieldname": "account_name",
"label": _("Account"),
"fieldtype": "Data",
"width": 300,
},
{
"fieldname": "acc_name",
"label": _("Account Name"),
"fieldtype": "Data",
"hidden": 1,
"width": 250,
},
{
"fieldname": "acc_number",
"label": _("Account Number"),
"fieldtype": "Data",
"hidden": 1,
"width": 120,
},
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1,
},
{
"fieldname": "opening_debit",
"label": _("Opening (Dr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120,
},
{
"fieldname": "opening_credit",
"label": _("Opening (Cr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120,
},
{
"fieldname": "debit",
"label": _("Debit"),
"fieldtype": "Currency",
"options": "currency",
"width": 120,
},
{
"fieldname": "credit",
"label": _("Credit"),
"fieldtype": "Currency",
"options": "currency",
"width": 120,
},
{
"fieldname": "closing_debit",
"label": _("Closing (Dr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120,
},
{
"fieldname": "closing_credit",
"label": _("Closing (Cr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120,
},
]
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/accounts/report/consolidated_trial_balance/consolidated_trial_balance.py",
"license": "GNU General Public License v3.0",
"lines": 366,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
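`execute` is the report's only entry point: it takes a filters dict and returns `(columns, data)`, with the grand-total row appended last. A sketch of driving it directly, mirroring the test file that follows; the company names and fiscal year are placeholders for records on the site:

```python
import frappe
from erpnext.accounts.report.consolidated_trial_balance.consolidated_trial_balance import execute

# Sketch of invoking the report directly, as the tests below do.
# Companies and fiscal year are placeholders for records on the site.
filters = frappe._dict(
	{
		"company": ["Parent Group Company India", "Child Company India"],
		"fiscal_year": "2025-2026",
	}
)
columns, data = execute(filters)
total_row = data[-1]  # calculate_total_row's output is appended last
assert total_row["closing_debit"] == total_row["closing_credit"]
```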
frappe/erpnext:erpnext/accounts/report/consolidated_trial_balance/test_consolidated_trial_balance.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
from frappe import _
from frappe.tests import IntegrationTestCase
from frappe.utils import flt, today
from erpnext.accounts.report.consolidated_trial_balance.consolidated_trial_balance import execute
from erpnext.setup.utils import get_exchange_rate
class ForeignCurrencyTranslationReserveNotFoundError(frappe.ValidationError):
pass
class TestConsolidatedTrialBalance(IntegrationTestCase):
@classmethod
def setUpClass(cls):
from erpnext.accounts.report.trial_balance.test_trial_balance import create_company
from erpnext.accounts.utils import get_fiscal_year
# Group Company
create_company(company_name="Parent Group Company India", is_group=1)
create_company(company_name="Child Company India", parent_company="Parent Group Company India")
# Child Company with different currency
create_company(
company_name="Child Company US",
country="United States",
currency="USD",
parent_company="Parent Group Company India",
)
create_journal_entry(
company="Parent Group Company India",
acc1="Marketing Expenses - PGCI",
acc2="Cash - PGCI",
amount=100000,
)
create_journal_entry(
company="Child Company India", acc1="Cash - CCI", acc2="Secured Loans - CCI", amount=50000
)
create_journal_entry(
company="Child Company US", acc1="Marketing Expenses - CCU", acc2="Cash - CCU", amount=1000
)
cls.fiscal_year = get_fiscal_year(today(), company="Parent Group Company India")[0]
def test_single_company_report(self):
filters = frappe._dict({"company": ["Parent Group Company India"], "fiscal_year": self.fiscal_year})
report = execute(filters)
total_row = report[1][-1]
self.assertEqual(total_row["closing_debit"], total_row["closing_credit"])
self.assertEqual(total_row["closing_credit"], 100000)
def test_child_company_report_with_same_default_currency_as_parent_company(self):
filters = frappe._dict(
{
"company": ["Parent Group Company India", "Child Company India"],
"fiscal_year": self.fiscal_year,
}
)
report = execute(filters)
total_row = report[1][-1]
self.assertEqual(total_row["closing_debit"], total_row["closing_credit"])
def test_child_company_with_different_default_currency_from_parent_company(self):
filters = frappe._dict(
{
"company": ["Parent Group Company India", "Child Company US"],
"fiscal_year": self.fiscal_year,
}
)
report = execute(filters)
total_row = report[1][-1]
exchange_rate = get_exchange_rate("USD", "INR")
fctr = [d for d in report[1] if d.get("account") == _("Foreign Currency Translation Reserve")]
if not fctr:
raise ForeignCurrencyTranslationReserveNotFoundError
ccu_total_credit = 1000 * flt(exchange_rate)
self.assertEqual(total_row["closing_debit"], total_row["closing_credit"])
self.assertNotEqual(total_row["closing_credit"], ccu_total_credit)
self.assertEqual(total_row["closing_credit"], flt(100000 + ccu_total_credit))
def create_journal_entry(**args):
args = frappe._dict(args)
je = frappe.new_doc("Journal Entry")
je.posting_date = args.posting_date or today()
je.company = args.company
je.set(
"accounts",
[
{
"account": args.acc1,
"debit_in_account_currency": args.amount if args.amount > 0 else 0,
"credit_in_account_currency": abs(args.amount) if args.amount < 0 else 0,
},
{
"account": args.acc2,
"credit_in_account_currency": args.amount if args.amount > 0 else 0,
"debit_in_account_currency": abs(args.amount) if args.amount < 0 else 0,
},
],
)
je.save()
je.submit()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/accounts/report/consolidated_trial_balance/test_consolidated_trial_balance.py",
"license": "GNU General Public License v3.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/patches/v16_0/set_posting_datetime_for_sabb_and_drop_indexes.py | import click
import frappe
def execute():
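    # Backfill posting_datetime on Serial and Batch Bundle from the matching,
    # non-cancelled Stock Ledger Entry, then drop indexes that are no longer needed.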
frappe.db.sql(
"""
UPDATE `tabSerial and Batch Bundle`
JOIN `tabStock Ledger Entry`
ON `tabSerial and Batch Bundle`.`name` = `tabStock Ledger Entry`.`serial_and_batch_bundle`
SET `tabSerial and Batch Bundle`.`posting_datetime` = `tabStock Ledger Entry`.`posting_datetime`
WHERE `tabStock Ledger Entry`.`is_cancelled` = 0
"""
)
drop_indexes()
def drop_indexes():
table = "tabSerial and Batch Bundle"
index_list = ["voucher_no_index", "item_code_index", "warehouse_index", "company_index"]
for index in index_list:
if not frappe.db.has_index(table, index):
continue
try:
frappe.db.sql_ddl(f"ALTER TABLE `{table}` DROP INDEX `{index}`")
click.echo(f"✓ dropped {index} index from {table}")
except Exception:
frappe.log_error("Failed to drop index")
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v16_0/set_posting_datetime_for_sabb_and_drop_indexes.py",
"license": "GNU General Public License v3.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/manufacturing/doctype/master_production_schedule/master_production_schedule.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import math
import frappe
from frappe import _, bold
from frappe.model.document import Document
from frappe.utils import add_days, getdate, parse_json
class MasterProductionSchedule(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
from erpnext.manufacturing.doctype.master_production_schedule_item.master_production_schedule_item import (
MasterProductionScheduleItem,
)
from erpnext.manufacturing.doctype.production_plan_material_request.production_plan_material_request import (
ProductionPlanMaterialRequest,
)
from erpnext.manufacturing.doctype.production_plan_sales_order.production_plan_sales_order import (
ProductionPlanSalesOrder,
)
amended_from: DF.Link | None
company: DF.Link
from_date: DF.Date
items: DF.Table[MasterProductionScheduleItem]
material_requests: DF.Table[ProductionPlanMaterialRequest]
naming_series: DF.Literal["MPS.YY.-.######"]
parent_warehouse: DF.Link | None
posting_date: DF.Date
sales_forecast: DF.Link | None
sales_orders: DF.Table[ProductionPlanSalesOrder]
select_items: DF.TableMultiSelect[MasterProductionScheduleItem]
to_date: DF.Date | None
# end: auto-generated types
@frappe.whitelist()
def get_actual_demand(self):
self.set("items", [])
actual_demand_data = self.get_demand_data()
item_wise_data = self.get_item_wise_mps_data(actual_demand_data)
if not item_wise_data:
return []
self.update_item_details(item_wise_data)
self.add_mps_data(item_wise_data)
if not self.is_new():
self.save()
def validate(self):
self.set_to_date()
self.validate_company()
def validate_company(self):
if self.sales_forecast:
sales_forecast_company = frappe.db.get_value("Sales Forecast", self.sales_forecast, "company")
if sales_forecast_company != self.company:
frappe.throw(
_(
"The Company {0} of Sales Forecast {1} does not match with the Company {2} of Master Production Schedule {3}."
).format(
bold(sales_forecast_company),
bold(self.sales_forecast),
bold(self.company),
bold(self.name),
)
)
def set_to_date(self):
self.to_date = None
for row in self.items:
if not self.to_date or getdate(row.delivery_date) > getdate(self.to_date):
self.to_date = row.delivery_date
forecast_delivery_dates = self.get_sales_forecast_data()
for date in forecast_delivery_dates:
if not self.to_date or getdate(date) > getdate(self.to_date):
self.to_date = date
def get_sales_forecast_data(self):
if not self.sales_forecast:
return []
filters = {"parent": self.sales_forecast}
if self.select_items:
items = [d.item_code for d in self.select_items if d.item_code]
filters["item_code"] = ("in", items)
return frappe.get_all(
"Sales Forecast Item",
filters=filters,
pluck="delivery_date",
order_by="delivery_date asc",
)
def update_item_details(self, data):
items = [item[0] for item in data if item[0]]
item_details = self.get_item_details(items)
for key in data:
item_data = data[key]
item_code = key[0]
if item_code in item_details:
item_data.update(item_details[item_code])
def get_item_details(self, items):
doctype = frappe.qb.DocType("Item")
query = (
frappe.qb.from_(doctype)
.select(
doctype.name.as_("item_code"),
doctype.default_bom.as_("bom_no"),
doctype.item_name,
)
.where(doctype.name.isin(items))
)
item_details = query.run(as_dict=True)
item_wise_details = frappe._dict({})
if not item_details:
return item_wise_details
for row in item_details:
row.cumulative_lead_time = self.get_cumulative_lead_time(row.item_code, row.bom_no)
for row in item_details:
item_wise_details.setdefault(row.item_code, row)
return item_wise_details
def get_cumulative_lead_time(self, item_code, bom_no, time_in_days=0):
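        # Recursively walk the BOM tree: start with the item's own lead time,
        # then add the lead time of every component, expanding sub-assemblies.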
if not time_in_days:
time_in_days = get_item_lead_time(item_code)
bom_materials = frappe.get_all(
"BOM Item",
filters={"parent": bom_no, "docstatus": 1},
fields=["item_code", "bom_no"],
)
for row in bom_materials:
if row.bom_no:
time_in_days += self.get_cumulative_lead_time(row.item_code, row.bom_no)
else:
lead_time = get_item_lead_time(row.item_code)
time_in_days += lead_time
return time_in_days
def get_demand_data(self):
sales_order_data = self.get_sales_orders_data()
material_request_data = self.get_material_requests_data()
return sales_order_data + material_request_data
def get_material_requests_data(self):
if not self.material_requests:
return []
doctype = frappe.qb.DocType("Material Request Item")
query = (
frappe.qb.from_(doctype)
.select(
doctype.item_code,
doctype.warehouse,
doctype.stock_uom,
doctype.schedule_date.as_("delivery_date"),
doctype.parent.as_("material_request"),
doctype.stock_qty.as_("qty"),
)
.orderby(doctype.schedule_date)
)
if self.material_requests:
material_requests = [m.material_request for m in self.material_requests if m.material_request]
query = query.where(doctype.parent.isin(material_requests))
if self.from_date:
query = query.where(doctype.schedule_date >= self.from_date)
if self.to_date:
query = query.where(doctype.schedule_date <= self.to_date)
return query.run(as_dict=True)
def get_sales_orders_data(self):
sales_order_schedules = self.get_sales_order_schedules()
ignore_orders = []
if sales_order_schedules:
for row in sales_order_schedules:
if row.sales_order_item and row.sales_order_item not in ignore_orders:
ignore_orders.append(row.sales_order_item)
sales_orders = self.get_items_from_sales_orders(ignore_orders)
return sales_orders + sales_order_schedules
def get_items_from_sales_orders(self, ignore_orders=None):
doctype = frappe.qb.DocType("Sales Order Item")
query = (
frappe.qb.from_(doctype)
.select(
doctype.item_code,
doctype.warehouse,
doctype.stock_uom,
doctype.delivery_date,
doctype.name.as_("sales_order"),
doctype.stock_qty.as_("qty"),
)
.where(doctype.docstatus == 1)
.orderby(doctype.delivery_date)
)
if self.from_date:
query = query.where(doctype.delivery_date >= self.from_date)
if self.to_date:
query = query.where(doctype.delivery_date <= self.to_date)
if self.sales_orders:
names = [s.sales_order for s in self.sales_orders if s.sales_order]
if not names:
return []
query = query.where(doctype.parent.isin(names))
if ignore_orders:
query = query.where(doctype.name.notin(ignore_orders))
return query.run(as_dict=True)
def get_sales_order_schedules(self):
doctype = frappe.qb.DocType("Delivery Schedule Item")
query = frappe.qb.from_(doctype).select(
doctype.item_code,
doctype.warehouse,
doctype.stock_uom,
doctype.delivery_date,
doctype.sales_order,
doctype.sales_order_item,
doctype.stock_qty.as_("qty"),
)
if self.sales_orders:
names = [s.sales_order for s in self.sales_orders if s.sales_order]
query = query.where(doctype.sales_order.isin(names))
if self.from_date:
query = query.where(doctype.delivery_date >= self.from_date)
return query.run(as_dict=True)
def get_item_wise_mps_data(self, data):
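        # Aggregate demand rows by (item_code, delivery_date) so multiple
        # orders for the same item and date collapse into a single MPS row.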
item_wise_data = frappe._dict({})
for item in data:
key = (item.item_code, item.delivery_date)
if key not in item_wise_data:
item_wise_data[key] = frappe._dict(
{
"item_code": item.item_code,
"delivery_date": item.delivery_date,
"stock_uom": item.stock_uom,
"qty": 0.0,
"cumulative_lead_time": 0.0,
"order_release_date": item.delivery_date,
}
)
item_details = item_wise_data[key]
item_details.qty += item.qty
return item_wise_data
def add_mps_data(self, data):
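        # Sort aggregated rows by delivery date (key[1]) so order release
        # dates are computed and appended in chronological order.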
data = frappe._dict(sorted(data.items(), key=lambda x: x[0][1]))
for key in data:
row = data[key]
row.cumulative_lead_time = math.ceil(row.cumulative_lead_time)
row.order_release_date = add_days(row.delivery_date, -row.cumulative_lead_time)
row.planned_qty = row.qty
row.uom = row.stock_uom
row.warehouse = row.warehouse or self.parent_warehouse
self.append("items", row)
def get_distinct_items(self, data):
items = []
for item in data:
if item.item_code not in items:
items.append(item.item_code)
return items
@frappe.whitelist()
def fetch_materials_requests(self, **data):
if isinstance(data, str):
data = parse_json(data)
self.set("material_requests", [])
materials_requests = self.get_material_requests(data)
if not materials_requests:
frappe.msgprint(
_("No open Material Requests found for the given criteria."),
alert=True,
)
return
for row in materials_requests:
self.append(
"material_requests",
{
"material_request": row.name,
"material_request_date": row.transaction_date,
},
)
if not self.is_new():
self.save()
def get_material_requests(self, data):
doctype = frappe.qb.DocType("Material Request")
query = (
frappe.qb.from_(doctype)
.select(
doctype.name,
doctype.transaction_date,
)
.where((doctype.docstatus == 1) & (doctype.status.notin(["Closed", "Completed"])))
.orderby(doctype.schedule_date)
)
if data.get("material_request_type"):
query = query.where(doctype.material_request_type == data.get("material_request_type"))
if data.get("from_date"):
query = query.where(doctype.transaction_date >= data.get("from_date"))
if data.get("to_date"):
query = query.where(doctype.transaction_date <= data.get("to_date"))
if self.from_date:
query = query.where(doctype.schedule_date >= self.from_date)
if self.to_date:
query = query.where(doctype.schedule_date <= self.to_date)
return query.run(as_dict=True)
@frappe.whitelist()
def fetch_sales_orders(self, **data):
if isinstance(data, str):
data = parse_json(data)
self.set("sales_orders", [])
sales_orders = self.get_sales_orders(data)
if not sales_orders:
return
for row in sales_orders:
self.append(
"sales_orders",
{
"sales_order": row.name,
"sales_order_date": row.transaction_date,
"delivery_date": row.delivery_date,
"customer": row.customer,
"status": row.status,
"grand_total": row.grand_total,
},
)
if not self.is_new():
self.save()
def get_sales_orders(self, kwargs):
doctype = frappe.qb.DocType("Sales Order")
query = (
frappe.qb.from_(doctype)
.select(
doctype.name,
doctype.transaction_date,
doctype.delivery_date,
doctype.customer,
doctype.status,
doctype.grand_total,
)
.where((doctype.docstatus == 1) & (doctype.status.notin(["Closed", "Completed"])))
.orderby(doctype.delivery_date)
)
if kwargs.get("customer"):
query = query.where(doctype.customer == kwargs.get("customer"))
if kwargs.get("from_date"):
query = query.where(doctype.transaction_date >= kwargs.get("from_date"))
if kwargs.get("to_date"):
query = query.where(doctype.transaction_date <= kwargs.get("to_date"))
if kwargs.get("delivery_from_date"):
query = query.where(doctype.delivery_date >= kwargs.get("delivery_from_date"))
if kwargs.get("delivery_to_date"):
query = query.where(doctype.delivery_date <= kwargs.get("delivery_to_date"))
if items := self.get_items_for_mps():
doctype_item = frappe.qb.DocType("Sales Order Item")
query = query.join(doctype_item).on(doctype_item.parent == doctype.name)
query = query.where(doctype_item.item_code.isin(items))
return query.run(as_dict=True)
def get_items_for_mps(self):
if not self.select_items:
return
return [d.item_code for d in self.select_items if d.item_code]
def on_submit(self):
self.enqueue_mrp_creation()
def enqueue_mrp_creation(self):
frappe.enqueue_doc("Master Production Schedule", self.name, "make_mrp", queue="long", timeout=1800)
frappe.msgprint(
_("MRP Log documents are being created in the background."),
alert=True,
)
def get_item_lead_time(item_code):
doctype = frappe.qb.DocType("Item Lead Time")
query = (
frappe.qb.from_(doctype)
.select(
((doctype.manufacturing_time_in_mins / 1440) + doctype.purchase_time + doctype.buffer_time).as_(
"cumulative_lead_time"
)
)
.where(doctype.item_code == item_code)
)
result = query.run(as_dict=True)
if result:
return result[0].cumulative_lead_time or 0
return 0
@frappe.whitelist()
def get_mps_details(mps: str):
return frappe.db.get_value(
"Master Production Schedule",
mps,
["name", "from_date", "to_date", "company", "posting_date"],
as_dict=True,
)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/master_production_schedule/master_production_schedule.py",
"license": "GNU General Public License v3.0",
"lines": 374,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/manufacturing/doctype/master_production_schedule/test_master_production_schedule.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestMasterProductionSchedule(IntegrationTestCase):
"""
Integration tests for MasterProductionSchedule.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/master_production_schedule/test_master_production_schedule.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/manufacturing/doctype/master_production_schedule_item/master_production_schedule_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class MasterProductionScheduleItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
bom_no: DF.Link | None
cumulative_lead_time: DF.Int
delivery_date: DF.Date | None
item_code: DF.Link | None
item_name: DF.Data | None
order_release_date: DF.Date | None
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
planned_qty: DF.Float
uom: DF.Link | None
warehouse: DF.Link | None
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/master_production_schedule_item/master_production_schedule_item.py",
"license": "GNU General Public License v3.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/manufacturing/doctype/sales_forecast/sales_forecast.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import add_to_date
class SalesForecast(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
from erpnext.manufacturing.doctype.sales_forecast_item.sales_forecast_item import SalesForecastItem
amended_from: DF.Link | None
company: DF.Link
demand_number: DF.Int
frequency: DF.Literal["Weekly", "Monthly"]
from_date: DF.Date
items: DF.Table[SalesForecastItem]
naming_series: DF.Literal["SF.YY.-.######"]
parent_warehouse: DF.Link
posting_date: DF.Date | None
selected_items: DF.TableMultiSelect[SalesForecastItem]
status: DF.Literal["Planned", "MPS Generated", "Cancelled"]
# end: auto-generated types
def on_discard(self):
self.db_set("status", "Cancelled")
def generate_manual_demand(self):
forecast_demand = []
for row in self.selected_items:
item_details = frappe.db.get_value(
"Item", row.item_code, ["item_name", "stock_uom as uom"], as_dict=True
)
for index in range(self.demand_number):
if self.frequency == "Monthly":
delivery_date = add_to_date(self.from_date, months=index + 1)
else:
delivery_date = add_to_date(self.from_date, weeks=index + 1)
forecast_demand.append(
{
"item_code": row.item_code,
"delivery_date": delivery_date,
"item_name": item_details.item_name,
"uom": item_details.uom,
"demand_qty": 1.0,
}
)
for demand in forecast_demand:
self.append("items", demand)
@frappe.whitelist()
def generate_demand(self):
self.set("items", [])
self.generate_manual_demand()
@frappe.whitelist()
def create_mps(source_name: str, target_doc: Document | str | None = None):
def postprocess(source, doc):
doc.naming_series = "MPS.YY.-.######"
doc = get_mapped_doc(
"Sales Forecast",
source_name,
{
"Sales Forecast": {
"doctype": "Master Production Schedule",
"validation": {"docstatus": ["=", 1]},
"field_map": {
"name": "sales_forecast",
"from_date": "from_date",
},
},
},
target_doc,
postprocess,
)
return doc
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/sales_forecast/sales_forecast.py",
"license": "GNU General Public License v3.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/manufacturing/doctype/sales_forecast/sales_forecast_dashboard.py | from frappe import _
def get_data():
return {
"fieldname": "demand_planning",
"transactions": [
{
"label": _("MPS"),
"items": ["Master Production Schedule"],
},
],
}
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/sales_forecast/sales_forecast_dashboard.py",
"license": "GNU General Public License v3.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/manufacturing/doctype/sales_forecast/test_sales_forecast.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestSalesForecast(IntegrationTestCase):
"""
Integration tests for SalesForecast.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/sales_forecast/test_sales_forecast.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/manufacturing/doctype/sales_forecast_item/sales_forecast_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class SalesForecastItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
adjust_qty: DF.Float
delivery_date: DF.Date | None
demand_qty: DF.Float
forecast_qty: DF.Float
item_code: DF.Link
item_name: DF.Data | None
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
uom: DF.Link | None
warehouse: DF.Link | None
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/sales_forecast_item/sales_forecast_item.py",
"license": "GNU General Public License v3.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/patches/v16_0/set_reporting_currency.py | import frappe
from frappe.utils import getdate
from frappe.utils.nestedset import get_descendants_of
from erpnext.accounts.utils import get_fiscal_year
from erpnext.setup.utils import get_exchange_rate
def execute():
set_company_reporting_currency()
set_amounts_in_reporting_currency_on_gle_and_acb()
def set_company_reporting_currency():
root_companies = frappe.db.get_all(
"Company", fields=["name", "default_currency"], filters={"parent_company": ""}, order_by="lft"
)
for d in root_companies:
company_subtree = get_descendants_of("Company", d.name)
company_subtree.append(d.name)
update_company_subtree_reporting_currency(company_subtree, d.default_currency)
def update_company_subtree_reporting_currency(companies, currency):
Company = frappe.qb.DocType("Company")
frappe.qb.update(Company).set(Company.reporting_currency, currency).where(
Company.name.isin(companies)
).run()
def set_amounts_in_reporting_currency_on_gle_and_acb():
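    # For every company, backfill reporting-currency amounts on GL Entries and
    # Account Closing Balances; skip a company if any required exchange rate is missing.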
# get all the companies
companies = frappe.db.get_all(
"Company", fields=["name", "default_currency", "reporting_currency"], order_by="lft"
)
# get current fiscal year
current_fiscal_year = get_fiscal_year(getdate(), as_dict=1, raise_on_missing=False)
if not current_fiscal_year:
return
previous_fiscal_year = frappe.db.get_value(
"Fiscal Year",
filters={"year_end_date": ("<", current_fiscal_year.year_start_date)},
fieldname=["name", "year_start_date", "year_end_date"],
order_by="year_end_date desc",
as_dict=1,
)
for d in companies:
posting_dates = get_posting_closing_date(d, current_fiscal_year, previous_fiscal_year)
exchange_rate_available = check_exchange_rate_availability(d, posting_dates)
if not exchange_rate_available:
continue
set_reporting_currency_by_doctype("GL Entry", d, posting_dates.get("GL Entry"))
set_reporting_currency_by_doctype(
"Account Closing Balance", d, posting_dates.get("Account Closing Balance")
)
def get_posting_closing_date(company_details, current_fiscal_year, previous_fiscal_year=None):
posting_dates = {}
posting_dates["GL Entry"] = get_closing_posting_dates(
"GL Entry", company_details.get("name"), current_fiscal_year
)
posting_dates["Account Closing Balance"] = get_closing_posting_dates(
"Account Closing Balance", company_details.get("name"), current_fiscal_year
)
if previous_fiscal_year:
prev_fy_last_pcv_closing_date = frappe.db.get_value(
"Period Closing Voucher",
filters={"fiscal_year": previous_fiscal_year.name, "company": company_details.get("name")},
fieldname=["transaction_date"],
order_by="period_start_date desc",
)
if prev_fy_last_pcv_closing_date:
prev_fy_acb_closing_dates = get_closing_posting_dates(
"Account Closing Balance",
company_details.get("name"),
closing_date=prev_fy_last_pcv_closing_date,
)
posting_dates.setdefault("Account Closing Balance", [])
posting_dates["Account Closing Balance"].extend(prev_fy_acb_closing_dates)
return posting_dates
def check_exchange_rate_availability(company_details, posting_dates):
exchange_rate_available = True
for doctype, values in posting_dates.items():
if not exchange_rate_available:
return False
date_column = "posting_date" if doctype == "GL Entry" else "closing_date"
for d in values:
exchange_rate = get_exchange_rate(
company_details.get("default_currency"),
company_details.get("reporting_currency"),
d[date_column],
)
if not exchange_rate:
exchange_rate_available = False
break
return exchange_rate_available
def set_reporting_currency_by_doctype(doctype, company_details, posting_closing_dates):
date_column = "posting_date" if doctype == "GL Entry" else "closing_date"
for d in posting_closing_dates:
exchange_rate = get_exchange_rate(
company_details.get("default_currency"),
company_details.get("reporting_currency"),
d[date_column],
)
set_reporting_currency_on_individual_documents(
doctype, company_details.get("name"), d[date_column], exchange_rate
)
def get_closing_posting_dates(doctype, company, fiscal_year=None, closing_date=None):
dt = frappe.qb.DocType(doctype)
date_column = "posting_date" if doctype == "GL Entry" else "closing_date"
query = frappe.qb.from_(dt).select(dt[date_column]).where(dt.company == company).groupby(dt[date_column])
if doctype == "GL Entry" and fiscal_year:
query = query.where(dt.fiscal_year == fiscal_year.name)
if doctype == "Account Closing Balance":
if fiscal_year:
query = query.where(dt.closing_date[fiscal_year.year_start_date : fiscal_year.year_end_date])
if closing_date:
query = query.where(dt.closing_date == closing_date)
posting_closing_dates = query.run(as_dict=1)
return posting_closing_dates
def set_reporting_currency_on_individual_documents(doctype, company, posting_closing_date, exchange_rate):
dt = frappe.qb.DocType(doctype)
date_column = "posting_date" if doctype == "GL Entry" else "closing_date"
frappe.qb.update(dt).set(dt.reporting_currency_exchange_rate, exchange_rate).set(
dt.debit_in_reporting_currency, exchange_rate * dt.debit
).set(dt.credit_in_reporting_currency, exchange_rate * dt.credit).where(
(dt.company == company) & (dt[date_column] == posting_closing_date)
).run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v16_0/set_reporting_currency.py",
"license": "GNU General Public License v3.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
frappe/erpnext:erpnext/selling/doctype/delivery_schedule_item/delivery_schedule_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class DeliveryScheduleItem(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
conversion_factor: DF.Float
delivery_date: DF.Date | None
item_code: DF.Link | None
qty: DF.Float
sales_order: DF.Link | None
sales_order_item: DF.Data | None
stock_qty: DF.Float
stock_uom: DF.Link | None
uom: DF.Link | None
warehouse: DF.Link | None
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/selling/doctype/delivery_schedule_item/delivery_schedule_item.py",
"license": "GNU General Public License v3.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/selling/doctype/delivery_schedule_item/test_delivery_schedule_item.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestDeliveryScheduleItem(IntegrationTestCase):
"""
Integration tests for DeliveryScheduleItem.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/selling/doctype/delivery_schedule_item/test_delivery_schedule_item.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/stock/doctype/item_lead_time/item_lead_time.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class ItemLeadTime(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
buffer_time: DF.Int
capacity_per_day: DF.Int
daily_yield: DF.Percent
item_code: DF.Link | None
item_name: DF.Data | None
manufacturing_time_in_mins: DF.Int
no_of_shift: DF.Int
no_of_units_produced: DF.Int
no_of_workstations: DF.Int
purchase_time: DF.Int
shift_time_in_hours: DF.Int
stock_uom: DF.Link | None
total_workstation_time: DF.Int
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/stock/doctype/item_lead_time/item_lead_time.py",
"license": "GNU General Public License v3.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/stock/doctype/item_lead_time/test_item_lead_time.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestItemLeadTime(IntegrationTestCase):
"""
Integration tests for ItemLeadTime.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/stock/doctype/item_lead_time/test_item_lead_time.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/manufacturing/doctype/workstation_cost/test_workstation_cost.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestWorkstationCost(IntegrationTestCase):
"""
Integration tests for WorkstationCost.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/workstation_cost/test_workstation_cost.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/manufacturing/doctype/workstation_cost/workstation_cost.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WorkstationCost(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
operating_component: DF.Link
operating_cost: DF.Currency
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/workstation_cost/workstation_cost.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/manufacturing/doctype/workstation_operating_component/test_workstation_operating_component.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestWorkstationOperatingComponent(IntegrationTestCase):
"""
Integration tests for WorkstationOperatingComponent.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/workstation_operating_component/test_workstation_operating_component.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/manufacturing/doctype/workstation_operating_component/workstation_operating_component.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WorkstationOperatingComponent(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
from erpnext.manufacturing.doctype.workstation_operating_component_account.workstation_operating_component_account import (
WorkstationOperatingComponentAccount,
)
accounts: DF.Table[WorkstationOperatingComponentAccount]
component_name: DF.Data
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/workstation_operating_component/workstation_operating_component.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/manufacturing/doctype/workstation_operating_component_account/test_workstation_operating_component_account.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class IntegrationTestWorkstationOperatingComponentAccount(IntegrationTestCase):
"""
Integration tests for WorkstationOperatingComponentAccount.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/workstation_operating_component_account/test_workstation_operating_component_account.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/manufacturing/doctype/workstation_operating_component_account/workstation_operating_component_account.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WorkstationOperatingComponentAccount(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
company: DF.Link
expense_account: DF.Link | None
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/manufacturing/doctype/workstation_operating_component_account/workstation_operating_component_account.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/patches/v15_0/update_fieldname_in_accounting_dimension_filter.py | import frappe
from frappe.query_builder import DocType
def execute():
default_accounting_dimension()
ADF = DocType("Accounting Dimension Filter")
AD = DocType("Accounting Dimension")
accounting_dimension_filter = (
frappe.qb.from_(ADF)
.join(AD)
.on(AD.document_type == ADF.accounting_dimension)
.select(ADF.name, AD.fieldname, ADF.accounting_dimension)
).run(as_dict=True)
for doc in accounting_dimension_filter:
value = doc.fieldname or frappe.scrub(doc.accounting_dimension)
frappe.db.set_value(
"Accounting Dimension Filter",
doc.name,
"fieldname",
value,
update_modified=False,
)
def default_accounting_dimension():
ADF = DocType("Accounting Dimension Filter")
for dim in ("Cost Center", "Project"):
(
frappe.qb.update(ADF)
.set(ADF.fieldname, frappe.scrub(dim))
.where(ADF.accounting_dimension == dim)
.run()
)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/update_fieldname_in_accounting_dimension_filter.py",
"license": "GNU General Public License v3.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v16_0/make_workstation_operating_components.py | import frappe
from frappe import _
def get_operating_cost_account(company):
company_details = frappe.db.get_value(
"Company", company, ["default_operating_cost_account", "default_expense_account"], as_dict=True
)
return company_details.get("default_operating_cost_account") or company_details.get(
"default_expense_account"
)
def execute():
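    # Create the standard operating components (with company-wise expense
    # accounts) and migrate the legacy hour_rate_* fields on Workstation and
    # Workstation Type into the new workstation_costs child table.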
components = [
"Electricity",
"Consumables",
"Rent",
"Wages",
]
companies = frappe.get_all("Company", filters={"is_group": 0}, pluck="name")
for component in components:
component = _(component)
if not frappe.db.exists("Workstation Operating Component", component):
doc = frappe.new_doc("Workstation Operating Component")
doc.component_name = component
for company in companies:
operating_cost_account = get_operating_cost_account(company)
doc.append("accounts", {"company": company, "expense_account": operating_cost_account})
doc.insert()
workstations = frappe.get_all("Workstation", filters={"hour_rate": (">", 0.0)}, pluck="name") or []
workstation_types = (
frappe.get_all("Workstation Type", filters={"hour_rate": (">", 0.0)}, pluck="name") or []
)
if not workstations and not workstation_types:
return
components_map = {
"hour_rate_electricity": _("Electricity"),
"hour_rate_consumable": _("Consumables"),
"hour_rate_rent": _("Rent"),
"hour_rate_labour": _("Wages"),
}
for workstation in workstations:
doc = frappe.get_doc("Workstation", workstation)
for field, component in components_map.items():
if doc.get(field):
doc.append(
"workstation_costs",
{
"operating_component": component,
"operating_cost": doc.get(field),
},
)
doc.save()
for workstation_type in workstation_types:
doc = frappe.get_doc("Workstation Type", workstation_type)
for field, component in components_map.items():
if doc.get(field):
doc.append(
"workstation_costs",
{
"operating_component": component,
"operating_cost": doc.get(field),
},
)
doc.save()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v16_0/make_workstation_operating_components.py",
"license": "GNU General Public License v3.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/update_uae_zero_rated_fetch.py | import frappe
from erpnext.regional.united_arab_emirates.setup import make_custom_fields
def execute():
if not frappe.db.get_value("Company", {"country": "United Arab Emirates"}):
return
make_custom_fields()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/update_uae_zero_rated_fetch.py",
"license": "GNU General Public License v3.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v16_0/update_serial_no_reference_name.py | import frappe
def execute():
# Backfill reference_name, reference_doctype and posting_date on Serial No from its Serial and Batch Bundle
if not frappe.db.has_column("Serial and Batch Bundle", "posting_date"):
return
sabb = frappe.qb.DocType("Serial and Batch Bundle")
sabb_entry = frappe.qb.DocType("Serial and Batch Entry")
serial_no = frappe.qb.DocType("Serial No").as_("sn")
query = (
frappe.qb.update(serial_no)
.join(sabb_entry)
.on(sabb_entry.serial_no == serial_no.name)
.join(sabb)
.on(sabb.name == sabb_entry.parent)
.set(serial_no.reference_name, serial_no.purchase_document_no)
.set(serial_no.reference_doctype, sabb.voucher_type)
.set(serial_no.posting_date, sabb.posting_date)
.where(
(sabb.voucher_no == serial_no.purchase_document_no)
& (sabb.is_cancelled == 0)
& (sabb_entry.docstatus == 1)
)
)
query.run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v16_0/update_serial_no_reference_name.py",
"license": "GNU General Public License v3.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/stock/report/serial_no_and_batch_traceability/serial_no_and_batch_traceability.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.query_builder import Case
def execute(filters: dict | None = None):
report = ReportData(filters)
report.validate_filters()
data = report.get_data()
has_serial_no, has_batch_no = check_has_serial_no_in_data(data)
columns = report.get_columns(has_serial_no, has_batch_no)
return columns, data
def check_has_serial_no_in_data(data):
has_serial_no = False
has_batch_no = False
for row in data:
if row.get("serial_no"):
has_serial_no = True
if row.get("batch_no"):
has_batch_no = True
if has_serial_no and has_batch_no:
break
return has_serial_no, has_batch_no
class ReportData:
def __init__(self, filters):
self.filters = filters
self.doctype_name = self.get_doctype()
def validate_filters(self):
if not self.filters.item_code and not self.filters.batches and not self.filters.serial_nos:
frappe.throw(
_("Please select at least one filter: Item Code, Batch, or Serial No."),
title=_("Missing Filters"),
)
def get_data(self):
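        # Backward: expand each selected serial/batch into the raw materials it
        # was made from. Forward: follow outward entries into downstream vouchers.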
result_data = []
if self.filters.get("traceability_direction") in ["Backward", "Both"]:
data = self.get_serial_no_batches()
source_data = self.prepare_source_data(data)
# Prepare source data with raw materials
for key in source_data:
sabb_data = source_data[key]
if sabb_data.reference_doctype != "Stock Entry":
continue
self.set_backward_data(sabb_data)
# Source data has all the details including raw materials
self.parse_batch_details(source_data, result_data, "Backward")
if self.filters.get("traceability_direction") in ["Forward", "Both"]:
data = self.get_serial_no_batches()
batch_details = frappe._dict({})
for row in data:
value = row.serial_no or row.batch_no
self.set_forward_data(value, batch_details)
self.parse_batch_details(batch_details, result_data, "Forward")
return result_data
def parse_batch_details(self, sabb_data_details, data, direction, indent=0):
for key in sabb_data_details:
sabb = sabb_data_details[key]
row = {
"item_code": sabb.item_code,
"batch_no": sabb.batch_no,
"serial_no": sabb.serial_no,
"warehouse": sabb.warehouse,
"qty": sabb.qty,
"reference_doctype": sabb.reference_doctype,
"reference_name": sabb.reference_name,
"item_name": sabb.item_name,
"posting_datetime": sabb.posting_datetime,
"indent": indent,
"direction": direction,
"batch_expiry_date": sabb.get("batch_expiry_date"),
"warranty_expiry_date": sabb.get("warranty_expiry_date"),
"amc_expiry_date": sabb.get("amc_expiry_date"),
}
if data and indent == 0:
data.append({})
if direction == "Forward" and row["qty"] > 0:
row["direction"] = "Backward"
if sabb.reference_doctype == "Purchase Receipt":
row["supplier"] = frappe.db.get_value(
"Purchase Receipt",
sabb.reference_name,
"supplier",
)
elif sabb.reference_doctype == "Stock Entry":
row["work_order"] = frappe.db.get_value(
"Stock Entry",
sabb.reference_name,
"work_order",
)
elif sabb.reference_doctype == "Delivery Note":
row["customer"] = frappe.db.get_value(
"Delivery Note",
sabb.reference_name,
"customer",
)
data.append(row)
raw_materials = sabb.get("raw_materials")
if raw_materials:
self.parse_batch_details(raw_materials, data, direction, indent + 1)
return data
def prepare_source_data(self, data):
source_data = frappe._dict({})
for row in data:
key = (row.item_code, row.reference_name)
value = row.serial_no or row.batch_no
if value:
key = (row.item_code, row.reference_name, value)
sabb_details = self.get_data_from_sabb(row)
row.update(sabb_details)
if key not in source_data:
row["raw_materials"] = frappe._dict({})
source_data[key] = row
return source_data
def get_data_from_sabb(self, row):
sabb = frappe.qb.DocType("Serial and Batch Bundle")
sabb_entry = frappe.qb.DocType("Serial and Batch Entry")
query = (
frappe.qb.from_(sabb)
.inner_join(sabb_entry)
.on(sabb.name == sabb_entry.parent)
.select(
sabb_entry.qty,
sabb_entry.warehouse,
sabb_entry.posting_datetime,
)
.where(
(sabb.voucher_type == row.reference_doctype)
& (sabb.voucher_no == row.reference_name)
& (sabb.is_cancelled == 0)
& (sabb_entry.docstatus == 1)
)
)
if row.batch_no:
query = query.where(sabb_entry.batch_no == row.batch_no)
else:
query = query.where(sabb_entry.serial_no == row.serial_no)
results = query.run(as_dict=True)
return results[0] if results else {}
def set_backward_data(self, sabb_data, qty=None):
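        # Recursively attach the raw materials consumed to produce this bundle;
        # serialised/batched components expand into their own source bundles.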
if qty:
sabb_data.qty = qty
if "raw_materials" not in sabb_data:
sabb_data.raw_materials = frappe._dict({})
materials = self.get_materials(sabb_data)
for material in materials:
# Recursive: batch has sub-components
if material.serial_no or material.batch_no:
key = (material.item_code, material.reference_name, material.name)
value = material.serial_no or material.batch_no
if key not in sabb_data.raw_materials:
details = self.get_serial_no_batches(value)
if not details:
inward_data = self.get_sabb_entries(value, "Inward")
if inward_data:
details = inward_data[-1]
if details:
details.update(self.get_data_from_sabb(details))
sabb_data.raw_materials[key] = details
if sabb_data.raw_materials.get(key):
self.set_backward_data(sabb_data.raw_materials[key], material.qty)
else:
sub_key = (material.item_code, material.name)
if sub_key not in sabb_data.raw_materials:
sabb_data.raw_materials[sub_key] = frappe._dict(
{
"item_code": material.item_code,
"item_name": material.item_name,
"qty": material.qty or material.quantity,
"warehouse": material.warehouse,
}
)
return sabb_data
def get_serial_no_batches(self, name=None):
batches = self.filters.get("batches", [])
serial_nos = self.filters.get("serial_nos", [])
doctype = frappe.qb.DocType(self.doctype_name)
query = frappe.qb.from_(doctype).select(
doctype.reference_doctype,
doctype.reference_name,
doctype.item_name,
)
if self.doctype_name == "Batch":
query = query.select(
doctype.item.as_("item_code"),
doctype.name.as_("batch_no"),
doctype.expiry_date.as_("batch_expiry_date"),
)
else:
query = query.select(
doctype.item_code,
doctype.name.as_("serial_no"),
doctype.warranty_expiry_date,
doctype.amc_expiry_date,
)
if name:
query = query.where(doctype.name == name)
data = query.run(as_dict=True)
return data[0] if data else {}
if batches:
query = query.where(doctype.name.isin(batches))
elif serial_nos:
query = query.where(doctype.name.isin(serial_nos))
if self.filters.get("item_code"):
if self.doctype_name == "Serial No":
query = query.where(doctype.item_code == self.filters.item_code)
else:
query = query.where(doctype.item == self.filters.item_code)
return query.run(as_dict=True)
def get_doctype(self):
if self.filters.item_code:
item_details = frappe.get_cached_value(
"Item",
self.filters.item_code,
["has_batch_no", "has_serial_no"],
as_dict=True,
)
if item_details.has_serial_no:
return "Serial No"
elif item_details.has_batch_no:
return "Batch"
elif self.filters.get("serial_nos"):
return "Serial No"
return "Batch"
def get_materials(self, sabb_data):
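        # Consumed items of the Manufacture/Repack entry, with qty scaled in
        # proportion to the traced bundle qty (relative to fg_completed_qty).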
stock_entry = frappe.qb.DocType("Stock Entry")
stock_entry_detail = frappe.qb.DocType("Stock Entry Detail")
sabb_entry = frappe.qb.DocType("Serial and Batch Entry")
query = (
frappe.qb.from_(stock_entry)
.inner_join(stock_entry_detail)
.on(stock_entry.name == stock_entry_detail.parent)
.left_join(sabb_entry)
.on(
(stock_entry_detail.serial_and_batch_bundle == sabb_entry.parent)
& (sabb_entry.docstatus == 1)
)
.select(
stock_entry_detail.s_warehouse.as_("warehouse"),
stock_entry_detail.item_code,
stock_entry_detail.name,
stock_entry_detail.item_name,
stock_entry_detail.parenttype.as_("reference_doctype"),
stock_entry.name.as_("reference_name"),
(
(
stock_entry_detail.qty
/ Case()
.when(stock_entry.fg_completed_qty > 0, stock_entry.fg_completed_qty)
.else_(sabb_data.qty)
)
* sabb_data.qty
).as_("qty"),
sabb_entry.batch_no,
sabb_entry.serial_no,
sabb_entry.qty.as_("quantity"),
)
.where(
(stock_entry.docstatus == 1)
& (stock_entry.purpose.isin(["Manufacture", "Repack"]))
& (stock_entry.name == sabb_data.reference_name)
& (stock_entry_detail.s_warehouse.isnotnull())
)
)
return query.run(as_dict=True)
def set_forward_data(self, value, sabb_data):
outward_entries = self.get_sabb_entries(value)
for row in outward_entries:
if row.reference_doctype == "Stock Entry":
self.process_manufacture_or_repack_entry(row, sabb_data)
else:
self.add_direct_outward_entry(row, sabb_data)
def add_direct_outward_entry(self, row, batch_details):
key = (row.item_code, row.reference_name, row.serial_no, row.batch_no)
if key not in batch_details:
row["indent"] = 0
batch_details[key] = row
def get_sabb_entries(self, value, type_of_transaction=None):
if not type_of_transaction:
type_of_transaction = "Outward"
SABB = frappe.qb.DocType("Serial and Batch Bundle")
SABE = frappe.qb.DocType("Serial and Batch Entry")
query = (
frappe.qb.from_(SABB)
.inner_join(SABE)
.on(SABB.name == SABE.parent)
.select(
SABB.voucher_type.as_("reference_doctype"),
SABB.voucher_no.as_("reference_name"),
SABE.batch_no,
SABE.serial_no,
SABE.qty,
SABB.item_code,
SABB.item_name,
SABB.posting_datetime,
SABB.warehouse,
)
.where(
(SABB.is_cancelled == 0)
& (SABE.docstatus == 1)
& (SABB.type_of_transaction == type_of_transaction)
)
.orderby(SABB.posting_datetime)
)
query = query.where((SABE.serial_no == value) | (SABE.batch_no == value))
return query.run(as_dict=True)
def process_manufacture_or_repack_entry(self, row, batch_details):
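        # Group the consumed row under the finished item produced by the
        # Manufacture/Repack Stock Entry, so the tree shows FG -> raw materials.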
ste = frappe.db.get_value("Stock Entry", row.reference_name, ["purpose", "work_order"], as_dict=True)
if ste and ste.purpose in ["Manufacture", "Repack"]:
fg_item = self.get_finished_item_from_stock_entry(row.reference_name)
if not fg_item:
return
key = (fg_item.item_code, row.reference_name)
if key not in batch_details:
serial_no, batch_no = self.get_serial_batch_no(fg_item.serial_and_batch_bundle)
fg_item.update(
{
"work_order": ste.work_order,
"posting_datetime": row.posting_datetime,
"serial_no": serial_no,
"batch_no": batch_no,
"indent": 0,
"warehouse": fg_item.warehouse,
"raw_materials": frappe._dict(
{(row.item_code, row.reference_name, row.serial_no, row.batch_no): row}
),
}
)
batch_details[key] = fg_item
else:
batch_details[key].raw_materials[
(row.item_code, row.reference_name, row.serial_no, row.batch_no)
] = row
def get_finished_item_from_stock_entry(self, reference_name):
return frappe.db.get_value(
"Stock Entry Detail",
{"parent": reference_name, "is_finished_item": 1},
[
"item_code",
"item_name",
"serial_and_batch_bundle",
"qty",
"parenttype as reference_doctype",
"parent as reference_name",
"t_warehouse as warehouse",
],
as_dict=True,
)
def get_serial_batch_no(self, serial_and_batch_bundle):
sabb_details = frappe.db.get_value(
"Serial and Batch Entry",
{"parent": serial_and_batch_bundle},
["batch_no", "serial_no"],
as_dict=True,
)
return (sabb_details.serial_no, sabb_details.batch_no) if sabb_details else (None, None)
def get_columns(self, has_serial_no=None, has_batch_no=None):
columns = [
{
"fieldname": "item_code",
"label": _("Item Code"),
"fieldtype": "Link",
"options": "Item",
"width": 180,
},
{
"fieldname": "item_name",
"label": _("Item Name"),
"fieldtype": "Data",
"width": 120,
},
]
if has_serial_no:
columns.append(
{
"fieldname": "serial_no",
"label": _("Serial No"),
"fieldtype": "Link",
"options": "Serial No",
"width": 120,
}
)
if has_batch_no:
columns.extend(
[
{
"fieldname": "batch_no",
"label": _("Batch No"),
"fieldtype": "Link",
"options": "Batch",
"width": 120,
},
{
"fieldname": "batch_expiry_date",
"label": _("Batch Expiry Date"),
"fieldtype": "Date",
"width": 150,
},
]
)
columns.extend(
[
{
"fieldname": "qty",
"label": _("Quantity"),
"fieldtype": "Float",
"width": 90,
},
{
"fieldname": "reference_doctype",
"label": _("Voucher Type"),
"fieldtype": "Data",
"width": 130,
},
{
"fieldname": "reference_name",
"label": _("Source Document No"),
"fieldtype": "Dynamic Link",
"options": "reference_doctype",
"width": 200,
},
{
"fieldname": "warehouse",
"label": _("Warehouse"),
"fieldtype": "Link",
"options": "Warehouse",
"width": 120,
},
{
"fieldname": "posting_datetime",
"label": _("Posting Datetime"),
"fieldtype": "Datetime",
"width": 120,
},
{
"fieldname": "work_order",
"label": _("Work Order"),
"fieldtype": "Link",
"options": "Work Order",
"width": 160,
},
]
)
if self.filters.get("traceability_direction") == "Backward":
columns.append(
{
"fieldname": "supplier",
"label": _("Supplier"),
"fieldtype": "Link",
"options": "Supplier",
"width": 150,
}
)
else:
columns.append(
{
"fieldname": "customer",
"label": _("Customer"),
"fieldtype": "Link",
"options": "Customer",
"width": 150,
}
)
if has_serial_no:
columns.extend(
[
{
"fieldname": "warranty_expiry_date",
"label": _("Warranty Expiry (Serial)"),
"fieldtype": "Date",
"width": 200,
},
{
"fieldname": "amc_expiry_date",
"label": _("AMC Expiry (Serial)"),
"fieldtype": "Date",
"width": 160,
},
]
)
return columns
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/stock/report/serial_no_and_batch_traceability/serial_no_and_batch_traceability.py",
"license": "GNU General Public License v3.0",
"lines": 480,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/patches/v15_0/add_company_payment_gateway_account.py | import frappe
def execute():
for gateway_account in frappe.get_list("Payment Gateway Account", fields=["name", "payment_account"]):
company = frappe.db.get_value("Account", gateway_account.payment_account, "company")
frappe.db.set_value("Payment Gateway Account", gateway_account.name, "company", company)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/add_company_payment_gateway_account.py",
"license": "GNU General Public License v3.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/repost_gl_entries_with_no_account_subcontracting.py | import frappe
def execute():
def cancel_incorrect_gl_entries(gl_entries):
table = frappe.qb.DocType("GL Entry")
frappe.qb.update(table).set(table.is_cancelled, 1).where(table.name.isin(gl_entries)).run()
def recreate_gl_entries(voucher_nos):
for doc in voucher_nos:
doc = frappe.get_doc("Subcontracting Receipt", doc)
for item in doc.supplied_items:
account, cost_center = frappe.db.get_values(
"Subcontracting Receipt Item", item.reference_name, ["expense_account", "cost_center"]
)[0]
if not item.expense_account:
item.db_set("expense_account", account)
if not item.cost_center:
item.db_set("cost_center", cost_center)
doc.make_gl_entries()
docs = frappe.get_all(
"GL Entry",
fields=["name", "voucher_no"],
filters={"voucher_type": "Subcontracting Receipt", "account": ["is", "not set"], "is_cancelled": 0},
)
if docs:
cancel_incorrect_gl_entries([d.name for d in docs])
recreate_gl_entries([d.voucher_no for d in docs])
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/repost_gl_entries_with_no_account_subcontracting.py",
"license": "GNU General Public License v3.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/update_payment_ledger_entries_against_advance_doctypes.py | import frappe
from erpnext.accounts.utils import get_advance_payment_doctypes
DOCTYPE = "Payment Ledger Entry"
def execute():
"""
Description:
	Point against_voucher_type and against_voucher_no at the entry's own voucher for Payment Ledger Entries made against advance vouchers.
"""
advance_payment_doctypes = get_advance_payment_doctypes()
if not advance_payment_doctypes:
return
ple = frappe.qb.DocType(DOCTYPE)
(
frappe.qb.update(ple)
.set(ple.against_voucher_type, ple.voucher_type)
.set(ple.against_voucher_no, ple.voucher_no)
.where(ple.against_voucher_type.isin(advance_payment_doctypes))
.run()
)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/update_payment_ledger_entries_against_advance_doctypes.py",
"license": "GNU General Public License v3.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/stock/doctype/landed_cost_vendor_invoice/landed_cost_vendor_invoice.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class LandedCostVendorInvoice(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
amount: DF.Currency
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
vendor_invoice: DF.Link | None
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/stock/doctype/landed_cost_vendor_invoice/landed_cost_vendor_invoice.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/stock/report/landed_cost_report/landed_cost_report.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(filters: dict | None = None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns() -> list[dict]:
return [
{
"label": _("Landed Cost Id"),
"fieldname": "name",
"fieldtype": "Link",
"options": "Landed Cost Voucher",
},
{
"label": _("Total Landed Cost"),
"fieldname": "landed_cost",
"fieldtype": "Currency",
},
{
"label": _("Purchase Voucher Type"),
"fieldname": "voucher_type",
"fieldtype": "Data",
"width": 200,
},
{
"label": _("Purchase Voucher No"),
"fieldname": "voucher_no",
"fieldtype": "Dynamic Link",
"options": "voucher_type",
"width": 220,
},
{
"label": _("Vendor Invoice"),
"fieldname": "vendor_invoice",
"fieldtype": "Link",
"options": "Purchase Invoice",
"width": 200,
},
]
def get_data(filters) -> list[dict]:
landed_cost_vouchers = get_landed_cost_vouchers(filters) or {}
landed_vouchers = list(landed_cost_vouchers.keys())
vendor_invoices = {}
if landed_vouchers:
vendor_invoices = get_vendor_invoices(landed_vouchers)
data = []
for name, vouchers in landed_cost_vouchers.items():
res = {
"name": name,
}
last_index = 0
vendor_invoice_list = vendor_invoices.get(name, [])
for i, d in enumerate(vouchers):
if i == 0:
res.update(
{
"landed_cost": d.landed_cost,
"voucher_type": d.voucher_type,
"voucher_no": d.voucher_no,
}
)
else:
res = {
"voucher_type": d.voucher_type,
"voucher_no": d.voucher_no,
}
if len(vendor_invoice_list) > i:
res["vendor_invoice"] = vendor_invoice_list[i]
data.append(res)
last_index = i
if vendor_invoice_list and len(vendor_invoice_list) > len(vouchers):
for row in vendor_invoice_list[last_index + 1 :]:
data.append({"vendor_invoice": row})
return data
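# Illustrative shape of the rows built above (hypothetical names and values):
# an LCV with two purchase receipts and three vendor invoices produces
#   {"name": "LCV-0001", "landed_cost": 500.0, "voucher_type": "Purchase Receipt",
#    "voucher_no": "PR-0001", "vendor_invoice": "PI-0001"}
#   {"voucher_type": "Purchase Receipt", "voucher_no": "PR-0002", "vendor_invoice": "PI-0002"}
#   {"vendor_invoice": "PI-0003"}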
def get_landed_cost_vouchers(filters):
lcv = frappe.qb.DocType("Landed Cost Voucher")
lcv_voucher = frappe.qb.DocType("Landed Cost Purchase Receipt")
query = (
frappe.qb.from_(lcv)
.inner_join(lcv_voucher)
.on(lcv.name == lcv_voucher.parent)
.select(
lcv.name,
lcv.total_taxes_and_charges.as_("landed_cost"),
lcv_voucher.receipt_document_type.as_("voucher_type"),
lcv_voucher.receipt_document.as_("voucher_no"),
)
.where((lcv.docstatus == 1) & (lcv.company == filters.company))
)
if filters.from_date and filters.to_date:
query = query.where(lcv.posting_date.between(filters.from_date, filters.to_date))
if filters.raw_material_voucher_type:
query = query.where(lcv_voucher.receipt_document_type == filters.raw_material_voucher_type)
if filters.raw_material_voucher_no:
query = query.where(lcv_voucher.receipt_document == filters.raw_material_voucher_no)
data = query.run(as_dict=True) or []
result = {}
for row in data:
		result.setdefault(row.name, []).append(row)
return result
def get_vendor_invoices(landed_vouchers):
doctype = frappe.qb.DocType("Landed Cost Vendor Invoice")
query = (
frappe.qb.from_(doctype)
.select(
doctype.parent,
doctype.vendor_invoice,
)
.where((doctype.docstatus == 1) & (doctype.parent.isin(landed_vouchers)))
.orderby(
doctype.idx,
)
)
data = query.run(as_dict=True) or []
result = {}
for row in data:
result.setdefault(row.parent, []).append(row.vendor_invoice)
return result
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/stock/report/landed_cost_report/landed_cost_report.py",
"license": "GNU General Public License v3.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/patches/v15_0/patch_missing_buying_price_list_in_material_request.py | import frappe
import frappe.defaults
def execute():
if frappe.db.has_column("Material Request", "buying_price_list") and (
default_buying_price_list := frappe.defaults.get_defaults().buying_price_list
):
docs = frappe.get_all(
"Material Request", filters={"buying_price_list": ["is", "not set"], "docstatus": 1}, pluck="name"
)
frappe.db.auto_commit_on_many_writes = 1
try:
for doc in docs:
frappe.db.set_value("Material Request", doc, "buying_price_list", default_buying_price_list)
finally:
frappe.db.auto_commit_on_many_writes = 0
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/patch_missing_buying_price_list_in_material_request.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/remove_sales_partner_from_consolidated_sales_invoice.py | import frappe
def execute():
SalesInvoice = frappe.qb.DocType("Sales Invoice")
query = (
frappe.qb.update(SalesInvoice)
.set(SalesInvoice.sales_partner, "")
.set(SalesInvoice.commission_rate, 0)
.set(SalesInvoice.total_commission, 0)
.where(SalesInvoice.is_consolidated == 1)
)
# For develop/version-16
if frappe.db.has_column("Sales Invoice", "is_created_using_pos"):
query = query.where(SalesInvoice.is_created_using_pos == 0)
query.run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/remove_sales_partner_from_consolidated_sales_invoice.py",
"license": "GNU General Public License v3.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/rename_price_list_to_buying_price_list.py | import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
if frappe.db.has_column("Material Request", "price_list"):
rename_field(
"Material Request",
"price_list",
"buying_price_list",
)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/rename_price_list_to_buying_price_list.py",
"license": "GNU General Public License v3.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/set_company_on_pos_inv_merge_log.py | import frappe
def execute():
pos_invoice_merge_logs = frappe.db.get_all(
"POS Invoice Merge Log", {"docstatus": 1}, ["name", "pos_closing_entry"]
)
frappe.db.auto_commit_on_many_writes = 1
for log in pos_invoice_merge_logs:
if log.pos_closing_entry and frappe.db.exists("POS Closing Entry", log.pos_closing_entry):
company = frappe.db.get_value("POS Closing Entry", log.pos_closing_entry, "company")
frappe.db.set_value("POS Invoice Merge Log", log.name, "company", company)
frappe.db.auto_commit_on_many_writes = 0
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/set_company_on_pos_inv_merge_log.py",
"license": "GNU General Public License v3.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/set_status_cancelled_on_cancelled_pos_opening_entry_and_pos_closing_entry.py | import frappe
from frappe.query_builder import DocType
def execute():
POSOpeningEntry = DocType("POS Opening Entry")
POSClosingEntry = DocType("POS Closing Entry")
frappe.qb.update(POSOpeningEntry).set(POSOpeningEntry.status, "Cancelled").where(
POSOpeningEntry.docstatus == 2
).run()
frappe.qb.update(POSClosingEntry).set(POSClosingEntry.status, "Cancelled").where(
POSClosingEntry.docstatus == 2
).run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/set_status_cancelled_on_cancelled_pos_opening_entry_and_pos_closing_entry.py",
"license": "GNU General Public License v3.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/accounts/doctype/pegged_currencies/pegged_currencies.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class PeggedCurrencies(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
from erpnext.accounts.doctype.pegged_currencies.pegged_currencies import PeggedCurrencies
pegged_currency_item: DF.Table[PeggedCurrencies]
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/accounts/doctype/pegged_currencies/pegged_currencies.py",
"license": "GNU General Public License v3.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/accounts/doctype/pegged_currencies/test_pegged_currencies.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
from frappe.tests import IntegrationTestCase, UnitTestCase
# On IntegrationTestCase, the doctype test records and all
# link-field test record dependencies are recursively loaded
# Use these module variables to add/remove to/from that list
EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
class UnitTestPeggedCurrencies(UnitTestCase):
"""
Unit tests for PeggedCurrencies.
Use this class for testing individual functions and methods.
"""
pass
class IntegrationTestPeggedCurrencies(IntegrationTestCase):
"""
Integration tests for PeggedCurrencies.
Use this class for testing interactions between multiple components.
"""
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/accounts/doctype/pegged_currencies/test_pegged_currencies.py",
"license": "GNU General Public License v3.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
frappe/erpnext:erpnext/accounts/doctype/pegged_currency_details/pegged_currency_details.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class PeggedCurrencyDetails(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
pegged_against: DF.Link | None
pegged_exchange_rate: DF.Data | None
source_currency: DF.Link | None
# end: auto-generated types
pass
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/accounts/doctype/pegged_currency_details/pegged_currency_details.py",
"license": "GNU General Public License v3.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/patches/v15_0/update_pick_list_fields.py | import frappe
from frappe.query_builder.functions import IfNull
def execute():
update_delivery_note()
update_pick_list_items()
def update_delivery_note():
DN = frappe.qb.DocType("Delivery Note")
DNI = frappe.qb.DocType("Delivery Note Item")
frappe.qb.update(DNI).join(DN).on(DN.name == DNI.parent).set(DNI.against_pick_list, DN.pick_list).where(
IfNull(DN.pick_list, "") != ""
).run()
def update_pick_list_items():
PL = frappe.qb.DocType("Pick List")
PLI = frappe.qb.DocType("Pick List Item")
pick_lists = frappe.qb.from_(PL).select(PL.name).where(PL.status == "Completed").run(pluck="name")
if not pick_lists:
return
frappe.qb.update(PLI).set(PLI.delivered_qty, PLI.picked_qty).where(PLI.parent.isin(pick_lists)).run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/update_pick_list_fields.py",
"license": "GNU General Public License v3.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/rename_pos_closing_entry_fields.py | import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
rename_field("POS Closing Entry", "pos_transactions", "pos_invoices", validate=False)
if frappe.db.exists("DocType", "Sales Invoice Reference"):
rename_field("POS Closing Entry", "sales_invoice_transactions", "sales_invoices", validate=False)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/rename_pos_closing_entry_fields.py",
"license": "GNU General Public License v3.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/accounts/report/calculated_discount_mismatch/calculated_discount_mismatch.py | # Copyright (c) 2025, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import json
import frappe
from frappe import _
from frappe.query_builder import Order, Tuple
from frappe.utils.formatters import format_value
AFFECTED_DOCTYPES = frozenset(
(
"POS Invoice",
"Purchase Invoice",
"Sales Invoice",
"Purchase Order",
"Supplier Quotation",
"Quotation",
"Sales Order",
"Delivery Note",
"Purchase Receipt",
)
)
LAST_MODIFIED_DATE_THRESHOLD = "2025-05-30"
def execute(filters=None):
columns = get_columns()
data = get_data()
return columns, data
def get_columns():
return [
{
"fieldname": "doctype",
"label": _("Transaction Type"),
"fieldtype": "Link",
"options": "DocType",
"width": 120,
},
{
"fieldname": "docname",
"label": _("Transaction Name"),
"fieldtype": "Dynamic Link",
"options": "doctype",
"width": 150,
},
{
"fieldname": "actual_discount_percentage",
"label": _("Discount Percentage in Transaction"),
"fieldtype": "Percent",
"width": 180,
},
{
"fieldname": "actual_discount_amount",
"label": _("Discount Amount in Transaction"),
"fieldtype": "Currency",
"width": 180,
},
{
"fieldname": "suspected_discount_amount",
"label": _("Suspected Discount Amount"),
"fieldtype": "Currency",
"width": 180,
},
]
def get_data():
transactions_with_discount_percentage = {}
for doctype in AFFECTED_DOCTYPES:
transactions = get_transactions_with_discount_percentage(doctype)
for transaction in transactions:
transactions_with_discount_percentage[(doctype, transaction.name)] = transaction
if not transactions_with_discount_percentage:
return []
VERSION = frappe.qb.DocType("Version")
versions = (
frappe.qb.from_(VERSION)
.select(VERSION.ref_doctype, VERSION.docname, VERSION.data)
.where(VERSION.creation > LAST_MODIFIED_DATE_THRESHOLD)
.where(Tuple(VERSION.ref_doctype, VERSION.docname).isin(list(transactions_with_discount_percentage)))
.where(
VERSION.data.like('%"discount\\_amount"%')
| VERSION.data.like('%"additional\\_discount\\_percentage"%')
)
.orderby(VERSION.creation, order=Order.desc)
.run(as_dict=True)
)
if not versions:
return []
version_map = {}
for version in versions:
key = (version.ref_doctype, version.docname)
if key not in version_map:
version_map[key] = []
version_map[key].append(version.data)
data = []
discount_amount_field_map = {
doctype: frappe.get_meta(doctype).get_field("discount_amount") for doctype in AFFECTED_DOCTYPES
}
for doc, versions in version_map.items():
for version_data in versions:
if '"additional_discount_percentage"' in version_data:
				# skip this doc if additional_discount_percentage was changed in the newest version
break
version_data = json.loads(version_data)
changed_values = version_data.get("changed")
if not changed_values:
continue
discount_values = next((row for row in changed_values if row[0] == "discount_amount"), None)
if not discount_values:
continue
old = discount_values[1]
new = discount_values[2]
doctype = doc[0]
doc_values = transactions_with_discount_percentage.get(doc)
formatted_discount_amount = format_value(
doc_values.discount_amount,
df=discount_amount_field_map[doctype],
currency=doc_values.currency,
)
if new != formatted_discount_amount:
# if the discount amount in the version is not equal to the current value, skip
break
data.append(
{
"doctype": doctype,
"docname": doc_values.name,
"actual_discount_percentage": doc_values.additional_discount_percentage,
"actual_discount_amount": new,
"suspected_discount_amount": old,
}
)
break
return data
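# For reference: a Version document stores field changes in its JSON "data" as
# [fieldname, old_value, new_value] triples with display-formatted values,
# e.g. (hypothetical) {"changed": [["discount_amount", "100.00", "150.00"]]} --
# hence discount_values[1] is the old value and discount_values[2] the new one.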
def get_transactions_with_discount_percentage(doctype):
transactions = frappe.get_all(
doctype,
fields=[
"name",
"currency",
"additional_discount_percentage",
"discount_amount",
],
filters={
"docstatus": ["<", 2],
"additional_discount_percentage": [">", 0],
"discount_amount": ["!=", 0],
"modified": [">", LAST_MODIFIED_DATE_THRESHOLD],
},
)
return transactions
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/accounts/report/calculated_discount_mismatch/calculated_discount_mismatch.py",
"license": "GNU General Public License v3.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
frappe/erpnext:erpnext/patches/v15_0/unset_incorrect_additional_discount_percentage.py | import frappe
from frappe import scrub
from frappe.model.meta import get_field_precision
from frappe.utils import flt
from semantic_version import Version
from erpnext.accounts.report.calculated_discount_mismatch.calculated_discount_mismatch import (
AFFECTED_DOCTYPES,
LAST_MODIFIED_DATE_THRESHOLD,
)
def execute():
	# run this patch only if the erpnext version before this update was v15.64.0 or higher
if not should_run_patch():
return
for doctype in AFFECTED_DOCTYPES:
meta = frappe.get_meta(doctype)
filters = {
"modified": [">", LAST_MODIFIED_DATE_THRESHOLD],
"additional_discount_percentage": [">", 0],
"discount_amount": ["!=", 0],
}
# can't reverse calculate grand_total if shipping rule is set
if meta.has_field("shipping_rule"):
filters["shipping_rule"] = ["is", "not set"]
documents = frappe.get_all(
doctype,
fields=[
"name",
"additional_discount_percentage",
"discount_amount",
"apply_discount_on",
"grand_total",
"net_total",
],
filters=filters,
)
if not documents:
continue
precision = get_field_precision(frappe.get_meta(doctype).get_field("additional_discount_percentage"))
mismatched_documents = []
for doc in documents:
# we need grand_total before applying discount
doc.grand_total += doc.discount_amount
discount_applied_on = scrub(doc.apply_discount_on)
calculated_discount_amount = flt(
doc.additional_discount_percentage * doc.get(discount_applied_on) / 100,
precision,
)
# if difference is more than 0.02 (based on precision), unset the additional discount percentage
if abs(calculated_discount_amount - doc.discount_amount) > 2 / (10**precision):
mismatched_documents.append(doc.name)
if mismatched_documents:
# changing the discount percentage has no accounting effect
# so we can safely set it to 0 in the database
frappe.db.set_value(
doctype,
{"name": ["in", mismatched_documents]},
"additional_discount_percentage",
0,
update_modified=False,
)
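# Worked example of the tolerance check above (illustrative numbers): with
# precision 2 the threshold is 2 / 10**2 = 0.02. A 10% discount applied on a
# grand_total of 1000 should yield 100.00; a stored discount_amount of 99.99
# differs by 0.01 and is kept, while 100.05 differs by 0.05 and gets the
# percentage unset.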
def get_semantic_version(version):
try:
return Version(version)
except Exception:
pass
def should_run_patch():
installed_app = frappe.db.get_value(
"Installed Application",
{"app_name": "erpnext"},
["app_version", "git_branch"],
)
if not installed_app:
return True
version, git_branch = installed_app
semantic_version = get_semantic_version(version)
if not semantic_version:
return True
return not (
semantic_version.major < 15
or (git_branch == "version-15" and semantic_version.major == 15 and semantic_version.minor < 64)
)
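# Examples of the gate above (hypothetical versions): "14.20.0" skips the patch
# (major < 15); "15.63.2" on branch "version-15" skips it (minor < 64);
# "15.64.1" on "version-15", or any v16 build, runs it. Missing or unparseable
# versions default to running the patch.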
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/unset_incorrect_additional_discount_percentage.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
frappe/erpnext:erpnext/patches/v15_0/drop_sle_indexes.py | import click
import frappe
def execute():
table = "tabStock Ledger Entry"
index_list = ["posting_datetime_creation_index", "item_warehouse", "batch_no_item_code_warehouse_index"]
for index in index_list:
if not frappe.db.has_index(table, index):
continue
try:
frappe.db.sql_ddl(f"ALTER TABLE `{table}` DROP INDEX `{index}`")
click.echo(f"✓ dropped {index} index from {table}")
except Exception:
			frappe.log_error(f"Failed to drop index {index} from {table}")
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/drop_sle_indexes.py",
"license": "GNU General Public License v3.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/set_cancelled_status_to_cancelled_pos_invoice.py | import frappe
from frappe.query_builder import DocType
def execute():
POSInvoice = DocType("POS Invoice")
frappe.qb.update(POSInvoice).set(POSInvoice.status, "Cancelled").where(POSInvoice.docstatus == 2).run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/set_cancelled_status_to_cancelled_pos_invoice.py",
"license": "GNU General Public License v3.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v14_0/update_full_name_in_contract.py | import frappe
from frappe import qb
def execute():
con = qb.DocType("Contract")
for c in (
qb.from_(con)
.select(con.name, con.party_type, con.party_name)
.where(con.party_full_name.isnull())
.run(as_dict=True)
):
field = c.party_type.lower() + "_name"
if res := frappe.db.get_value(c.party_type, c.party_name, field):
frappe.db.set_value("Contract", c.name, "party_full_name", res)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v14_0/update_full_name_in_contract.py",
"license": "GNU General Public License v3.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/remove_agriculture_roles.py | import frappe
def execute():
if "agriculture" in frappe.get_installed_apps():
return
for role in ["Agriculture User", "Agriculture Manager"]:
assignments = frappe.get_all("Has Role", {"role": role}, pluck="name")
for assignment in assignments:
frappe.delete_doc("Has Role", assignment, ignore_missing=True, force=True)
frappe.delete_doc("Role", role, ignore_missing=True, force=True)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/remove_agriculture_roles.py",
"license": "GNU General Public License v3.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/rename_group_by_to_categorize_by_in_custom_reports.py | import json
import frappe
def execute():
custom_reports = frappe.get_all(
"Report",
filters={
"report_type": "Custom Report",
"reference_report": ["in", ["General Ledger", "Supplier Quotation Comparison"]],
},
fields=["name", "json"],
)
for report in custom_reports:
report_json = json.loads(report.json)
if "filters" in report_json and "group_by" in report_json["filters"]:
report_json["filters"]["categorize_by"] = (
report_json["filters"].pop("group_by").replace("Group", "Categorize")
)
frappe.db.set_value("Report", report.name, "json", json.dumps(report_json))
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/rename_group_by_to_categorize_by_in_custom_reports.py",
"license": "GNU General Public License v3.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v15_0/set_grand_total_to_default_mop.py | import frappe
def execute():
if frappe.db.has_column("POS Profile", "disable_grand_total_to_default_mop"):
POSProfile = frappe.qb.DocType("POS Profile")
frappe.qb.update(POSProfile).set(POSProfile.set_grand_total_to_default_mop, 1).where(
POSProfile.disable_grand_total_to_default_mop == 0
).run()
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v15_0/set_grand_total_to_default_mop.py",
"license": "GNU General Public License v3.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
frappe/erpnext:erpnext/patches/v14_0/set_update_price_list_based_on.py | import frappe
from frappe.utils import cint
def execute():
frappe.db.set_single_value(
"Stock Settings",
"update_price_list_based_on",
(
"Price List Rate"
if cint(frappe.db.get_single_value("Selling Settings", "editable_price_list_rate"))
else "Rate"
),
)
| {
"repo_id": "frappe/erpnext",
"file_path": "erpnext/patches/v14_0/set_update_price_list_based_on.py",
"license": "GNU General Public License v3.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
freqtrade/freqtrade:freqtrade/rpc/api_server/api_trading.py | import logging
from fastapi import APIRouter, Depends, Query
from fastapi.exceptions import HTTPException
from freqtrade.enums import TradingMode
from freqtrade.rpc import RPC
from freqtrade.rpc.api_server.api_schemas import (
Balances,
BlacklistPayload,
BlacklistResponse,
Count,
DailyWeeklyMonthly,
DeleteLockRequest,
DeleteTrade,
Entry,
Exit,
ForceEnterPayload,
ForceEnterResponse,
ForceExitPayload,
ListCustomData,
Locks,
LocksPayload,
MixTag,
OpenTradeSchema,
PairCandlesRequest,
PairHistory,
PerformanceEntry,
Profit,
ProfitAll,
ResultMsg,
Stats,
StatusMsg,
WhitelistResponse,
)
from freqtrade.rpc.api_server.deps import get_config, get_rpc
from freqtrade.rpc.rpc import RPCException
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/balance", response_model=Balances, tags=["Trading-info"])
def balance(rpc: RPC = Depends(get_rpc), config=Depends(get_config)):
"""Account Balances"""
return rpc._rpc_balance(
config["stake_currency"],
config.get("fiat_display_currency", ""),
)
@router.get("/count", response_model=Count, tags=["Trading-info"])
def count(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_count()
@router.get("/entries", response_model=list[Entry], tags=["Trading-info"])
def entries(pair: str | None = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_enter_tag_performance(pair)
@router.get("/exits", response_model=list[Exit], tags=["Trading-info"])
def exits(pair: str | None = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_exit_reason_performance(pair)
@router.get("/mix_tags", response_model=list[MixTag], tags=["Trading-info"])
def mix_tags(pair: str | None = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_mix_tag_performance(pair)
@router.get("/performance", response_model=list[PerformanceEntry], tags=["Trading-info"])
def performance(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_performance()
@router.get("/profit", response_model=Profit, tags=["Trading-info"])
def profit(rpc: RPC = Depends(get_rpc), config=Depends(get_config)):
return rpc._rpc_trade_statistics(config["stake_currency"], config.get("fiat_display_currency"))
@router.get("/profit_all", response_model=ProfitAll, tags=["Trading-info"])
def profit_all(rpc: RPC = Depends(get_rpc), config=Depends(get_config)):
response = {
"all": rpc._rpc_trade_statistics(
config["stake_currency"], config.get("fiat_display_currency")
),
}
if config.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT:
response["long"] = rpc._rpc_trade_statistics(
config["stake_currency"], config.get("fiat_display_currency"), direction="long"
)
response["short"] = rpc._rpc_trade_statistics(
config["stake_currency"], config.get("fiat_display_currency"), direction="short"
)
return response
@router.get("/stats", response_model=Stats, tags=["Trading-info"])
def stats(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_stats()
@router.get("/daily", response_model=DailyWeeklyMonthly, tags=["Trading-info"])
def daily(
timescale: int = Query(7, ge=1, description="Number of days to fetch data for"),
rpc: RPC = Depends(get_rpc),
config=Depends(get_config),
):
return rpc._rpc_timeunit_profit(
timescale, config["stake_currency"], config.get("fiat_display_currency", "")
)
@router.get("/weekly", response_model=DailyWeeklyMonthly, tags=["Trading-info"])
def weekly(
timescale: int = Query(4, ge=1, description="Number of weeks to fetch data for"),
rpc: RPC = Depends(get_rpc),
config=Depends(get_config),
):
return rpc._rpc_timeunit_profit(
timescale, config["stake_currency"], config.get("fiat_display_currency", ""), "weeks"
)
@router.get("/monthly", response_model=DailyWeeklyMonthly, tags=["Trading-info"])
def monthly(
timescale: int = Query(3, ge=1, description="Number of months to fetch data for"),
rpc: RPC = Depends(get_rpc),
config=Depends(get_config),
):
return rpc._rpc_timeunit_profit(
timescale, config["stake_currency"], config.get("fiat_display_currency", ""), "months"
)
@router.get("/status", response_model=list[OpenTradeSchema], tags=["Trading-info"])
def status(rpc: RPC = Depends(get_rpc)):
try:
return rpc._rpc_trade_status()
except RPCException:
return []
# Using the response model here will cause a ~100% increase in response time (from 1s to 2s)
# on big databases. Correct response model: response_model=TradeResponse,
@router.get("/trades", tags=["Trading-info", "Trades"])
def trades(
limit: int = Query(500, ge=1, description="Maximum number of different trades to return data"),
offset: int = Query(0, ge=0, description="Number of trades to skip for pagination"),
order_by_id: bool = Query(
True, description="Sort trades by id (default: True). If False, sorts by latest timestamp"
),
rpc: RPC = Depends(get_rpc),
):
return rpc._rpc_trade_history(limit, offset=offset, order_by_id=order_by_id)
@router.get("/trade/{tradeid}", response_model=OpenTradeSchema, tags=["Trades"])
def trade(tradeid: int = 0, rpc: RPC = Depends(get_rpc)):
try:
return rpc._rpc_trade_status([tradeid])[0]
except (RPCException, KeyError):
raise HTTPException(status_code=404, detail="Trade not found.")
@router.delete("/trades/{tradeid}", response_model=DeleteTrade, tags=["Trades"])
def trades_delete(tradeid: int, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_delete(tradeid)
@router.delete("/trades/{tradeid}/open-order", response_model=OpenTradeSchema, tags=["Trades"])
def trade_cancel_open_order(tradeid: int, rpc: RPC = Depends(get_rpc)):
rpc._rpc_cancel_open_order(tradeid)
return rpc._rpc_trade_status([tradeid])[0]
@router.post("/trades/{tradeid}/reload", response_model=OpenTradeSchema, tags=["Trades"])
def trade_reload(tradeid: int, rpc: RPC = Depends(get_rpc)):
rpc._rpc_reload_trade_from_exchange(tradeid)
return rpc._rpc_trade_status([tradeid])[0]
@router.get("/trades/open/custom-data", response_model=list[ListCustomData], tags=["Trades"])
def list_open_trades_custom_data(
key: str | None = Query(None, description="Optional key to filter data"),
limit: int = Query(100, ge=1, description="Maximum number of different trades to return data"),
offset: int = Query(0, ge=0, description="Number of trades to skip for pagination"),
rpc: RPC = Depends(get_rpc),
):
"""
Fetch custom data for all open trades.
If a key is provided, it will be used to filter data accordingly.
Pagination is implemented via the `limit` and `offset` parameters.
"""
try:
return rpc._rpc_list_custom_data(key=key, limit=limit, offset=offset)
except RPCException as e:
raise HTTPException(status_code=404, detail=str(e))
@router.get("/trades/{trade_id}/custom-data", response_model=list[ListCustomData], tags=["Trades"])
def list_custom_data(trade_id: int, key: str | None = Query(None), rpc: RPC = Depends(get_rpc)):
"""
Fetch custom data for a specific trade.
If a key is provided, it will be used to filter data accordingly.
"""
try:
return rpc._rpc_list_custom_data(trade_id, key=key)
except RPCException as e:
raise HTTPException(status_code=404, detail=str(e))
# /forcebuy is deprecated since the addition of short trading. Use /forceentry instead.
@router.post("/forceenter", response_model=ForceEnterResponse, tags=["Trades"])
@router.post(
"/forcebuy",
response_model=ForceEnterResponse,
tags=["Trades"],
summary="(deprecated) Please use /forceenter instead",
)
def force_entry(payload: ForceEnterPayload, rpc: RPC = Depends(get_rpc)):
ordertype = payload.ordertype.value if payload.ordertype else None
trade = rpc._rpc_force_entry(
payload.pair,
payload.price,
order_side=payload.side,
order_type=ordertype,
stake_amount=payload.stakeamount,
enter_tag=payload.entry_tag or "force_entry",
leverage=payload.leverage,
)
if trade:
return ForceEnterResponse.model_validate(trade.to_json())
else:
return ForceEnterResponse.model_validate(
{"status": f"Error entering {payload.side} trade for pair {payload.pair}."}
)
# /forcesell is deprecated since the addition of short trading. Use /forceexit instead.
@router.post("/forceexit", response_model=ResultMsg, tags=["Trades"])
@router.post(
"/forcesell",
response_model=ResultMsg,
tags=["Trades"],
summary="(deprecated) Please use /forceexit instead",
)
def forceexit(payload: ForceExitPayload, rpc: RPC = Depends(get_rpc)):
ordertype = payload.ordertype.value if payload.ordertype else None
return rpc._rpc_force_exit(
str(payload.tradeid), ordertype, amount=payload.amount, price=payload.price
)
@router.get("/blacklist", response_model=BlacklistResponse, tags=["Trading-info", "Pairlist"])
def blacklist(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_blacklist()
@router.post("/blacklist", response_model=BlacklistResponse, tags=["Pairlist"])
def blacklist_post(payload: BlacklistPayload, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_blacklist(payload.blacklist)
@router.delete("/blacklist", response_model=BlacklistResponse, tags=["Pairlist"])
def blacklist_delete(pairs_to_delete: list[str] = Query([]), rpc: RPC = Depends(get_rpc)):
"""Provide a list of pairs to delete from the blacklist"""
return rpc._rpc_blacklist_delete(pairs_to_delete)
@router.get("/whitelist", response_model=WhitelistResponse, tags=["Trading-info", "Pairlist"])
def whitelist(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_whitelist()
@router.get("/locks", response_model=Locks, tags=["Trading-info", "Locks"])
def locks(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_locks()
@router.delete("/locks/{lockid}", response_model=Locks, tags=["Locks"])
def delete_lock(lockid: int, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_delete_lock(lockid=lockid)
@router.post("/locks/delete", response_model=Locks, tags=["Locks"])
def delete_lock_pair(payload: DeleteLockRequest, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_delete_lock(lockid=payload.lockid, pair=payload.pair)
@router.post("/locks", response_model=Locks, tags=["Locks"])
def add_locks(payload: list[LocksPayload], rpc: RPC = Depends(get_rpc)):
for lock in payload:
rpc._rpc_add_lock(lock.pair, lock.until, lock.reason, lock.side)
return rpc._rpc_locks()
@router.post("/start", response_model=StatusMsg, tags=["Bot-control"])
def start(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_start()
@router.post("/stop", response_model=StatusMsg, tags=["Bot-control"])
def stop(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_stop()
@router.post("/pause", response_model=StatusMsg, tags=["Bot-control"])
@router.post("/stopentry", response_model=StatusMsg, tags=["Bot-control"])
@router.post("/stopbuy", response_model=StatusMsg, tags=["Bot-control"])
def pause(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_pause()
@router.post("/reload_config", response_model=StatusMsg, tags=["Bot-control"])
def reload_config(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_reload_config()
@router.get("/pair_candles", response_model=PairHistory, tags=["Candle data"])
def pair_candles(pair: str, timeframe: str, limit: int | None = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_analysed_dataframe(pair, timeframe, limit, None)
@router.post("/pair_candles", response_model=PairHistory, tags=["Candle data"])
def pair_candles_filtered(payload: PairCandlesRequest, rpc: RPC = Depends(get_rpc)):
# Advanced pair_candles endpoint with column filtering
return rpc._rpc_analysed_dataframe(
payload.pair, payload.timeframe, payload.limit, payload.columns
)
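# Example request against the force-entry endpoint (hypothetical host, token and
# pair; the /api/v1 prefix assumes the default API server mount point):
#   curl -X POST http://127.0.0.1:8080/api/v1/forceenter \
#        -H "Authorization: Bearer <token>" \
#        -H "Content-Type: application/json" \
#        -d '{"pair": "BTC/USDT", "side": "long"}'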
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/rpc/api_server/api_trading.py",
"license": "GNU General Public License v3.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
freqtrade/freqtrade:freqtrade/rpc/api_server/api_webserver.py | import logging
from fastapi import APIRouter, Depends
from freqtrade.data.history.datahandlers import get_datahandler
from freqtrade.enums import CandleType, TradingMode
from freqtrade.rpc.api_server.api_schemas import (
AvailablePairs,
ExchangeListResponse,
FreqAIModelListResponse,
HyperoptLossListResponse,
StrategyListResponse,
)
from freqtrade.rpc.api_server.deps import get_config
logger = logging.getLogger(__name__)
# Private API, protected by authentication and webserver_mode dependency
router = APIRouter()
@router.get("/strategies", response_model=StrategyListResponse, tags=["Strategy"])
def list_strategies(config=Depends(get_config)):
from freqtrade.resolvers.strategy_resolver import StrategyResolver
strategies = StrategyResolver.search_all_objects(
config, False, config.get("recursive_strategy_search", False)
)
strategies = sorted(strategies, key=lambda x: x["name"])
return {"strategies": [x["name"] for x in strategies]}
@router.get("/exchanges", response_model=ExchangeListResponse, tags=[])
def list_exchanges(config=Depends(get_config)):
from freqtrade.exchange import list_available_exchanges
exchanges = list_available_exchanges(config)
return {
"exchanges": exchanges,
}
@router.get("/hyperoptloss", response_model=HyperoptLossListResponse, tags=["Hyperopt"])
def list_hyperoptloss(
config=Depends(get_config),
):
import textwrap
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
loss_functions = HyperOptLossResolver.search_all_objects(config, False)
loss_functions = sorted(loss_functions, key=lambda x: x["name"])
return {
"loss_functions": [
{
"name": x["name"],
"description": textwrap.dedent((x["class"].__doc__ or "").strip()),
}
for x in loss_functions
]
}
@router.get("/freqaimodels", response_model=FreqAIModelListResponse, tags=["FreqAI"])
def list_freqaimodels(config=Depends(get_config)):
from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
models = FreqaiModelResolver.search_all_objects(config, False)
models = sorted(models, key=lambda x: x["name"])
return {"freqaimodels": [x["name"] for x in models]}
@router.get(
"/available_pairs", response_model=AvailablePairs, tags=["Candle data", "Download-data"]
)
def list_available_pairs(
timeframe: str | None = None,
stake_currency: str | None = None,
candletype: CandleType | None = None,
config=Depends(get_config),
):
dh = get_datahandler(config["datadir"], config.get("dataformat_ohlcv"))
trading_mode: TradingMode = config.get("trading_mode", TradingMode.SPOT)
pair_interval = dh.ohlcv_get_available_data(config["datadir"], trading_mode)
if timeframe:
pair_interval = [pair for pair in pair_interval if pair[1] == timeframe]
if stake_currency:
pair_interval = [pair for pair in pair_interval if pair[0].endswith(stake_currency)]
if candletype:
pair_interval = [pair for pair in pair_interval if pair[2] == candletype]
else:
candle_type = CandleType.get_default(trading_mode)
pair_interval = [pair for pair in pair_interval if pair[2] == candle_type]
pair_interval = sorted(pair_interval, key=lambda x: x[0])
pairs = list({x[0] for x in pair_interval})
pairs.sort()
result = {
"length": len(pairs),
"pairs": pairs,
"pair_interval": pair_interval,
}
return result
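# Illustrative response shape (hypothetical data):
#   {"length": 2,
#    "pairs": ["BTC/USDT", "ETH/USDT"],
#    "pair_interval": [("BTC/USDT", "5m", "spot"), ("ETH/USDT", "1h", "spot")]}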
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/rpc/api_server/api_webserver.py",
"license": "GNU General Public License v3.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
freqtrade/freqtrade:freqtrade/system/set_mp_start_method.py | from multiprocessing import get_all_start_methods, get_start_method, set_start_method
def set_mp_start_method():
"""
    Set the multiprocessing start method to something other than fork.
    fork is deprecated in 3.13, and forkserver will become the default in 3.14.
"""
try:
sms = get_all_start_methods()
if "forkserver" in sms and get_start_method(True) is None:
set_start_method("forkserver")
except RuntimeError:
pass
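if __name__ == "__main__":
    # Minimal demonstration (assumes a platform where forkserver is available):
    set_mp_start_method()
    print(get_start_method(allow_none=True))  # "forkserver" where supported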
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/system/set_mp_start_method.py",
"license": "GNU General Public License v3.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
freqtrade/freqtrade:freqtrade/util/ft_ttlcache.py | import time
from cachetools import TTLCache
class FtTTLCache(TTLCache):
"""
A TTLCache with a different default timer to allow for easier mocking in tests.
"""
def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None):
super().__init__(maxsize=maxsize, ttl=ttl, timer=timer, getsizeof=getsizeof)
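if __name__ == "__main__":
    # Demonstration of the injectable timer (hypothetical usage): a fake clock
    # lets entries expire without real waiting.
    fake_now = [0.0]
    cache = FtTTLCache(maxsize=10, ttl=5, timer=lambda: fake_now[0])
    cache["pair"] = "BTC/USDT"
    assert "pair" in cache
    fake_now[0] += 6  # advance the fake clock past the TTL
    assert "pair" not in cache  # the entry has expired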
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/util/ft_ttlcache.py",
"license": "GNU General Public License v3.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
freqtrade/freqtrade:freqtrade/util/singleton.py | from typing import Any
class SingletonMeta(type):
"""
A thread-safe implementation of Singleton.
Use as metaclass to create singleton classes.
"""
_instances: dict = {}
def __call__(cls, *args: Any, **kwargs: Any) -> Any:
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
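if __name__ == "__main__":
    # Demonstration (hypothetical class, not part of freqtrade):
    class Configuration(metaclass=SingletonMeta):
        def __init__(self) -> None:
            self.settings: dict = {}
    a = Configuration()
    b = Configuration()
    assert a is b  # every instantiation returns the same shared instance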
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/util/singleton.py",
"license": "GNU General Public License v3.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
freqtrade/freqtrade:tests/test_pip_audit.py | """
Run pip audit to check for known security vulnerabilities in installed packages.
Original Idea and base for this implementation by Michael Kennedy's blog:
https://mkennedy.codes/posts/python-supply-chain-security-made-easy/
"""
import os
import subprocess
import sys
from pathlib import Path
import pytest
IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true"
# Skip this test in GitHub Actions - GitHub issues a security warning on its own.
# Running it locally detects vulnerable transitive dependencies.
@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Skip pip-audit in GitHub Actions")
def test_pip_audit_no_vulnerabilities():
"""
Run pip-audit to check for known security vulnerabilities.
This test will fail if any vulnerabilities are detected in the installed packages.
Note: CVE-2025-53000 (nbconvert Windows vulnerability) is ignored as it only affects
Windows platforms and is a known acceptable risk for this project.
"""
# Get the project root directory
project_root = Path(__file__).parent.parent
command = [
sys.executable,
"-m",
"pip_audit",
# "--format=json",
"--progress-spinner=off",
"--ignore-vuln",
"CVE-2025-53000",
"--skip-editable",
]
    # Run pip-audit and capture its output (the JSON format flag is disabled above)
try:
result = subprocess.run(
command,
cwd=project_root,
capture_output=True,
text=True,
timeout=120, # 2 minute timeout
)
except subprocess.TimeoutExpired:
pytest.fail("pip-audit command timed out after 120 seconds")
except FileNotFoundError:
pytest.fail("pip-audit not installed or not accessible")
# Check if pip-audit found any vulnerabilities
if result.returncode != 0:
# pip-audit returns non-zero when vulnerabilities are found
error_output = result.stdout + "\n" + result.stderr
# Check if it's an actual vulnerability vs an error
if "vulnerabilities found" in error_output.lower() or '"dependencies"' in result.stdout:
pytest.fail(
f"pip-audit detected security vulnerabilities!\n\n"
f"Output:\n{result.stdout}\n\n"
f"Please review and update vulnerable packages.\n"
f"Run manually with: {' '.join(command)}"
)
else:
# Some other error occurred
pytest.fail(
f"pip-audit failed to run properly:\n\nReturn code: {result.returncode}\n"
f"Output: {error_output}\n"
)
# Success - no vulnerabilities found
assert result.returncode == 0, "pip-audit should return 0 when no vulnerabilities are found"
def test_pip_audit_runs_successfully():
"""
Verify that pip-audit can run successfully (even if vulnerabilities are found).
This is a smoke test to ensure pip-audit is properly installed and functional.
"""
try:
result = subprocess.run(
[sys.executable, "-m", "pip_audit", "--version"],
capture_output=True,
text=True,
timeout=10,
)
assert result.returncode == 0, f"pip-audit --version failed: {result.stderr}"
assert "pip-audit" in result.stdout.lower(), "pip-audit version output unexpected"
except FileNotFoundError:
pytest.fail("pip-audit not installed")
except subprocess.TimeoutExpired:
pytest.fail("pip-audit --version timed out")
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "tests/test_pip_audit.py",
"license": "GNU General Public License v3.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
freqtrade/freqtrade:freqtrade/exchange/coinex.py | import logging
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
logger = logging.getLogger(__name__)
class Coinex(Exchange):
"""
CoinEx exchange class. Contains adjustments needed for Freqtrade to work
with this exchange.
Please note that this exchange is not included in the list of exchanges
officially supported by the Freqtrade development team. So some features
may still not work as expected.
"""
_ft_has: FtHas = {
"l2_limit_range": [5, 10, 20, 50],
"tickers_have_bid_ask": False,
"tickers_have_quoteVolume": False,
}
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/exchange/coinex.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
freqtrade/freqtrade:freqtrade/plugins/pairlist/DelistFilter.py | """
Delist pair list filter
"""
import logging
from datetime import UTC, datetime, timedelta
from freqtrade.exceptions import ConfigurationError
from freqtrade.exchange.exchange_types import Ticker
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter, SupportsBacktesting
from freqtrade.util import format_date
logger = logging.getLogger(__name__)
class DelistFilter(IPairList):
supports_backtesting = SupportsBacktesting.NO
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._max_days_from_now = self._pairlistconfig.get("max_days_from_now", 0)
if self._max_days_from_now < 0:
raise ConfigurationError("DelistFilter requires max_days_from_now to be >= 0")
if not self._exchange._ft_has["has_delisting"]:
raise ConfigurationError(
"DelistFilter doesn't support this exchange and trading mode combination.",
)
@property
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty Dict is passed
as tickers argument to filter_pairlist
"""
return False
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
"""
return (
f"{self.name} - Filtering pairs that will be delisted"
+ (
f" in the next {self._max_days_from_now} days"
if self._max_days_from_now > 0
else ""
)
+ "."
)
@staticmethod
def description() -> str:
return "Filter pairs that will be delisted on exchange."
@staticmethod
def available_parameters() -> dict[str, PairlistParameter]:
return {
"max_days_from_now": {
"type": "number",
"default": 0,
"description": "Max days from now",
"help": (
"Remove pairs that will be delisted in the next X days. Set to 0 to remove all."
),
},
}
def _validate_pair(self, pair: str, ticker: Ticker | None) -> bool:
"""
Check if pair will be delisted.
:param pair: Pair that's currently validated
:param ticker: ticker dict as returned from ccxt.fetch_ticker
:return: True if the pair can stay, false if it should be removed
"""
delist_date = self._exchange.check_delisting_time(pair)
if delist_date is not None:
remove_pair = self._max_days_from_now == 0
if self._max_days_from_now > 0:
current_datetime = datetime.now(UTC)
max_delist_date = current_datetime + timedelta(days=self._max_days_from_now)
remove_pair = delist_date <= max_delist_date
if remove_pair:
self.log_once(
f"Removed {pair} from whitelist, because it will be delisted on "
f"{format_date(delist_date)}.",
logger.info,
)
return False
return True
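# Example pairlist configuration using this filter (hypothetical values):
#   "pairlists": [
#       {"method": "StaticPairList"},
#       {"method": "DelistFilter", "max_days_from_now": 7},
#   ]
# With max_days_from_now=7 only pairs delisting within the next week are
# removed; with the default 0, any pair with a delisting date is removed.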
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/plugins/pairlist/DelistFilter.py",
"license": "GNU General Public License v3.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
freqtrade/freqtrade:freqtrade/exchange/bitget.py | import logging
from datetime import datetime, timedelta
import ccxt
from freqtrade.constants import BuySell
from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, TradingMode
from freqtrade.exceptions import (
DDosProtection,
OperationalException,
RetryableOrderError,
TemporaryError,
)
from freqtrade.exchange import Exchange
from freqtrade.exchange.common import API_RETRY_COUNT, retrier
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.util import dt_from_ts, dt_now, dt_ts
logger = logging.getLogger(__name__)
class Bitget(Exchange):
"""Bitget exchange class.
Contains adjustments needed for Freqtrade to work with this exchange.
"""
_ft_has: FtHas = {
"stoploss_on_exchange": True,
"stop_price_param": "stopPrice",
"stop_price_prop": "stopPrice",
"stoploss_blocks_assets": False, # Stoploss orders do not block assets
"stoploss_order_types": {"limit": "limit", "market": "market"},
"stoploss_query_requires_stop_flag": True,
"ohlcv_candle_limit": 200, # 200 for historical candles, 1000 for recent ones.
"order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
}
_ft_has_futures: FtHas = {
"funding_fee_candle_limit": 100,
"has_delisting": True,
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [
(TradingMode.SPOT, MarginMode.NONE),
(TradingMode.FUTURES, MarginMode.ISOLATED),
# (TradingMode.FUTURES, MarginMode.CROSS),
]
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: int | None = None
) -> int:
"""
Exchange ohlcv candle limit
bitget has the following behaviour:
* 1000 candles for up-to-date data
* 200 candles for historic data (prior to a certain date)
:param timeframe: Timeframe to check
:param candle_type: Candle-type
:param since_ms: Starting timestamp
:return: Candle limit as integer
"""
timeframe_map = self._api.options["fetchOHLCV"]["maxRecentDaysPerTimeframe"]
days = timeframe_map.get(timeframe, 30)
if candle_type in (CandleType.FUTURES, CandleType.SPOT, CandleType.MARK) and (
not since_ms or dt_ts(dt_now() - timedelta(days=days)) < since_ms
):
return 1000
return super().ohlcv_candle_limit(timeframe, candle_type, since_ms)
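    # Illustration (hypothetical map entry): if maxRecentDaysPerTimeframe maps
    # "1h" to 83 days, a since_ms within the last 83 days gets the 1000-candle
    # limit, while older requests fall back to the 200-candle historical limit.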
def _convert_stop_order(self, pair: str, order_id: str, order: CcxtOrder) -> CcxtOrder:
if order.get("status", "open") == "closed":
            # Use the order ID as a clientOrderId filter to fetch the regular follow-up order.
# Could be done with "fetch_order" - but clientOid as filter doesn't seem to work
# https://www.bitget.com/api-doc/spot/trade/Get-Order-Info
for method in (
self._api.fetch_canceled_and_closed_orders,
self._api.fetch_open_orders,
):
orders = method(pair)
orders_f = [order for order in orders if order["clientOrderId"] == order_id]
if orders_f:
order_reg = orders_f[0]
self._log_exchange_response("fetch_stoploss_order1", order_reg)
order_reg["id_stop"] = order_reg["id"]
order_reg["id"] = order_id
order_reg["type"] = "stoploss"
order_reg["status_stop"] = "triggered"
return order_reg
order = self._order_contracts_to_amount(order)
order["type"] = "stoploss"
return order
def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> CcxtOrder:
params2 = {
"stop": True,
}
for method in (
self._api.fetch_open_orders,
self._api.fetch_canceled_and_closed_orders,
):
try:
orders = method(pair, params=params2)
orders_f = [order for order in orders if order["id"] == order_id]
if orders_f:
order = orders_f[0]
self._log_exchange_response("get_stop_order_fallback", order)
return self._convert_stop_order(pair, order_id, order)
except (ccxt.OrderNotFound, ccxt.InvalidOrder):
pass
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
raise TemporaryError(
f"Could not get order due to {e.__class__.__name__}. Message: {e}"
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
raise RetryableOrderError(f"StoplossOrder not found (pair: {pair} id: {order_id}).")
@retrier(retries=API_RETRY_COUNT)
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
if self._config["dry_run"]:
return self.fetch_dry_run_order(order_id)
return self._fetch_stop_order_fallback(order_id, pair)
@retrier
def additional_exchange_init(self) -> None:
"""
Additional exchange initialization logic.
.api will be available at this point.
Must be overridden in child methods if required.
"""
try:
if not self._config["dry_run"]:
if self.trading_mode == TradingMode.FUTURES:
position_mode = self._api.set_position_mode(False)
self._log_exchange_response("set_position_mode", position_mode)
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
raise TemporaryError(
f"Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}"
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT:
# Explicitly setting margin_mode is not necessary as marginMode can be set per order.
# self.set_margin_mode(pair, self.margin_mode, accept_fail)
self._set_leverage(leverage, pair, accept_fail)
def _get_params(
self,
side: BuySell,
ordertype: str,
leverage: float,
reduceOnly: bool,
time_in_force: str = "GTC",
) -> dict:
params = super()._get_params(
side=side,
ordertype=ordertype,
leverage=leverage,
reduceOnly=reduceOnly,
time_in_force=time_in_force,
)
if self.trading_mode == TradingMode.FUTURES and self.margin_mode:
params["marginMode"] = self.margin_mode.value.lower()
return params
def dry_run_liquidation_price(
self,
pair: str,
open_rate: float,
is_short: bool,
amount: float,
stake_amount: float,
leverage: float,
wallet_balance: float,
open_trades: list,
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
https://www.bitget.com/support/articles/12560603808759
MMR: Maintenance margin rate of the trading pair.
CoinMainIndexPrice: The index price for Coin-M futures. For USDT-M futures,
the index price is 1.
TakerFeeRatio: The fee rate applied when placing taker orders.
Position direction: The current position direction of the trading pair.
1 indicates a long position, and -1 indicates a short position.
Formula:
Estimated liquidation price = [
position margin - position size x average entry price x position direction
] ÷ [position size x (MMR + TakerFeeRatio - position direction)]
:param pair: Pair to calculate liquidation price for
:param open_rate: Entry price of position
:param is_short: True if the trade is a short, false otherwise
:param amount: Absolute value of position size incl. leverage (in base currency)
:param stake_amount: Stake amount - Collateral in settle currency.
:param leverage: Leverage used for this position.
:param wallet_balance: Amount of margin_mode in the wallet being used to trade
Cross-Margin Mode: crossWalletBalance
Isolated-Margin Mode: isolatedWalletBalance
:param open_trades: List of other open trades in the same wallet
"""
market = self.markets[pair]
taker_fee_rate = market["taker"] or self._api.describe().get("fees", {}).get(
"trading", {}
).get("taker", 0.001)
mm_ratio, _ = self.get_maintenance_ratio_and_amt(pair, stake_amount)
if self.trading_mode == TradingMode.FUTURES and self.margin_mode == MarginMode.ISOLATED:
position_direction = -1 if is_short else 1
return (wallet_balance - (amount * open_rate * position_direction)) / (
amount * (mm_ratio + taker_fee_rate - position_direction)
)
else:
raise OperationalException(
"Freqtrade currently only supports isolated futures for bitget"
)
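# Worked example of the isolated-margin formula above (all numbers are
# illustrative assumptions, not exchange data): a long with open_rate=100,
# amount=1, wallet_balance=10 (10x), mm_ratio=0.005, taker_fee_rate=0.0006:
#   (10 - 1 * 100 * 1) / (1 * (0.005 + 0.0006 - 1)) = -90 / -0.9944 ≈ 90.51
# i.e. the position would be liquidated after roughly a 9.5% adverse move.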
def check_delisting_time(self, pair: str) -> datetime | None:
"""
Check if the pair is going to be delisted.
By default, it returns None.
:param pair: Market symbol
:return: Datetime if the pair is going to be delisted, None otherwise
"""
if self._config["runmode"] in OPTIMIZE_MODES:
return None
if self.trading_mode == TradingMode.FUTURES:
return self._check_delisting_futures(pair)
return None
def _check_delisting_futures(self, pair: str) -> datetime | None:
delivery_time = self.markets.get(pair, {}).get("info", {}).get("limitOpenTime", None)
if delivery_time:
if isinstance(delivery_time, str) and (delivery_time != ""):
delivery_time = int(delivery_time)
if not isinstance(delivery_time, int) or delivery_time <= 0:
return None
max_delivery = dt_ts() + (
14 * 24 * 60 * 60 * 1000
) # Assume exchanges don't announce delistings more than 14 days in advance
if delivery_time < max_delivery:
return dt_from_ts(delivery_time)
return None
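# Illustrative behaviour of the helper above (market values are assumptions):
#   info["limitOpenTime"] == "-1" -> None (no delisting scheduled)
#   info["limitOpenTime"] == "1760745600000" (2025-10-18 UTC) -> returned as a
#   datetime when it is less than ~14 days in the future (past timestamps
#   included), otherwise None.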
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/exchange/bitget.py",
"license": "GNU General Public License v3.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
freqtrade/freqtrade:tests/exchange/test_bitget.py | from copy import deepcopy
from datetime import timedelta
from unittest.mock import MagicMock, PropertyMock
import pytest
from freqtrade.enums import CandleType, MarginMode, RunMode, TradingMode
from freqtrade.exceptions import OperationalException, RetryableOrderError
from freqtrade.exchange.common import API_RETRY_COUNT
from freqtrade.util import dt_now, dt_ts, dt_utc
from tests.conftest import EXMS, get_patched_exchange
from tests.exchange.test_exchange import ccxt_exceptionhandlers
@pytest.mark.usefixtures("init_persistence")
def test_fetch_stoploss_order_bitget(default_conf, mocker):
default_conf["dry_run"] = False
mocker.patch("freqtrade.exchange.common.time.sleep")
api_mock = MagicMock()
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange="bitget")
api_mock.fetch_open_orders = MagicMock(return_value=[])
api_mock.fetch_canceled_and_closed_orders = MagicMock(return_value=[])
with pytest.raises(RetryableOrderError):
exchange.fetch_stoploss_order("1234", "ETH/BTC")
assert api_mock.fetch_open_orders.call_count == API_RETRY_COUNT + 1
assert api_mock.fetch_canceled_and_closed_orders.call_count == API_RETRY_COUNT + 1
api_mock.fetch_open_orders.reset_mock()
api_mock.fetch_canceled_and_closed_orders.reset_mock()
api_mock.fetch_canceled_and_closed_orders = MagicMock(
return_value=[{"id": "1234", "status": "closed", "clientOrderId": "123455"}]
)
api_mock.fetch_open_orders = MagicMock(return_value=[{"id": "50110", "clientOrderId": "1234"}])
resp = exchange.fetch_stoploss_order("1234", "ETH/BTC")
assert api_mock.fetch_open_orders.call_count == 2
assert api_mock.fetch_canceled_and_closed_orders.call_count == 2
assert resp["id"] == "1234"
assert resp["id_stop"] == "50110"
assert resp["type"] == "stoploss"
default_conf["dry_run"] = True
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange="bitget")
dro_mock = mocker.patch(f"{EXMS}.fetch_dry_run_order", MagicMock(return_value={"id": "123455"}))
api_mock.fetch_open_orders.reset_mock()
api_mock.fetch_canceled_and_closed_orders.reset_mock()
resp = exchange.fetch_stoploss_order("1234", "ETH/BTC")
assert api_mock.fetch_open_orders.call_count == 0
assert api_mock.fetch_canceled_and_closed_orders.call_count == 0
assert dro_mock.call_count == 1
def test_fetch_stoploss_order_bitget_exceptions(default_conf_usdt, mocker):
default_conf_usdt["dry_run"] = False
api_mock = MagicMock()
# Test emulation of the stoploss getters
api_mock.fetch_canceled_and_closed_orders = MagicMock(return_value=[])
ccxt_exceptionhandlers(
mocker,
default_conf_usdt,
api_mock,
"bitget",
"fetch_stoploss_order",
"fetch_open_orders",
retries=API_RETRY_COUNT + 1,
order_id="12345",
pair="ETH/USDT",
)
def test_bitget_ohlcv_candle_limit(mocker, default_conf_usdt):
# This test is also a live test - so we're sure our limits are correct.
api_mock = MagicMock()
api_mock.options = {
"fetchOHLCV": {
"maxRecentDaysPerTimeframe": {
"1m": 30,
"5m": 30,
"15m": 30,
"30m": 30,
"1h": 60,
"4h": 60,
"1d": 60,
}
}
}
exch = get_patched_exchange(mocker, default_conf_usdt, api_mock, exchange="bitget")
timeframes = ("1m", "5m", "1h")
for timeframe in timeframes:
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT) == 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES) == 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK) == 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE) == 200
start_time = dt_ts(dt_now() - timedelta(days=17))
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
start_time = dt_ts(dt_now() - timedelta(days=48))
length = 200 if timeframe in ("1m", "5m") else 1000
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == length
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == length
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == length
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
start_time = dt_ts(dt_now() - timedelta(days=61))
length = 200
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == length
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == length
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == length
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
def test_additional_exchange_init_bitget(default_conf, mocker):
default_conf["dry_run"] = False
default_conf["trading_mode"] = TradingMode.FUTURES
default_conf["margin_mode"] = MarginMode.ISOLATED
api_mock = MagicMock()
api_mock.set_position_mode = MagicMock(return_value={})
get_patched_exchange(mocker, default_conf, exchange="bitget", api_mock=api_mock)
assert api_mock.set_position_mode.call_count == 1
ccxt_exceptionhandlers(
mocker, default_conf, api_mock, "bitget", "additional_exchange_init", "set_position_mode"
)
def test_dry_run_liquidation_price_cross_bitget(default_conf, mocker):
default_conf["dry_run"] = True
default_conf["trading_mode"] = TradingMode.FUTURES
default_conf["margin_mode"] = MarginMode.CROSS
api_mock = MagicMock()
mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", MagicMock(return_value=(0.005, 0.0)))
exchange = get_patched_exchange(mocker, default_conf, exchange="bitget", api_mock=api_mock)
with pytest.raises(
OperationalException, match="Freqtrade currently only supports isolated futures for bitget"
):
exchange.dry_run_liquidation_price(
"ETH/USDT:USDT",
100_000,
False,
0.1,
100,
10,
100,
[],
)
def test__lev_prep_bitget(default_conf, mocker):
api_mock = MagicMock()
api_mock.set_margin_mode = MagicMock()
api_mock.set_leverage = MagicMock()
type(api_mock).has = PropertyMock(return_value={"setMarginMode": True, "setLeverage": True})
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange="bitget")
exchange._lev_prep("BTC/USDC:USDC", 3.2, "buy")
assert api_mock.set_margin_mode.call_count == 0
assert api_mock.set_leverage.call_count == 0
# test in futures mode
api_mock.set_margin_mode.reset_mock()
api_mock.set_leverage.reset_mock()
default_conf["dry_run"] = False
default_conf["trading_mode"] = "futures"
default_conf["margin_mode"] = "isolated"
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange="bitget")
exchange._lev_prep("BTC/USDC:USDC", 3.2, "buy")
assert api_mock.set_margin_mode.call_count == 0
assert api_mock.set_leverage.call_count == 1
api_mock.set_leverage.assert_called_with(symbol="BTC/USDC:USDC", leverage=3.2)
api_mock.reset_mock()
exchange._lev_prep("BTC/USDC:USDC", 19.99, "sell")
assert api_mock.set_margin_mode.call_count == 0
assert api_mock.set_leverage.call_count == 1
api_mock.set_leverage.assert_called_with(symbol="BTC/USDC:USDC", leverage=19.99)
def test_check_delisting_time_bitget(default_conf_usdt, mocker):
exchange = get_patched_exchange(mocker, default_conf_usdt, exchange="bitget")
exchange._config["runmode"] = RunMode.BACKTEST
delist_fut_mock = MagicMock(return_value=None)
mocker.patch.object(exchange, "_check_delisting_futures", delist_fut_mock)
# Invalid run mode
resp = exchange.check_delisting_time("BTC/USDT")
assert resp is None
assert delist_fut_mock.call_count == 0
# Delist spot called
exchange._config["runmode"] = RunMode.DRY_RUN
resp1 = exchange.check_delisting_time("BTC/USDT")
assert resp1 is None
assert delist_fut_mock.call_count == 0
# Delist futures called
exchange.trading_mode = TradingMode.FUTURES
resp1 = exchange.check_delisting_time("BTC/USDT:USDT")
assert resp1 is None
assert delist_fut_mock.call_count == 1
def test__check_delisting_futures_bitget(default_conf_usdt, mocker, markets):
markets["BTC/USDT:USDT"] = deepcopy(markets["SOL/BUSD:BUSD"])
markets["BTC/USDT:USDT"]["info"]["limitOpenTime"] = "-1"
markets["SOL/BUSD:BUSD"]["info"]["limitOpenTime"] = "-1"
markets["ADA/USDT:USDT"]["info"]["limitOpenTime"] = "1760745600000" # 2025-10-18
exchange = get_patched_exchange(mocker, default_conf_usdt, exchange="bitget")
mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets))
resp_sol = exchange._check_delisting_futures("SOL/BUSD:BUSD")
# No delisting date
assert resp_sol is None
# Has a delisting date
resp_ada = exchange._check_delisting_futures("ADA/USDT:USDT")
assert resp_ada == dt_utc(2025, 10, 18)
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "tests/exchange/test_bitget.py",
"license": "GNU General Public License v3.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
freqtrade/freqtrade:tests/strategy/test_strategy_parameters.py | # pragma pylint: disable=missing-docstring, C0103
import pytest
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer
from freqtrade.strategy.parameters import (
BaseParameter,
BooleanParameter,
CategoricalParameter,
DecimalParameter,
IntParameter,
RealParameter,
)
def test_hyperopt_int_parameter():
from optuna.distributions import IntDistribution
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
with pytest.raises(OperationalException, match=r"Name is determined.*"):
IntParameter(low=0, high=5, default=1, name="hello")
with pytest.raises(OperationalException, match=r"IntParameter space must be.*"):
IntParameter(low=0, default=5, space="buy")
with pytest.raises(OperationalException, match=r"IntParameter space invalid\."):
IntParameter([0, 10], high=7, default=5, space="buy")
intpar = IntParameter(low=0, high=5, default=1, space="buy")
assert intpar.value == 1
assert isinstance(intpar.get_space(""), IntDistribution)
assert isinstance(intpar.range, range)
assert len(list(intpar.range)) == 1
# Range contains ONLY the default / value.
assert list(intpar.range) == [intpar.value]
intpar.in_space = True
assert len(list(intpar.range)) == 6
assert list(intpar.range) == [0, 1, 2, 3, 4, 5]
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
assert len(list(intpar.range)) == 1
assert intpar.param_type == "IntParameter"
def test_hyperopt_real_parameter():
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
from optuna.distributions import FloatDistribution
with pytest.raises(OperationalException, match=r"RealParameter space must be.*"):
RealParameter(low=0, default=5, space="buy")
with pytest.raises(OperationalException, match=r"RealParameter space invalid\."):
RealParameter([0, 10], high=7, default=5, space="buy")
fltpar = RealParameter(low=0.0, high=5.5, default=1.0, space="buy")
assert fltpar.value == 1.0
assert isinstance(fltpar.get_space(""), FloatDistribution)
assert not hasattr(fltpar, "range")
assert fltpar.param_type == "RealParameter"
def test_hyperopt_decimal_parameter():
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
from freqtrade.optimize.space import SKDecimal
with pytest.raises(OperationalException, match=r"DecimalParameter space must be.*"):
DecimalParameter(low=0, default=5, space="buy")
with pytest.raises(OperationalException, match=r"DecimalParameter space invalid\."):
DecimalParameter([0, 10], high=7, default=5, space="buy")
decimalpar = DecimalParameter(low=0.0, high=0.5, default=0.14, decimals=1, space="buy")
assert decimalpar.value == 0.1
assert isinstance(decimalpar.get_space(""), SKDecimal)
assert isinstance(decimalpar.range, list)
assert len(list(decimalpar.range)) == 1
# Range contains ONLY the default / value.
assert list(decimalpar.range) == [decimalpar.value]
decimalpar.in_space = True
assert len(list(decimalpar.range)) == 6
assert list(decimalpar.range) == [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
decimalpar2 = DecimalParameter(low=0.01, high=0.03, decimals=3, default=0.02, space="buy")
decimalpar2.in_space = True
assert len(list(decimalpar2.range)) == 21
expected_range = [round(0.01 + i * 0.001, 3) for i in range(21)]
assert list(decimalpar2.range) == expected_range
assert decimalpar2.value == 0.02
decimalpar2.value = 0.022222
assert decimalpar2.value == 0.022
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
assert len(list(decimalpar.range)) == 1
assert decimalpar.param_type == "DecimalParameter"
def test_hyperopt_categorical_parameter():
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
from optuna.distributions import CategoricalDistribution
with pytest.raises(OperationalException, match=r"CategoricalParameter space must.*"):
CategoricalParameter(["aa"], default="aa", space="buy")
with pytest.raises(TypeError):
BaseParameter(opt_range=[0, 1], default=1, space="buy")
catpar = CategoricalParameter(
["buy_rsi", "buy_macd", "buy_none"], default="buy_macd", space="buy"
)
assert catpar.value == "buy_macd"
assert isinstance(catpar.get_space(""), CategoricalDistribution)
assert isinstance(catpar.range, list)
assert len(list(catpar.range)) == 1
# Range contains ONLY the default / value.
assert list(catpar.range) == [catpar.value]
catpar.in_space = True
assert len(list(catpar.range)) == 3
assert list(catpar.range) == ["buy_rsi", "buy_macd", "buy_none"]
boolpar = BooleanParameter(default=True, space="buy")
assert boolpar.value is True
assert isinstance(boolpar.get_space(""), CategoricalDistribution)
assert isinstance(boolpar.range, list)
assert len(list(boolpar.range)) == 1
boolpar.in_space = True
assert len(list(boolpar.range)) == 2
assert list(boolpar.range) == [True, False]
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
assert len(list(catpar.range)) == 1
assert len(list(boolpar.range)) == 1
assert boolpar.param_type == "BooleanParameter"
assert catpar.param_type == "CategoricalParameter"
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "tests/strategy/test_strategy_parameters.py",
"license": "GNU General Public License v3.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
freqtrade/freqtrade:freqtrade/exchange/luno.py | import logging
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
logger = logging.getLogger(__name__)
class Luno(Exchange):
"""
Luno exchange class. Contains adjustments needed for Freqtrade to work
with this exchange.
Please note that this exchange is not included in the list of exchanges
officially supported by the Freqtrade development team. So some features
may still not work as expected.
"""
_ft_has: FtHas = {
"ohlcv_has_history": False, # Only provides the last 1000 candles
"always_require_api_keys": True, # Requires API keys to fetch candles
"trades_has_history": False, # Only the last 24h are available
}
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/exchange/luno.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
freqtrade/freqtrade:freqtrade/exchange/modetrade.py | import logging
# from freqtrade.enums import MarginMode, TradingMode
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
logger = logging.getLogger(__name__)
class Modetrade(Exchange):
"""
Modetrade exchange class. Contains adjustments needed for Freqtrade to work
with this exchange.
Please note that this exchange is not included in the list of exchanges
officially supported by the Freqtrade development team. So some features
may still not work as expected.
"""
_ft_has: FtHas = {
"always_require_api_keys": True, # Requires API keys to fetch candles
}
# _supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [
# (TradingMode.FUTURES, MarginMode.ISOLATED),
# ]
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "freqtrade/exchange/modetrade.py",
"license": "GNU General Public License v3.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
freqtrade/freqtrade:tests/data/test_historic_precision.py | # pragma pylint: disable=missing-docstring, C0103
from datetime import UTC
import pandas as pd
from numpy import nan
from pandas import DataFrame, Timestamp
from freqtrade.data.btanalysis.historic_precision import get_tick_size_over_time
def test_get_tick_size_over_time():
"""
Test the get_tick_size_over_time function with predefined data
"""
# Create test dataframe with different levels of precision
data = {
"date": [
Timestamp("2020-01-01 00:00:00", tz=UTC),
Timestamp("2020-01-02 00:00:00", tz=UTC),
Timestamp("2020-01-03 00:00:00", tz=UTC),
Timestamp("2020-01-15 00:00:00", tz=UTC),
Timestamp("2020-01-16 00:00:00", tz=UTC),
Timestamp("2020-01-31 00:00:00", tz=UTC),
Timestamp("2020-02-01 00:00:00", tz=UTC),
Timestamp("2020-02-15 00:00:00", tz=UTC),
Timestamp("2020-03-15 00:00:00", tz=UTC),
],
"open": [1.23456, 1.234, 1.23, 1.2, 1.23456, 1.234, 2.3456, 2.34, 2.34],
"high": [1.23457, 1.235, 1.24, 1.3, 1.23456, 1.235, 2.3457, 2.34, 2.34],
"low": [1.23455, 1.233, 1.22, 1.1, 1.23456, 1.233, 2.3455, 2.34, 2.34],
"close": [1.23456, 1.234, 1.23, 1.2, 1.23456, 1.234, 2.3456, 2.34, 2.34],
"volume": [100, 200, 300, 400, 500, 600, 700, 800, 900],
}
candles = DataFrame(data)
# Calculate significant digits
result = get_tick_size_over_time(candles)
# Check that the result is a pandas Series
assert isinstance(result, pd.Series)
# Check that we have three months of data (Jan, Feb and March 2020)
assert len(result) == 3
# Before
assert result.asof("2019-01-01 00:00:00+00:00") is nan
# January should have 5 decimal places of precision (based on 1.23456 being the
# most precise value), which corresponds to a tick size of 0.00001
assert result.asof("2020-01-01 00:00:00+00:00") == 0.00001
assert result.asof("2020-02-25 00:00:00+00:00") == 0.0001
assert result.asof("2020-03-25 00:00:00+00:00") == 0.01
assert result.asof("2020-04-01 00:00:00+00:00") == 0.01
# Value far past the last date should be the last value
assert result.asof("2025-04-01 00:00:00+00:00") == 0.01
assert result.iloc[0] == 0.00001
def test_get_tick_size_over_time_real_data(testdatadir):
"""
Test the get_tick_size_over_time function with real data from the testdatadir
"""
from freqtrade.data.history import load_pair_history
# Load some test data from the testdata directory
pair = "UNITTEST/BTC"
timeframe = "1m"
candles = load_pair_history(
datadir=testdatadir,
pair=pair,
timeframe=timeframe,
)
# Make sure we have test data
assert not candles.empty, "No test data found, cannot run test"
# Calculate significant digits
result = get_tick_size_over_time(candles)
assert isinstance(result, pd.Series)
# Verify that all values are between 0 and 1 (valid precision values)
assert all(result > 0)
assert all(result < 1)
assert all(result <= 0.0001)
assert all(result >= 0.00000001)
def test_get_tick_size_over_time_small_numbers():
"""
Test the get_tick_size_over_time function with predefined data
"""
# Create test dataframe with different levels of precision
data = {
"date": [
Timestamp("2020-01-01 00:00:00", tz=UTC),
Timestamp("2020-01-02 00:00:00", tz=UTC),
Timestamp("2020-01-03 00:00:00", tz=UTC),
Timestamp("2020-01-15 00:00:00", tz=UTC),
Timestamp("2020-01-16 00:00:00", tz=UTC),
Timestamp("2020-01-31 00:00:00", tz=UTC),
Timestamp("2020-02-01 00:00:00", tz=UTC),
Timestamp("2020-02-15 00:00:00", tz=UTC),
Timestamp("2020-03-15 00:00:00", tz=UTC),
],
"open": [
0.000000123456,
0.0000001234,
0.000000123,
0.00000012,
0.000000123456,
0.0000001234,
0.00000023456,
0.000000234,
0.000000234,
],
"high": [
0.000000123457,
0.0000001235,
0.000000124,
0.00000013,
0.000000123456,
0.0000001235,
0.00000023457,
0.000000234,
0.000000234,
],
"low": [
0.000000123455,
0.0000001233,
0.000000122,
0.00000011,
0.000000123456,
0.0000001233,
0.00000023455,
0.000000234,
0.000000234,
],
"close": [
0.000000123456,
0.0000001234,
0.000000123,
0.00000012,
0.000000123456,
0.0000001234,
0.00000023456,
0.000000234,
0.000000234,
],
"volume": [100, 200, 300, 400, 500, 600, 700, 800, 900],
}
candles = DataFrame(data)
# Calculate significant digits
result = get_tick_size_over_time(candles)
# Check that the result is a pandas Series
assert isinstance(result, pd.Series)
# Check that we have three months of data (Jan, Feb and March 2020)
assert len(result) == 3
# Before
assert result.asof("2019-01-01 00:00:00+00:00") is nan
# January's most precise values (0.000000123456) need 12 decimal places,
# which corresponds to a tick size of 0.000000000001
assert result.asof("2020-01-01 00:00:00+00:00") == 0.000000000001
assert result.asof("2020-02-25 00:00:00+00:00") == 0.00000000001
assert result.asof("2020-03-25 00:00:00+00:00") == 0.000000001
assert result.asof("2020-04-01 00:00:00+00:00") == 0.000000001
# Value far past the last date should be the last value
assert result.asof("2025-04-01 00:00:00+00:00") == 0.000000001
assert result.iloc[0] == 0.000000000001
def test_get_tick_size_over_time_big_numbers():
"""
Test the get_tick_size_over_time function with predefined data
"""
# Create test dataframe with different levels of precision
data = {
"date": [
Timestamp("2020-01-01 00:00:00", tz=UTC),
Timestamp("2020-01-02 00:00:00", tz=UTC),
Timestamp("2020-01-03 00:00:00", tz=UTC),
Timestamp("2020-01-15 00:00:00", tz=UTC),
Timestamp("2020-01-16 00:00:00", tz=UTC),
Timestamp("2020-01-31 00:00:00", tz=UTC),
Timestamp("2020-02-01 00:00:00", tz=UTC),
Timestamp("2020-02-15 00:00:00", tz=UTC),
Timestamp("2020-03-15 00:00:00", tz=UTC),
],
"open": [
12345.123456,
12345.1234,
12345.123,
12345.12,
12345.123456,
12345.1234,
12345.23456,
12345,
12345.234,
],
"high": [
12345.123457,
12345.1235,
12345.124,
12345.13,
12345.123456,
12345.1235,
12345.23457,
12345,
12345.234,
],
"low": [
12345.123455,
12345.1233,
12345.122,
12345.11,
12345.123456,
12345.1233,
12345.23455,
12345,
12345.234,
],
"close": [
12345.123456,
12345.1234,
12345.123,
12345.12,
12345.123456,
12345.1234,
12345.23456,
12345,
12345.234,
],
"volume": [100, 200, 300, 400, 500, 600, 700, 800, 900],
}
candles = DataFrame(data)
# Calculate significant digits
result = get_tick_size_over_time(candles)
# Check that the result is a pandas Series
assert isinstance(result, pd.Series)
# Check that we have three months of data (Jan, Feb and March 2020)
assert len(result) == 3
# Before
assert result.asof("2019-01-01 00:00:00+00:00") is nan
# January's most precise values (12345.123456) need 6 decimal places,
# which corresponds to a tick size of 0.000001
assert result.asof("2020-01-01 00:00:00+00:00") == 0.000001
assert result.asof("2020-02-25 00:00:00+00:00") == 0.00001
assert result.asof("2020-03-25 00:00:00+00:00") == 0.001
assert result.asof("2020-04-01 00:00:00+00:00") == 0.001
# Value far past the last date should be the last value
assert result.asof("2025-04-01 00:00:00+00:00") == 0.001
assert result.iloc[0] == 0.000001
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "tests/data/test_historic_precision.py",
"license": "GNU General Public License v3.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
freqtrade/freqtrade:tests/util/test_wallet_util.py | import pytest
from freqtrade.util import get_dry_run_wallet
@pytest.mark.parametrize(
"wallet,stake_currency,expected",
[
(1000, "USDT", 1000),
({"USDT": 1000, "USDC": 500}, "USDT", 1000),
({"USDT": 1000, "USDC": 500}, "USDC", 500),
({"USDT": 1000, "USDC": 500}, "NOCURR", 0.0),
],
)
def test_get_dry_run_wallet(default_conf_usdt, wallet, stake_currency, expected):
# As int
default_conf_usdt["dry_run_wallet"] = wallet
default_conf_usdt["stake_currency"] = stake_currency
assert get_dry_run_wallet(default_conf_usdt) == expected
| {
"repo_id": "freqtrade/freqtrade",
"file_path": "tests/util/test_wallet_util.py",
"license": "GNU General Public License v3.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
geekcomputers/Python:Collatz Sequence/Collaze-Visualize.py | import time
import matplotlib.pyplot as plt
def collatz_sequence(n):
"""Generate the Collatz sequence for n."""
steps = [n]
while n != 1:
n = n // 2 if n % 2 == 0 else 3 * n + 1
steps.append(n)
return steps
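# Hand-checked examples of the sequence helper above (illustrative):
#   collatz_sequence(6) -> [6, 3, 10, 5, 16, 8, 4, 2, 1]
#   collatz_sequence(1) -> [1]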
def visualize(sequence, title="Collatz Sequence"):
plt.clf()
plt.plot(sequence, marker='o')
plt.title(title)
plt.xlabel("Step")
plt.ylabel("Value")
plt.yscale("log") # makes visualization MUCH nicer
plt.grid(True)
plt.pause(0.01)
def auto_mode(interval):
print("\nAuto mode started.")
print("Press SPACE in the plot window to stop.\n")
plt.ion()
stop = False
def on_key(event):
nonlocal stop
if event.key == ' ':
stop = True
fig = plt.figure()
fig.canvas.mpl_connect("key_press_event", on_key)
n = 1
while not stop:
seq = collatz_sequence(n)
visualize(seq, f"Collatz Sequence for n = {n}")
n += 1
time.sleep(interval)
plt.ioff()
plt.show()
print("Auto mode stopped.")
# --- Main Program ---
try:
num = int(input("Enter a positive integer (or -1 for auto mode): "))
if num == -1:
interval = float(input("Enter step interval time (seconds): "))
auto_mode(interval)
elif num <= 0:
print("Please enter a positive number greater than 0.")
else:
seq = collatz_sequence(num)
print("\nCollatz sequence:")
for i, value in enumerate(seq, start=1):
print(f"Step {i}: {value}")
plt.ion()
visualize(seq, f"Collatz Sequence for n = {num}")
plt.ioff()
plt.show()
except ValueError:
print("Invalid input! Please enter a valid number.")
| {
"repo_id": "geekcomputers/Python",
"file_path": "Collatz Sequence/Collaze-Visualize.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:password_checker_code.py | import string
def check_password_strength(password):
strength = 0
# Criteria 1: Length (Must be at least 8 characters)
if len(password) >= 8:
strength += 1
# Criteria 2: Must contain Digits (0-9)
has_digit = False
for char in password:
if char.isdigit():
has_digit = True
break
if has_digit:
strength += 1
# Criteria 3: Must contain Uppercase Letters (A-Z)
has_upper = False
for char in password:
if char.isupper():
has_upper = True
break
if has_upper:
strength += 1
return strength
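# Hand-checked examples of the scoring above (illustrative):
#   check_password_strength("abc")      -> 0 (too short, no digit, no uppercase)
#   check_password_strength("abcdefgh") -> 1 (length only)
#   check_password_strength("Abcdefg1") -> 3 (length + digit + uppercase)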
if __name__ == "__main__":
print("--- Password Strength Checker ---")
# Note: We cannot run input() on the website, but this code is correct.
# If users download it, it will work.
print("Run this script locally to test your password!")
| {
"repo_id": "geekcomputers/Python",
"file_path": "password_checker_code.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:image_compressor.py | import os
import sys
from PIL import Image
def compress_image(image_path, quality=60):
"""
Compresses an image by reducing its quality.
Args:
image_path (str): Path to the image file.
quality (int): Quality of the output image (1-100). Default is 60.
"""
try:
# Open the image
with Image.open(image_path) as img:
# Check if file is an image
if img.format not in ["JPEG", "PNG"]:  # Pillow reports "JPEG", never "JPG"
print(f"Skipping {image_path}: Not a standard image format.")
return
# Create output filename
filename, ext = os.path.splitext(image_path)
output_path = f"{filename}_compressed{ext}"
# Save with reduced quality
# Optimize=True ensures the encoder does extra work to minimize size
img.save(output_path, quality=quality, optimize=True)
# Calculate savings
original_size = os.path.getsize(image_path)
new_size = os.path.getsize(output_path)
savings = ((original_size - new_size) / original_size) * 100
print(f"[+] Compressed: {output_path}")
print(f" Original: {original_size/1024:.2f} KB")
print(f" New: {new_size/1024:.2f} KB")
print(f" Saved: {savings:.2f}%")
except Exception as e:
print(f"[-] Error compressing {image_path}: {e}")
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python image_compressor.py <image_file>")
print("Example: python image_compressor.py photo.jpg")
else:
target_file = sys.argv[1]
if os.path.exists(target_file):
compress_image(target_file)
else:
print(f"Error: File '{target_file}' not found.") | {
"repo_id": "geekcomputers/Python",
"file_path": "image_compressor.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:photo_timestamp_renamer.py | #!/usr/bin/env python3
"""
Author: Ivan Costa Neto
Date: 13-01-26
Auto-rename photos by timestamp, so you can organize those vacation trip photos!!
Name format: YYYY-MM-DD_HH-MM-SS[_NN].ext
Uses EXIF DateTimeOriginal when available (best for JPEG),
otherwise falls back to the file's modified time.
Examples:
python rename_photos.py ~/Pictures/Trip --dry-run
python rename_photos.py ~/Pictures/Trip --recursive
python rename_photos.py . --prefix Japan --recursive
"""
from __future__ import annotations
import argparse
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
import re
import sys
SUPPORTED_EXTS = {".jpg", ".jpeg", ".png", ".heic", ".webp", ".tif", ".tiff"}
# EXIF support is optional (requires Pillow)
try:
from PIL import Image, ExifTags # type: ignore
PIL_OK = True
except Exception:
PIL_OK = False
def is_photo(p: Path) -> bool:
return p.is_file() and p.suffix.lower() in SUPPORTED_EXTS
def sanitize_prefix(s: str) -> str:
s = s.strip()
if not s:
return ""
s = re.sub(r"[^\w\-]+", "_", s)
return s[:50]
def exif_datetime_original(path: Path) -> datetime | None:
"""
Try to read EXIF DateTimeOriginal/DateTime from image.
Returns None if unavailable.
"""
if not PIL_OK:
return None
try:
img = Image.open(path)
exif = img.getexif()
if not exif:
return None
# map EXIF tag ids -> names
tag_map = dict(ExifTags.TAGS)
# common EXIF datetime tags
dto = None
dt = None
for tag_id, value in exif.items():
name = tag_map.get(tag_id)
if name == "DateTimeOriginal":
dto = value
elif name == "DateTime":
dt = value
raw = dto or dt
if not raw:
return None
# EXIF datetime format: "YYYY:MM:DD HH:MM:SS"
raw = str(raw).strip()
return datetime.strptime(raw, "%Y:%m:%d %H:%M:%S")
except Exception:
return None
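# Illustrative parse of the EXIF format handled above (the timestamp is an
# assumption): datetime.strptime("2026:01:13 09:30:00", "%Y:%m:%d %H:%M:%S")
# -> datetime(2026, 1, 13, 9, 30)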
def file_mtime(path: Path) -> datetime:
return datetime.fromtimestamp(path.stat().st_mtime)
def unique_name(dest_dir: Path, base: str, ext: str) -> Path:
"""
If base.ext exists, append _01, _02, ...
"""
cand = dest_dir / f"{base}{ext}"
if not cand.exists():
return cand
i = 1
while True:
cand = dest_dir / f"{base}_{i:02d}{ext}"
if not cand.exists():
return cand
i += 1
@dataclass
class Options:
folder: Path
recursive: bool
dry_run: bool
prefix: str
keep_original: bool # if true, don't rename if it already matches our format
def already_formatted(name: str) -> bool:
# matches: YYYY-MM-DD_HH-MM-SS or with prefix and/or _NN
pattern = r"^(?:[A-Za-z0-9_]+_)?\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?:_\d{2})?$"
return re.match(pattern, Path(name).stem) is not None
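# Hand-checked examples for the pattern above (filenames are illustrative):
#   already_formatted("2020-01-13_09-30-00.jpg")          -> True
#   already_formatted("Japan_2020-01-13_09-30-00_01.jpg") -> True
#   already_formatted("IMG_1234.jpg")                     -> False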
def gather_photos(folder: Path, recursive: bool) -> list[Path]:
if recursive:
return [p for p in folder.rglob("*") if is_photo(p)]
return [p for p in folder.iterdir() if is_photo(p)]
def rename_photos(opts: Options) -> int:
photos = gather_photos(opts.folder, opts.recursive)
photos.sort()
if not photos:
print("No supported photo files found.")
return 0
if opts.prefix:
pref = sanitize_prefix(opts.prefix)
else:
pref = ""
renamed = 0
for p in photos:
if opts.keep_original and already_formatted(p.name):
continue
dt = exif_datetime_original(p) or file_mtime(p)
base = dt.strftime("%Y-%m-%d_%H-%M-%S")
if pref:
base = f"{pref}_{base}"
dest = unique_name(p.parent, base, p.suffix.lower())
if dest.name == p.name:
continue
if opts.dry_run:
print(f"[DRY] {p.relative_to(opts.folder)} -> {dest.name}")
else:
p.rename(dest)
print(f"[OK ] {p.relative_to(opts.folder)} -> {dest.name}")
renamed += 1
if not opts.dry_run:
print(f"\nDone. Renamed {renamed} file(s).")
return renamed
def main(argv: list[str]) -> int:
ap = argparse.ArgumentParser(description="Auto-rename photos using EXIF date (or file modified time).")
ap.add_argument("folder", help="Folder containing photos")
ap.add_argument("--recursive", action="store_true", help="Process subfolders too")
ap.add_argument("--dry-run", action="store_true", help="Preview changes without renaming")
ap.add_argument("--prefix", default="", help="Optional prefix (e.g., Japan, RWTH, Trip)")
ap.add_argument("--keep-original", action="store_true",
help="Skip files that already match YYYY-MM-DD_HH-MM-SS naming")
args = ap.parse_args(argv)
folder = Path(args.folder).expanduser()
if not folder.exists() or not folder.is_dir():
print(f"Not a directory: {folder}", file=sys.stderr)
return 2
if not PIL_OK:
print("[Note] Pillow not installed; EXIF dates won't be read (mtime fallback only).")
print(" Install for best results: pip install pillow")
opts = Options(
folder=folder,
recursive=args.recursive,
dry_run=args.dry_run,
prefix=args.prefix,
keep_original=args.keep_original,
)
rename_photos(opts)
return 0
if __name__ == "__main__":
raise SystemExit(main(sys.argv[1:]))
| {
"repo_id": "geekcomputers/Python",
"file_path": "photo_timestamp_renamer.py",
"license": "MIT License",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/examples/neural_architecture_search.py | import sys
sys.path.insert(0, '.')
import torch
from src.python.neuralforge.nas.search_space import SearchSpace
from src.python.neuralforge.nas.evolution import EvolutionarySearch
from src.python.neuralforge.nas.evaluator import ProxyEvaluator
from src.python.neuralforge.data.dataset import SyntheticDataset, DataLoaderBuilder
from src.python.neuralforge.config import Config
def main():
config = Config()
config.nas_enabled = True
config.nas_population_size = 15
config.nas_generations = 20
config.nas_mutation_rate = 0.15
search_config = {
'num_layers': 15,
'num_blocks': 4
}
search_space = SearchSpace(search_config)
train_dataset = SyntheticDataset(num_samples=1000, num_classes=10)
val_dataset = SyntheticDataset(num_samples=200, num_classes=10)
loader_builder = DataLoaderBuilder(config)
train_loader = loader_builder.build_train_loader(train_dataset)
val_loader = loader_builder.build_val_loader(val_dataset)
evaluator = ProxyEvaluator(device=config.device)
evolution = EvolutionarySearch(
search_space=search_space,
evaluator=evaluator,
population_size=config.nas_population_size,
generations=config.nas_generations,
mutation_rate=config.nas_mutation_rate
)
print("Starting Neural Architecture Search...")
best_architecture = evolution.search()
print(f"\nBest Architecture Found:")
print(f"Fitness: {best_architecture.fitness:.4f}")
print(f"Accuracy: {best_architecture.accuracy:.2f}%")
print(f"Parameters: {best_architecture.params:,}")
print(f"FLOPs: {best_architecture.flops:,}")
print("\nTop 5 Architectures:")
top_k = evolution.get_top_k_architectures(k=5)
for i, arch in enumerate(top_k, 1):
print(f"{i}. Fitness: {arch.fitness:.4f}, Acc: {arch.accuracy:.2f}%, Params: {arch.params:,}")
model = search_space.build_model(best_architecture, num_classes=10)
print(f"\nModel created with {sum(p.numel() for p in model.parameters()):,} parameters")
if __name__ == '__main__':
main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/examples/neural_architecture_search.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/examples/train_cifar10.py | import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import torch
import torch.nn as nn
from src.python.neuralforge import Trainer, Config
from src.python.neuralforge.data.datasets import get_dataset
from src.python.neuralforge.data.dataset import DataLoaderBuilder
from src.python.neuralforge.models.resnet import ResNet18
from src.python.neuralforge.optim.optimizers import AdamW
from src.python.neuralforge.optim.schedulers import CosineAnnealingWarmRestarts
def main():
print("Training ResNet18 on CIFAR-10")
config = Config()
config.batch_size = 128
config.epochs = 100
config.learning_rate = 0.001
config.num_classes = 10
config.image_size = 32
config.model_name = "resnet18_cifar10"
config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Downloading CIFAR-10 dataset...")
train_dataset = get_dataset('cifar10', root='./data', train=True, download=True)
val_dataset = get_dataset('cifar10', root='./data', train=False, download=True)
print(f"Train: {len(train_dataset)} samples")
print(f"Val: {len(val_dataset)} samples")
loader_builder = DataLoaderBuilder(config)
train_loader = loader_builder.build_train_loader(train_dataset)
val_loader = loader_builder.build_val_loader(val_dataset)
model = ResNet18(num_classes=10, in_channels=3)
print(f"Model: {sum(p.numel() for p in model.parameters()):,} parameters")
criterion = nn.CrossEntropyLoss()
optimizer = AdamW(model.parameters(), lr=config.learning_rate, weight_decay=0.01)
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2)
trainer = Trainer(
model=model,
train_loader=train_loader,
val_loader=val_loader,
optimizer=optimizer,
criterion=criterion,
config=config,
scheduler=scheduler
)
print("Starting training...")
trainer.train()
print(f"\nTraining completed!")
print(f"Best validation loss: {trainer.best_val_loss:.4f}")
print(f"Model saved to: ./models/best_model.pt")
print(f"\nTest the model:")
print(f" python tests/test_model.py --dataset cifar10 --mode interactive")
if __name__ == '__main__':
main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/examples/train_cifar10.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/examples/train_custom.py | import sys
sys.path.insert(0, '.')
import torch
import torch.nn as nn
from src.python.neuralforge import Trainer, Config
from src.python.neuralforge.data.dataset import SyntheticDataset, DataLoaderBuilder
from src.python.neuralforge.models.resnet import ResNet18
from src.python.neuralforge.optim.optimizers import AdamW
from src.python.neuralforge.optim.schedulers import CosineAnnealingWarmRestarts
def main():
config = Config()
config.batch_size = 64
config.epochs = 100
config.learning_rate = 0.001
config.num_classes = 100
config.model_name = "resnet18_custom"
train_dataset = SyntheticDataset(num_samples=10000, num_classes=100)
val_dataset = SyntheticDataset(num_samples=2000, num_classes=100)
loader_builder = DataLoaderBuilder(config)
train_loader = loader_builder.build_train_loader(train_dataset)
val_loader = loader_builder.build_val_loader(val_dataset)
model = ResNet18(num_classes=100)
criterion = nn.CrossEntropyLoss()
optimizer = AdamW(model.parameters(), lr=config.learning_rate, weight_decay=0.01)
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2)
trainer = Trainer(
model=model,
train_loader=train_loader,
val_loader=val_loader,
optimizer=optimizer,
criterion=criterion,
config=config,
scheduler=scheduler
)
trainer.train()
print(f"Best validation loss: {trainer.best_val_loss:.4f}")
if __name__ == '__main__':
main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/examples/train_custom.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/cli/gui.py | import sys
import os
def main():
try:
from PyQt6.QtWidgets import QApplication
except ImportError:
print("Error: PyQt6 not installed")
print("Install with: pip install neuralforge[gui]")
print("Or: pip install PyQt6")
sys.exit(1)
current_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(current_dir))))
sys.path.insert(0, root_dir)
from PyQt6.QtWidgets import (QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
QPushButton, QLabel, QLineEdit, QFileDialog,
QProgressBar, QTextEdit, QGroupBox)
from PyQt6.QtCore import Qt, QThread, pyqtSignal
from PyQt6.QtGui import QPixmap, QFont
import torch
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
from neuralforge.data.datasets import get_dataset, get_num_classes
from neuralforge.models.resnet import ResNet18
class PredictionThread(QThread):
finished = pyqtSignal(list, list, str)
error = pyqtSignal(str)
def __init__(self, model, image_path, classes, device):
super().__init__()
self.model = model
self.image_path = image_path
self.classes = classes
self.device = device
def run(self):
try:
image = Image.open(self.image_path).convert('RGB')
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
image_tensor = transform(image).unsqueeze(0).to(self.device)
with torch.no_grad():
outputs = self.model(image_tensor)
probabilities = F.softmax(outputs, dim=1)
top5_prob, top5_idx = torch.topk(probabilities, min(5, len(self.classes)), dim=1)
predictions = []
confidences = []
for idx, prob in zip(top5_idx[0].cpu().numpy(), top5_prob[0].cpu().numpy()):
predictions.append(self.classes[idx])
confidences.append(float(prob) * 100)
main_prediction = predictions[0]
self.finished.emit(predictions, confidences, main_prediction)
except Exception as e:
self.error.emit(str(e))
class NeuralForgeGUI(QMainWindow):
def __init__(self):
super().__init__()
self.model = None
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.classes = []
self.dataset_name = 'cifar10'
self.init_ui()
self.apply_stylesheet()
def init_ui(self):
self.setWindowTitle('NeuralForge - Model Tester')
self.setGeometry(100, 100, 1200, 800)
central_widget = QWidget()
self.setCentralWidget(central_widget)
main_layout = QHBoxLayout()
central_widget.setLayout(main_layout)
left_panel = self.create_left_panel()
right_panel = self.create_right_panel()
main_layout.addWidget(left_panel, 1)
main_layout.addWidget(right_panel, 1)
def create_left_panel(self):
panel = QWidget()
layout = QVBoxLayout()
panel.setLayout(layout)
title = QLabel('🚀 NeuralForge Model Tester')
title.setFont(QFont('Arial', 20, QFont.Weight.Bold))
title.setAlignment(Qt.AlignmentFlag.AlignCenter)
layout.addWidget(title)
model_group = QGroupBox('Model Selection')
model_layout = QVBoxLayout()
model_path_layout = QHBoxLayout()
self.model_path_input = QLineEdit()
self.model_path_input.setPlaceholderText('Path to model file (.pt)')
model_path_layout.addWidget(self.model_path_input)
browse_btn = QPushButton('Browse')
browse_btn.clicked.connect(self.browse_model)
model_path_layout.addWidget(browse_btn)
default_btn = QPushButton('Use Default')
default_btn.clicked.connect(self.use_default_model)
model_path_layout.addWidget(default_btn)
model_layout.addLayout(model_path_layout)
dataset_layout = QHBoxLayout()
dataset_label = QLabel('Dataset:')
self.dataset_input = QLineEdit('cifar10')
self.dataset_input.setPlaceholderText('cifar10, mnist, stl10, tiny_imagenet, etc.')
self.dataset_input.setToolTip('Supported: cifar10, cifar100, mnist, fashion_mnist, stl10,\ntiny_imagenet, imagenet, food101, caltech256, oxford_pets')
dataset_layout.addWidget(dataset_label)
dataset_layout.addWidget(self.dataset_input)
model_layout.addLayout(dataset_layout)
self.load_model_btn = QPushButton('Load Model')
self.load_model_btn.clicked.connect(self.load_model)
model_layout.addWidget(self.load_model_btn)
self.model_status = QLabel('No model loaded')
self.model_status.setAlignment(Qt.AlignmentFlag.AlignCenter)
model_layout.addWidget(self.model_status)
model_group.setLayout(model_layout)
layout.addWidget(model_group)
image_group = QGroupBox('Image Selection')
image_layout = QVBoxLayout()
image_path_layout = QHBoxLayout()
self.image_path_input = QLineEdit()
self.image_path_input.setPlaceholderText('Path to image file')
image_path_layout.addWidget(self.image_path_input)
browse_image_btn = QPushButton('Browse')
browse_image_btn.clicked.connect(self.browse_image)
image_path_layout.addWidget(browse_image_btn)
image_layout.addLayout(image_path_layout)
self.image_preview = QLabel()
self.image_preview.setAlignment(Qt.AlignmentFlag.AlignCenter)
self.image_preview.setMinimumHeight(300)
self.image_preview.setStyleSheet('border: 2px dashed #666; border-radius: 10px;')
self.image_preview.setText('No image selected')
image_layout.addWidget(self.image_preview)
self.predict_btn = QPushButton('🔍 Predict')
self.predict_btn.clicked.connect(self.predict_image)
self.predict_btn.setEnabled(False)
image_layout.addWidget(self.predict_btn)
image_group.setLayout(image_layout)
layout.addWidget(image_group)
layout.addStretch()
return panel
def create_right_panel(self):
panel = QWidget()
layout = QVBoxLayout()
panel.setLayout(layout)
results_group = QGroupBox('Prediction Results')
results_layout = QVBoxLayout()
self.main_prediction = QLabel('No prediction yet')
self.main_prediction.setFont(QFont('Arial', 24, QFont.Weight.Bold))
self.main_prediction.setAlignment(Qt.AlignmentFlag.AlignCenter)
self.main_prediction.setStyleSheet('color: #4CAF50; padding: 20px;')
results_layout.addWidget(self.main_prediction)
self.confidence_label = QLabel('')
self.confidence_label.setFont(QFont('Arial', 16))
self.confidence_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
results_layout.addWidget(self.confidence_label)
self.progress_bar = QProgressBar()
self.progress_bar.setVisible(False)
results_layout.addWidget(self.progress_bar)
results_group.setLayout(results_layout)
layout.addWidget(results_group)
top5_group = QGroupBox('Top-5 Predictions')
top5_layout = QVBoxLayout()
self.top5_display = QTextEdit()
self.top5_display.setReadOnly(True)
self.top5_display.setMinimumHeight(200)
top5_layout.addWidget(self.top5_display)
top5_group.setLayout(top5_layout)
layout.addWidget(top5_group)
info_group = QGroupBox('Model Information')
info_layout = QVBoxLayout()
self.model_info = QTextEdit()
self.model_info.setReadOnly(True)
self.model_info.setMaximumHeight(150)
info_layout.addWidget(self.model_info)
info_group.setLayout(info_layout)
layout.addWidget(info_group)
layout.addStretch()
return panel
def apply_stylesheet(self):
qss = """
QMainWindow {
background-color: #1e1e1e;
}
QWidget {
background-color: #1e1e1e;
color: #e0e0e0;
font-family: 'Segoe UI', Arial;
font-size: 12px;
}
QGroupBox {
border: 2px solid #3d3d3d;
border-radius: 8px;
margin-top: 10px;
padding-top: 15px;
font-weight: bold;
color: #4CAF50;
}
QGroupBox::title {
subcontrol-origin: margin;
left: 10px;
padding: 0 5px;
}
QPushButton {
background-color: #4CAF50;
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
font-weight: bold;
font-size: 13px;
}
QPushButton:hover {
background-color: #45a049;
}
QPushButton:pressed {
background-color: #3d8b40;
}
QPushButton:disabled {
background-color: #555555;
color: #888888;
}
QLineEdit {
background-color: #2d2d2d;
border: 2px solid #3d3d3d;
border-radius: 5px;
padding: 8px;
color: #e0e0e0;
}
QLineEdit:focus {
border: 2px solid #4CAF50;
}
QTextEdit {
background-color: #2d2d2d;
border: 2px solid #3d3d3d;
border-radius: 5px;
padding: 10px;
color: #e0e0e0;
}
QLabel {
color: #e0e0e0;
}
QProgressBar {
border: 2px solid #3d3d3d;
border-radius: 5px;
text-align: center;
background-color: #2d2d2d;
}
QProgressBar::chunk {
background-color: #4CAF50;
border-radius: 3px;
}
"""
self.setStyleSheet(qss)
def browse_model(self):
file_path, _ = QFileDialog.getOpenFileName(
self,
'Select Model File',
'./models',
'Model Files (*.pt *.pth);;All Files (*.*)'
)
if file_path:
self.model_path_input.setText(file_path)
def use_default_model(self):
default_path = './models/final_model.pt'
if not os.path.exists(default_path):
default_path = './models/best_model.pt'
self.model_path_input.setText(os.path.abspath(default_path))
def browse_image(self):
file_path, _ = QFileDialog.getOpenFileName(
self,
'Select Image File',
'',
'Image Files (*.png *.jpg *.jpeg *.bmp *.gif);;All Files (*.*)'
)
if file_path:
self.image_path_input.setText(file_path)
self.display_image(file_path)
def display_image(self, image_path):
try:
pixmap = QPixmap(image_path)
scaled_pixmap = pixmap.scaled(400, 300, Qt.AspectRatioMode.KeepAspectRatio,
Qt.TransformationMode.SmoothTransformation)
self.image_preview.setPixmap(scaled_pixmap)
except Exception as e:
self.image_preview.setText(f'Error loading image: {e}')
def load_model(self):
model_path = self.model_path_input.text()
dataset_input = self.dataset_input.text().lower().strip()
dataset_aliases = {
'cifar10': 'cifar10', 'cifar-10': 'cifar10', 'cifar_10': 'cifar10',
'cifar100': 'cifar100', 'cifar-100': 'cifar100', 'cifar_100': 'cifar100',
'mnist': 'mnist',
'fashionmnist': 'fashion_mnist', 'fashion-mnist': 'fashion_mnist', 'fashion_mnist': 'fashion_mnist',
'stl10': 'stl10', 'stl-10': 'stl10', 'stl_10': 'stl10',
'tinyimagenet': 'tiny_imagenet', 'tiny-imagenet': 'tiny_imagenet', 'tiny_imagenet': 'tiny_imagenet',
'imagenet': 'imagenet',
'food101': 'food101', 'food-101': 'food101', 'food_101': 'food101',
'caltech256': 'caltech256', 'caltech-256': 'caltech256', 'caltech_256': 'caltech256',
'oxfordpets': 'oxford_pets', 'oxford-pets': 'oxford_pets', 'oxford_pets': 'oxford_pets',
}
self.dataset_name = dataset_aliases.get(dataset_input, dataset_input)
if not model_path:
self.model_status.setText('Please select a model file')
self.model_status.setStyleSheet('color: #f44336;')
return
if not os.path.exists(model_path):
self.model_status.setText('Model file not found')
self.model_status.setStyleSheet('color: #f44336;')
return
try:
self.model_status.setText('Loading model...')
self.model_status.setStyleSheet('color: #FFC107;')
QApplication.processEvents()
num_classes = get_num_classes(self.dataset_name)
self.model = ResNet18(num_classes=num_classes)
self.model = self.model.to(self.device)
checkpoint = torch.load(model_path, map_location=self.device, weights_only=False)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.model.eval()
try:
dataset = get_dataset(self.dataset_name, train=False, download=False)
self.classes = getattr(dataset, 'classes', [str(i) for i in range(num_classes)])
except Exception:
from neuralforge.data.datasets import get_class_names
self.classes = get_class_names(self.dataset_name)
self.model_status.setText(f'✓ Model loaded successfully')
self.model_status.setStyleSheet('color: #4CAF50;')
self.predict_btn.setEnabled(True)
total_params = sum(p.numel() for p in self.model.parameters())
epoch = checkpoint.get('epoch', 'Unknown')
val_loss = checkpoint.get('best_val_loss', 'Unknown')
val_loss_str = f"{val_loss:.4f}" if isinstance(val_loss, float) else str(val_loss)
info_text = f"""
Model: ResNet18
Dataset: {self.dataset_name.upper()}
Classes: {num_classes}
Parameters: {total_params:,}
Epoch: {epoch}
Best Val Loss: {val_loss_str}
Device: {self.device.upper()}
"""
self.model_info.setText(info_text.strip())
except Exception as e:
self.model_status.setText(f'Error: {str(e)}')
self.model_status.setStyleSheet('color: #f44336;')
def predict_image(self):
image_path = self.image_path_input.text()
if not image_path or not os.path.exists(image_path):
self.main_prediction.setText('Please select a valid image')
self.main_prediction.setStyleSheet('color: #f44336;')
return
if self.model is None:
self.main_prediction.setText('Please load a model first')
self.main_prediction.setStyleSheet('color: #f44336;')
return
self.predict_btn.setEnabled(False)
self.progress_bar.setVisible(True)
self.progress_bar.setRange(0, 0)
self.prediction_thread = PredictionThread(self.model, image_path, self.classes, self.device)
self.prediction_thread.finished.connect(self.display_results)
self.prediction_thread.error.connect(self.display_error)
self.prediction_thread.start()
def display_results(self, predictions, confidences, main_prediction):
self.progress_bar.setVisible(False)
self.predict_btn.setEnabled(True)
self.main_prediction.setText(f'🎯 {main_prediction}')
self.main_prediction.setStyleSheet('color: #4CAF50; padding: 20px; font-size: 28px;')
self.confidence_label.setText(f'Confidence: {confidences[0]:.2f}%')
top5_text = '<h3>Top-5 Predictions:</h3><hr>'
for i, (pred, conf) in enumerate(zip(predictions, confidences), 1):
bar_width = int(conf * 3)
bar = '█' * bar_width
top5_text += f'<p style="margin: 10px 0;"><b>{i}. {pred}</b><br>'
top5_text += f'<span style="color: #4CAF50;">{bar}</span> {conf:.2f}%</p>'
self.top5_display.setHtml(top5_text)
def display_error(self, error_msg):
self.progress_bar.setVisible(False)
self.predict_btn.setEnabled(True)
self.main_prediction.setText(f'Error: {error_msg}')
self.main_prediction.setStyleSheet('color: #f44336;')
def main():
    app = QApplication(sys.argv)
    window = NeuralForgeGUI()
    window.show()
    sys.exit(app.exec())

if __name__ == '__main__':
    main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/cli/gui.py",
"license": "MIT License",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/src/python/neuralforge/cli/nas.py | import argparse
import torch
from neuralforge.nas.search_space import SearchSpace
from neuralforge.nas.evolution import EvolutionarySearch
from neuralforge.nas.evaluator import ProxyEvaluator
from neuralforge.data.datasets import get_dataset
from neuralforge.data.dataset import SyntheticDataset, DataLoaderBuilder
from neuralforge.config import Config
def main():
parser = argparse.ArgumentParser(
description='NeuralForge - Neural Architecture Search',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
neuralforge-nas --population 20 --generations 50
neuralforge-nas --dataset cifar10 --population 15 --generations 30
"""
)
parser.add_argument('--dataset', type=str, default='synthetic', help='Dataset for evaluation')
parser.add_argument('--population', type=int, default=15, help='Population size')
parser.add_argument('--generations', type=int, default=20, help='Number of generations')
parser.add_argument('--mutation-rate', type=float, default=0.15, help='Mutation rate')
parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
args = parser.parse_args()
config = Config()
config.device = args.device
config.nas_enabled = True
config.nas_population_size = args.population
config.nas_generations = args.generations
config.nas_mutation_rate = args.mutation_rate
search_config = {
'num_layers': 15,
'num_blocks': 4
}
search_space = SearchSpace(search_config)
train_dataset = SyntheticDataset(num_samples=1000, num_classes=10)
val_dataset = SyntheticDataset(num_samples=200, num_classes=10)
loader_builder = DataLoaderBuilder(config)
train_loader = loader_builder.build_train_loader(train_dataset)
val_loader = loader_builder.build_val_loader(val_dataset)
evaluator = ProxyEvaluator(device=config.device)
evolution = EvolutionarySearch(
search_space=search_space,
evaluator=evaluator,
population_size=config.nas_population_size,
generations=config.nas_generations,
mutation_rate=config.nas_mutation_rate
)
print("Starting Neural Architecture Search...")
best_architecture = evolution.search()
print(f"\nBest Architecture Found:")
print(f"Fitness: {best_architecture.fitness:.4f}")
print(f"Accuracy: {best_architecture.accuracy:.2f}%")
print(f"Parameters: {best_architecture.params:,}")
print(f"FLOPs: {best_architecture.flops:,}")
if __name__ == '__main__':
main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/cli/nas.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/cli/test.py | import argparse
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import torch
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import numpy as np
from neuralforge.data.datasets import get_dataset, get_num_classes
from neuralforge.models.resnet import ResNet18
def main():
parser = argparse.ArgumentParser(
description='NeuralForge - Test trained models',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
neuralforge-test --model models/best_model.pt --dataset cifar10 --mode random
neuralforge-test --dataset mnist --mode accuracy
neuralforge-test --dataset stl10 --image cat.jpg
"""
)
default_model = './models/best_model.pt'
parser.add_argument('--model', type=str, default=default_model, help='Path to model checkpoint')
parser.add_argument('--dataset', type=str, default='cifar10', help='Dataset name')
parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
parser.add_argument('--mode', type=str, default='random', choices=['random', 'accuracy', 'interactive'])
parser.add_argument('--samples', type=int, default=10, help='Number of samples for random mode')
parser.add_argument('--image', type=str, default=None, help='Path to image file')
args = parser.parse_args()
print("=" * 60)
print(" NeuralForge - Model Testing")
print("=" * 60)
print(f"Device: {args.device}")
dataset_aliases = {
'cifar-10': 'cifar10', 'stl-10': 'stl10', 'fashion-mnist': 'fashion_mnist',
'tiny-imagenet': 'tiny_imagenet', 'food-101': 'food101',
}
dataset_name = dataset_aliases.get(args.dataset.lower(), args.dataset.lower())
num_classes = get_num_classes(dataset_name)
model = ResNet18(num_classes=num_classes)
model = model.to(args.device)
if os.path.exists(args.model):
print(f"Loading model from: {args.model}")
checkpoint = torch.load(args.model, map_location=args.device, weights_only=False)
model.load_state_dict(checkpoint['model_state_dict'])
print(f"Model loaded from epoch {checkpoint.get('epoch', 'Unknown')}")
else:
print(f"Warning: No model found at {args.model}")
return
model.eval()
test_dataset = get_dataset(dataset_name, root='./data', train=False, download=True)
classes = getattr(test_dataset, 'classes', [str(i) for i in range(num_classes)])
print(f"Dataset: {dataset_name} ({len(test_dataset.dataset)} test samples)")
print("=" * 60)
if args.image:
image = Image.open(args.image).convert('RGB')
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
image_tensor = transform(image).unsqueeze(0).to(args.device)
with torch.no_grad():
outputs = model(image_tensor)
probabilities = F.softmax(outputs, dim=1)
top5_prob, top5_idx = torch.topk(probabilities, min(5, num_classes), dim=1)
print(f"\nPrediction for {args.image}:")
print(f"Main: {classes[top5_idx[0][0].item()]} ({top5_prob[0][0].item()*100:.2f}%)")
print("\nTop-5:")
for i, (idx, prob) in enumerate(zip(top5_idx[0], top5_prob[0]), 1):
print(f" {i}. {classes[idx.item()]:15s} {prob.item()*100:.2f}%")
elif args.mode == 'random':
print(f"\nTesting {args.samples} random samples...")
print("-" * 60)
correct = 0
indices = np.random.choice(len(test_dataset.dataset), args.samples, replace=False)
for i, idx in enumerate(indices, 1):
image, label = test_dataset.dataset[idx]
with torch.no_grad():
image = image.unsqueeze(0).to(args.device)
outputs = model(image)
pred_class = outputs.argmax(1).item()
confidence = F.softmax(outputs, dim=1)[0][pred_class].item() * 100
is_correct = pred_class == label
correct += is_correct
status = "✓" if is_correct else "✗"
print(f"{i:2d}. {status} True: {classes[label]:15s} | Pred: {classes[pred_class]:15s} | Conf: {confidence:.1f}%")
print("-" * 60)
print(f"Accuracy: {correct/args.samples:.1%} ({correct}/{args.samples})")
elif args.mode == 'accuracy':
print("\nCalculating full test accuracy...")
correct = 0
total = 0
with torch.no_grad():
for image, label in test_dataset.dataset:
image = image.unsqueeze(0).to(args.device)
outputs = model(image)
pred_class = outputs.argmax(1).item()
total += 1
if pred_class == label:
correct += 1
if total % 100 == 0:
print(f"Processed {total}/{len(test_dataset.dataset)}...", end='\r')
print(f"\nOverall Accuracy: {100.0 * correct / total:.2f}% ({correct}/{total})")
if __name__ == '__main__':
main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/cli/test.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/src/python/neuralforge/cli/train.py | import argparse
import sys
import torch
import torch.nn as nn
import random
import numpy as np
from neuralforge.trainer import Trainer
from neuralforge.config import Config
from neuralforge.data.datasets import get_dataset, get_num_classes
from neuralforge.data.dataset import SyntheticDataset, DataLoaderBuilder
from neuralforge.models.resnet import ResNet18
from neuralforge.optim.optimizers import AdamW
from neuralforge.optim.schedulers import CosineAnnealingWarmRestarts, OneCycleLR
from neuralforge.utils.logger import Logger
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def create_simple_model(num_classes=10):
return nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(32, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(128, num_classes)
)
def main():
parser = argparse.ArgumentParser(
description='NeuralForge - Train neural networks with CUDA acceleration',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
neuralforge --dataset cifar10 --epochs 50
neuralforge --dataset mnist --model simple --batch-size 64
neuralforge --dataset stl10 --model resnet18 --epochs 100 --lr 0.001
neuralforge --dataset tiny_imagenet --batch-size 128 --epochs 200
"""
)
parser.add_argument('--config', type=str, default=None, help='Path to config file')
parser.add_argument('--model', type=str, default='simple',
choices=['simple', 'resnet18', 'efficientnet', 'vit'],
help='Model architecture')
parser.add_argument('--dataset', type=str, default='synthetic',
help='Dataset (cifar10, mnist, stl10, tiny_imagenet, etc.)')
parser.add_argument('--batch-size', type=int, default=32, help='Batch size')
parser.add_argument('--epochs', type=int, default=50, help='Number of epochs')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu',
help='Device (cuda/cpu)')
parser.add_argument('--num-samples', type=int, default=5000, help='Number of synthetic samples')
parser.add_argument('--num-classes', type=int, default=10, help='Number of classes (for synthetic)')
parser.add_argument('--seed', type=int, default=42, help='Random seed')
parser.add_argument('--optimizer', type=str, default='adamw',
choices=['adamw', 'adam', 'sgd'],
help='Optimizer')
parser.add_argument('--scheduler', type=str, default='cosine',
choices=['cosine', 'onecycle', 'none'],
help='Learning rate scheduler')
args = parser.parse_args()
if args.config:
config = Config.load(args.config)
else:
config = Config()
config.batch_size = args.batch_size
config.epochs = args.epochs
config.learning_rate = args.lr
config.device = args.device
config.num_classes = args.num_classes
config.seed = args.seed
config.optimizer = args.optimizer
config.scheduler = args.scheduler
# Set paths relative to current working directory (not package directory)
import os
cwd = os.getcwd()
config.model_dir = os.path.join(cwd, "models")
config.log_dir = os.path.join(cwd, "logs")
config.data_path = os.path.join(cwd, "data")
set_seed(config.seed)
logger = Logger(config.log_dir, "training")
logger.info("=" * 80)
logger.info("NeuralForge Training Framework")
logger.info("=" * 80)
logger.info(f"Configuration:\n{config}")
dataset_aliases = {
'cifar-10': 'cifar10', 'cifar_10': 'cifar10',
'cifar-100': 'cifar100', 'cifar_100': 'cifar100',
'fashion-mnist': 'fashion_mnist', 'fashionmnist': 'fashion_mnist',
'stl-10': 'stl10', 'stl_10': 'stl10',
'tiny-imagenet': 'tiny_imagenet', 'tinyimagenet': 'tiny_imagenet',
'food-101': 'food101', 'food_101': 'food101',
'caltech-256': 'caltech256', 'caltech_256': 'caltech256',
'oxford-pets': 'oxford_pets', 'oxfordpets': 'oxford_pets',
}
dataset_name = dataset_aliases.get(args.dataset.lower(), args.dataset.lower())
if dataset_name == 'synthetic':
logger.info("Creating synthetic dataset...")
train_dataset = SyntheticDataset(
num_samples=args.num_samples,
num_classes=config.num_classes,
image_size=config.image_size,
channels=3
)
val_dataset = SyntheticDataset(
num_samples=args.num_samples // 5,
num_classes=config.num_classes,
image_size=config.image_size,
channels=3
)
else:
logger.info(f"Downloading and loading {dataset_name} dataset...")
config.num_classes = get_num_classes(dataset_name)
train_dataset = get_dataset(dataset_name, root=config.data_path, train=True, download=True)
val_dataset = get_dataset(dataset_name, root=config.data_path, train=False, download=True)
if dataset_name in ['mnist', 'fashion_mnist']:
config.image_size = 28
elif dataset_name in ['cifar10', 'cifar100']:
config.image_size = 32
elif dataset_name == 'tiny_imagenet':
config.image_size = 64
elif dataset_name == 'stl10':
config.image_size = 96
elif dataset_name in ['imagenet', 'food101', 'caltech256', 'oxford_pets']:
config.image_size = 224
loader_builder = DataLoaderBuilder(config)
train_loader = loader_builder.build_train_loader(train_dataset)
val_loader = loader_builder.build_val_loader(val_dataset)
logger.info(f"Train dataset size: {len(train_dataset)}")
logger.info(f"Validation dataset size: {len(val_dataset)}")
logger.info(f"Creating model: {args.model}")
if args.model == 'simple':
model = create_simple_model(config.num_classes)
elif args.model == 'resnet18':
model = ResNet18(num_classes=config.num_classes)
    elif args.model == 'efficientnet':
        from neuralforge.models.efficientnet import EfficientNetB0
        model = EfficientNetB0(num_classes=config.num_classes)
    elif args.model == 'vit':
        from neuralforge.models.vit import VisionTransformer
        # note: the ViT factory assumes image_size is divisible by its 16-pixel patches
        model = VisionTransformer(img_size=config.image_size, num_classes=config.num_classes)
    else:
        model = create_simple_model(config.num_classes)
logger.log_model_summary(model)
criterion = nn.CrossEntropyLoss()
if config.optimizer.lower() == 'adamw':
optimizer = AdamW(model.parameters(), lr=config.learning_rate, weight_decay=config.weight_decay)
elif config.optimizer.lower() == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate, weight_decay=config.weight_decay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate, momentum=0.9, weight_decay=config.weight_decay)
scheduler = None
if config.scheduler == 'cosine':
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=1e-6)
elif config.scheduler == 'onecycle':
scheduler = OneCycleLR(optimizer, max_lr=config.learning_rate, total_steps=config.epochs * len(train_loader))
logger.info(f"Optimizer: {config.optimizer}")
logger.info(f"Scheduler: {config.scheduler}")
trainer = Trainer(
model=model,
train_loader=train_loader,
val_loader=val_loader,
optimizer=optimizer,
criterion=criterion,
config=config,
scheduler=scheduler,
device=config.device
)
logger.info("Starting training...")
trainer.train()
logger.info("Training completed successfully!")
logger.info(f"Best validation loss: {trainer.best_val_loss:.4f}")
if __name__ == '__main__':
main()
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/cli/train.py",
"license": "MIT License",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/src/python/neuralforge/config.py | import json
import os
from typing import Any, Dict, Optional
from dataclasses import dataclass, asdict
@dataclass
class Config:
model_name: str = "neuralforge_model"
batch_size: int = 32
epochs: int = 100
learning_rate: float = 0.001
weight_decay: float = 0.0001
optimizer: str = "adamw"
scheduler: str = "cosine"
warmup_epochs: int = 5
grad_clip: float = 1.0
data_path: str = "./data"
num_workers: int = 4
pin_memory: bool = True
model_dir: str = "./models"
log_dir: str = "./logs"
checkpoint_freq: int = 10
use_amp: bool = True
device: str = "cuda"
seed: int = 42
nas_enabled: bool = False
nas_population_size: int = 20
nas_generations: int = 50
nas_mutation_rate: float = 0.1
image_size: int = 224
num_classes: int = 1000
def save(self, path: str):
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
json.dump(asdict(self), f, indent=2)
@classmethod
def load(cls, path: str) -> 'Config':
with open(path, 'r') as f:
data = json.load(f)
return cls(**data)
def update(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
def __str__(self) -> str:
return json.dumps(asdict(self), indent=2) | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/config.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/data/augmentation.py | import torch
import random
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from typing import List, Tuple
class RandAugment:
def __init__(self, n: int = 2, m: int = 9):
self.n = n
self.m = m
self.augment_list = [
(self.auto_contrast, 0, 1),
(self.equalize, 0, 1),
(self.invert, 0, 1),
(self.rotate, 0, 30),
(self.posterize, 0, 4),
(self.solarize, 0, 256),
(self.color, 0.1, 1.9),
(self.contrast, 0.1, 1.9),
(self.brightness, 0.1, 1.9),
(self.sharpness, 0.1, 1.9),
(self.shear_x, 0, 0.3),
(self.shear_y, 0, 0.3),
(self.translate_x, 0, 0.3),
(self.translate_y, 0, 0.3),
]
def __call__(self, img):
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
img = op(img, val)
return img
@staticmethod
def auto_contrast(img, _):
return ImageOps.autocontrast(img)
@staticmethod
def equalize(img, _):
return ImageOps.equalize(img)
@staticmethod
def invert(img, _):
return ImageOps.invert(img)
@staticmethod
def rotate(img, magnitude):
return img.rotate(magnitude)
@staticmethod
def posterize(img, magnitude):
magnitude = int(magnitude)
return ImageOps.posterize(img, magnitude)
@staticmethod
def solarize(img, magnitude):
return ImageOps.solarize(img, int(magnitude))
@staticmethod
def color(img, magnitude):
return ImageEnhance.Color(img).enhance(magnitude)
@staticmethod
def contrast(img, magnitude):
return ImageEnhance.Contrast(img).enhance(magnitude)
@staticmethod
def brightness(img, magnitude):
return ImageEnhance.Brightness(img).enhance(magnitude)
@staticmethod
def sharpness(img, magnitude):
return ImageEnhance.Sharpness(img).enhance(magnitude)
@staticmethod
def shear_x(img, magnitude):
return img.transform(img.size, Image.AFFINE, (1, magnitude, 0, 0, 1, 0))
@staticmethod
def shear_y(img, magnitude):
return img.transform(img.size, Image.AFFINE, (1, 0, 0, magnitude, 1, 0))
@staticmethod
def translate_x(img, magnitude):
magnitude = magnitude * img.size[0]
return img.transform(img.size, Image.AFFINE, (1, 0, magnitude, 0, 1, 0))
@staticmethod
def translate_y(img, magnitude):
magnitude = magnitude * img.size[1]
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude))
class MixUp:
def __init__(self, alpha: float = 1.0, num_classes: int = 1000):
self.alpha = alpha
self.num_classes = num_classes
def __call__(self, images, labels):
batch_size = images.size(0)
if self.alpha > 0:
lam = np.random.beta(self.alpha, self.alpha)
else:
lam = 1
index = torch.randperm(batch_size).to(images.device)
mixed_images = lam * images + (1 - lam) * images[index]
labels_a = labels
labels_b = labels[index]
return mixed_images, labels_a, labels_b, lam
class CutMix:
def __init__(self, alpha: float = 1.0, num_classes: int = 1000):
self.alpha = alpha
self.num_classes = num_classes
def __call__(self, images, labels):
batch_size = images.size(0)
if self.alpha > 0:
lam = np.random.beta(self.alpha, self.alpha)
else:
lam = 1
index = torch.randperm(batch_size).to(images.device)
_, _, H, W = images.shape
cut_rat = np.sqrt(1.0 - lam)
cut_w = int(W * cut_rat)
cut_h = int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
images[:, :, bby1:bby2, bbx1:bbx2] = images[index, :, bby1:bby2, bbx1:bbx2]
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (W * H))
return images, labels, labels[index], lam
class GridMask:
def __init__(self, d1: int = 96, d2: int = 224, rotate: float = 1, ratio: float = 0.5):
self.d1 = d1
self.d2 = d2
self.rotate = rotate
self.ratio = ratio
def __call__(self, img):
h, w = img.shape[-2:]
d = np.random.randint(self.d1, self.d2)
l = int(d * self.ratio + 0.5)
mask = np.ones((h, w), np.float32)
st_h = np.random.randint(d)
st_w = np.random.randint(d)
for i in range(h // d + 1):
s_h = d * i + st_h
t_h = min(s_h + l, h)
for j in range(w // d + 1):
s_w = d * j + st_w
t_w = min(s_w + l, w)
mask[s_h:t_h, s_w:t_w] = 0
mask = torch.from_numpy(mask).to(img.device)
img = img * mask
return img
class RandomErasing:
def __init__(self, probability: float = 0.5, sl: float = 0.02, sh: float = 0.4, r1: float = 0.3):
self.probability = probability
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(np.sqrt(target_area * aspect_ratio)))
w = int(round(np.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
img[0, x1:x1 + h, y1:y1 + w] = random.uniform(0, 1)
img[1, x1:x1 + h, y1:y1 + w] = random.uniform(0, 1)
img[2, x1:x1 + h, y1:y1 + w] = random.uniform(0, 1)
return img
return img
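
def mixup_loss_example():
    # Illustrative sketch, not from the original file: the conventional way to
    # consume MixUp's output is to blend the criterion between both label sets
    # with the same lambda. The batch here is random stand-in data.
    import torch.nn as nn
    images = torch.randn(8, 3, 32, 32)
    labels = torch.randint(0, 10, (8,))
    mixed, y_a, y_b, lam = MixUp(alpha=0.4, num_classes=10)(images, labels)
    logits = torch.randn(8, 10, requires_grad=True)  # stand-in for model(mixed)
    criterion = nn.CrossEntropyLoss()
    return lam * criterion(logits, y_a) + (1 - lam) * criterion(logits, y_b)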
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/data/augmentation.py",
"license": "MIT License",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/data/dataset.py | import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
from PIL import Image
import os
from typing import Optional, Callable, Tuple, List
import numpy as np
class ImageDataset(Dataset):
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
split: str = 'train'
):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.split = split
self.samples = []
self.class_to_idx = {}
self._load_dataset()
def _load_dataset(self):
split_dir = os.path.join(self.root, self.split)
if not os.path.exists(split_dir):
raise FileNotFoundError(f"Dataset directory not found: {split_dir}")
classes = sorted([d for d in os.listdir(split_dir)
if os.path.isdir(os.path.join(split_dir, d))])
self.class_to_idx = {cls_name: idx for idx, cls_name in enumerate(classes)}
for class_name in classes:
class_dir = os.path.join(split_dir, class_name)
class_idx = self.class_to_idx[class_name]
for img_name in os.listdir(class_dir):
if img_name.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')):
img_path = os.path.join(class_dir, img_name)
self.samples.append((img_path, class_idx))
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
img_path, label = self.samples[idx]
try:
image = Image.open(img_path).convert('RGB')
except Exception as e:
print(f"Error loading image {img_path}: {e}")
image = Image.new('RGB', (224, 224), color='black')
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
class SyntheticDataset(Dataset):
def __init__(
self,
num_samples: int = 10000,
num_classes: int = 10,
image_size: int = 224,
channels: int = 3
):
self.num_samples = num_samples
self.num_classes = num_classes
self.image_size = image_size
self.channels = channels
def __len__(self) -> int:
return self.num_samples
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
image = torch.randn(self.channels, self.image_size, self.image_size)
label = idx % self.num_classes
return image, label
class MemoryDataset(Dataset):
def __init__(self, data: torch.Tensor, labels: torch.Tensor):
assert len(data) == len(labels)
self.data = data
self.labels = labels
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
return self.data[idx], self.labels[idx]
class DataLoaderBuilder:
def __init__(self, config):
self.config = config
def build_train_loader(self, dataset: Dataset) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.config.batch_size,
shuffle=True,
num_workers=self.config.num_workers,
pin_memory=self.config.pin_memory,
drop_last=True,
persistent_workers=self.config.num_workers > 0
)
def build_val_loader(self, dataset: Dataset) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.config.batch_size,
shuffle=False,
num_workers=self.config.num_workers,
pin_memory=self.config.pin_memory,
drop_last=False,
persistent_workers=self.config.num_workers > 0
)
def build_test_loader(self, dataset: Dataset) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.config.batch_size,
shuffle=False,
num_workers=self.config.num_workers,
pin_memory=self.config.pin_memory,
drop_last=False
)
class CachedDataset(Dataset):
def __init__(self, dataset: Dataset, cache_size: int = 1000):
self.dataset = dataset
self.cache_size = cache_size
self.cache = {}
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
if idx in self.cache:
return self.cache[idx]
item = self.dataset[idx]
if len(self.cache) < self.cache_size:
self.cache[idx] = item
return item
class MultiScaleDataset(Dataset):
def __init__(
self,
dataset: Dataset,
scales: List[int] = [224, 256, 288, 320]
):
self.dataset = dataset
self.scales = scales
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
image, label = self.dataset[idx]
scale = np.random.choice(self.scales)
resize = transforms.Resize((scale, scale))
image = resize(image)
return image, label
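
def _cached_dataset_example():
    # Hedged usage sketch (not in the original module): wrapping a dataset in
    # CachedDataset means the first `cache_size` indices are materialized once
    # and then served from memory; with SyntheticDataset that makes repeated
    # reads of the same index deterministic.
    base = SyntheticDataset(num_samples=100, num_classes=5, image_size=32)
    cached = CachedDataset(base, cache_size=50)
    first, label = cached[0]    # generated, then stored in the cache
    second, _ = cached[0]       # identical object, served from the cache
    assert first is second
    return first.shape, label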
class PrefetchDataset(Dataset):
def __init__(self, dataset: Dataset, prefetch_size: int = 100):
self.dataset = dataset
self.prefetch_size = prefetch_size
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
return self.dataset[idx] | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/data/dataset.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/src/python/neuralforge/data/datasets.py | import torch
from torch.utils.data import Dataset
from torchvision import datasets, transforms
import os
from typing import Optional, Callable
class CIFAR10Dataset:
def __init__(self, root='./data', train=True, transform=None, download=True):
if transform is None:
if train:
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
self.dataset = datasets.CIFAR10(root=root, train=train, transform=transform, download=download)
self.classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class CIFAR100Dataset:
def __init__(self, root='./data', train=True, transform=None, download=True):
if transform is None:
if train:
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
])
self.dataset = datasets.CIFAR100(root=root, train=train, transform=transform, download=download)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class MNISTDataset:
def __init__(self, root='./data', train=True, transform=None, download=True):
if transform is None:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.dataset = datasets.MNIST(root=root, train=train, transform=transform, download=download)
self.classes = [str(i) for i in range(10)]
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class FashionMNISTDataset:
def __init__(self, root='./data', train=True, transform=None, download=True):
if transform is None:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.2860,), (0.3530,))
])
self.dataset = datasets.FashionMNIST(root=root, train=train, transform=transform, download=download)
self.classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class STL10Dataset:
def __init__(self, root='./data', split='train', transform=None, download=True):
if transform is None:
if split == 'train':
transform = transforms.Compose([
transforms.RandomCrop(96, padding=12),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4467, 0.4398, 0.4066), (0.2603, 0.2566, 0.2713))
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4467, 0.4398, 0.4066), (0.2603, 0.2566, 0.2713))
])
self.dataset = datasets.STL10(root=root, split=split, transform=transform, download=download)
self.classes = ['airplane', 'bird', 'car', 'cat', 'deer', 'dog', 'horse', 'monkey', 'ship', 'truck']
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class ImageNetDataset:
def __init__(self, root='./data/imagenet', split='train', transform=None, download=False):
if transform is None:
if split == 'train':
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
try:
self.dataset = datasets.ImageFolder(os.path.join(root, split), transform=transform)
        except Exception:
            print(f"ImageNet not found at {root}. Please download manually from https://image-net.org/")
            print(f"Expected structure: {root}/train/ and {root}/val/")
raise
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class TinyImageNetDataset:
def __init__(self, root='./data', train=True, transform=None, download=True):
if transform is None:
if train:
transform = transforms.Compose([
transforms.RandomCrop(64, padding=8),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
import zipfile
import urllib.request
data_dir = os.path.join(root, 'tiny-imagenet-200')
if download and not os.path.exists(data_dir):
print("Downloading Tiny ImageNet (237 MB)...")
url = 'http://cs231n.stanford.edu/tiny-imagenet-200.zip'
zip_path = os.path.join(root, 'tiny-imagenet-200.zip')
try:
urllib.request.urlretrieve(url, zip_path)
print("Extracting...")
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(root)
os.remove(zip_path)
except Exception as e:
print(f"Download failed: {e}")
print("Please download manually from: http://cs231n.stanford.edu/tiny-imagenet-200.zip")
split = 'train' if train else 'val'
self.dataset = datasets.ImageFolder(os.path.join(data_dir, split), transform=transform)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class Food101Dataset:
def __init__(self, root='./data', split='train', transform=None, download=True):
if transform is None:
if split == 'train':
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ColorJitter(0.3, 0.3, 0.3),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.dataset = datasets.Food101(root=root, split=split, transform=transform, download=download)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class Caltech256Dataset:
def __init__(self, root='./data', transform=None, download=True):
if transform is None:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.dataset = datasets.Caltech256(root=root, transform=transform, download=download)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class OxfordPetsDataset:
def __init__(self, root='./data', split='trainval', transform=None, download=True):
if transform is None:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.dataset = datasets.OxfordIIITPet(root=root, split=split, transform=transform, download=download)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
def get_dataset(name='cifar10', root='./data', train=True, download=True):
name = name.lower()
if name == 'cifar10':
return CIFAR10Dataset(root=root, train=train, download=download)
elif name == 'cifar100':
return CIFAR100Dataset(root=root, train=train, download=download)
elif name == 'mnist':
return MNISTDataset(root=root, train=train, download=download)
elif name == 'fashion_mnist' or name == 'fashionmnist':
return FashionMNISTDataset(root=root, train=train, download=download)
elif name == 'stl10':
split = 'train' if train else 'test'
return STL10Dataset(root=root, split=split, download=download)
elif name == 'tiny_imagenet' or name == 'tinyimagenet':
return TinyImageNetDataset(root=root, train=train, download=download)
elif name == 'imagenet':
split = 'train' if train else 'val'
return ImageNetDataset(root=root, split=split, download=download)
elif name == 'food101':
split = 'train' if train else 'test'
return Food101Dataset(root=root, split=split, download=download)
elif name == 'caltech256':
return Caltech256Dataset(root=root, download=download)
elif name == 'oxford_pets' or name == 'oxfordpets':
split = 'trainval' if train else 'test'
return OxfordPetsDataset(root=root, split=split, download=download)
else:
raise ValueError(f"Unknown dataset: {name}")
def get_num_classes(dataset_name):
dataset_name = dataset_name.lower()
if dataset_name in ['cifar10', 'mnist', 'fashion_mnist', 'fashionmnist', 'stl10']:
return 10
elif dataset_name == 'cifar100':
return 100
elif dataset_name in ['tiny_imagenet', 'tinyimagenet']:
return 200
elif dataset_name == 'imagenet':
return 1000
elif dataset_name == 'food101':
return 101
elif dataset_name == 'caltech256':
return 257
elif dataset_name in ['oxford_pets', 'oxfordpets']:
return 37
else:
return 10
def get_class_names(dataset_name):
"""Get class names for a dataset"""
dataset_name = dataset_name.lower()
class_names_map = {
'cifar10': ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'],
'mnist': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],
'fashion_mnist': ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'],
'fashionmnist': ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'],
'stl10': ['airplane', 'bird', 'car', 'cat', 'deer', 'dog', 'horse', 'monkey', 'ship', 'truck'],
}
if dataset_name in class_names_map:
return class_names_map[dataset_name]
# For other datasets, return generic class names
num_classes = get_num_classes(dataset_name)
return [f'class_{i}' for i in range(num_classes)]
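
def _factory_example():
    # Hedged sketch (not part of the original module): the three helpers are
    # meant to be used together -- resolve the dataset, its class count, and
    # human-readable names. MNIST is used here as the smallest download.
    ds = get_dataset('mnist', root='./data', train=False, download=True)
    return len(ds), get_num_classes('mnist'), get_class_names('mnist')[:3]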
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/data/datasets.py",
"license": "MIT License",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/src/python/neuralforge/data/transforms.py | from torchvision import transforms
import torch
from typing import List, Tuple
def get_transforms(image_size: int = 224, is_training: bool = True, mean=None, std=None):
if mean is None:
mean = [0.485, 0.456, 0.406]
if std is None:
std = [0.229, 0.224, 0.225]
if is_training:
return transforms.Compose([
transforms.RandomResizedCrop(image_size, scale=(0.8, 1.0)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomVerticalFlip(p=0.1),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3))
])
else:
return transforms.Compose([
transforms.Resize(int(image_size * 1.14)),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
class RandomMixup:
def __init__(self, alpha: float = 1.0):
self.alpha = alpha
def __call__(self, batch):
if self.alpha > 0:
lam = torch.distributions.Beta(self.alpha, self.alpha).sample()
else:
lam = 1.0
batch_size = batch[0].size(0)
index = torch.randperm(batch_size)
mixed_input = lam * batch[0] + (1 - lam) * batch[0][index, :]
y_a, y_b = batch[1], batch[1][index]
return mixed_input, y_a, y_b, lam
class RandomCutmix:
def __init__(self, alpha: float = 1.0):
self.alpha = alpha
def __call__(self, batch):
images, labels = batch
batch_size = images.size(0)
index = torch.randperm(batch_size)
if self.alpha > 0:
lam = torch.distributions.Beta(self.alpha, self.alpha).sample()
else:
lam = 1.0
_, _, H, W = images.shape
cut_rat = torch.sqrt(1.0 - lam)
cut_w = (W * cut_rat).int()
cut_h = (H * cut_rat).int()
cx = torch.randint(W, (1,)).item()
cy = torch.randint(H, (1,)).item()
bbx1 = torch.clamp(cx - cut_w // 2, 0, W)
bby1 = torch.clamp(cy - cut_h // 2, 0, H)
bbx2 = torch.clamp(cx + cut_w // 2, 0, W)
bby2 = torch.clamp(cy + cut_h // 2, 0, H)
images[:, :, bby1:bby2, bbx1:bbx2] = images[index, :, bby1:bby2, bbx1:bbx2]
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (W * H))
return images, labels, labels[index], lam
class GaussianNoise:
def __init__(self, mean: float = 0.0, std: float = 0.1):
self.mean = mean
self.std = std
def __call__(self, tensor):
return tensor + torch.randn(tensor.size()) * self.std + self.mean
class RandomGaussianBlur:
def __init__(self, kernel_size: int = 5, sigma: Tuple[float, float] = (0.1, 2.0)):
self.kernel_size = kernel_size
self.sigma = sigma
def __call__(self, img):
return transforms.GaussianBlur(self.kernel_size, self.sigma)(img)
def get_strong_augmentation(image_size: int = 224):
return transforms.Compose([
transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.2)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([transforms.GaussianBlur(kernel_size=23)], p=0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
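
def _pipeline_shapes_example():
    # Illustrative sketch (an assumption, not from the original file): both
    # pipelines take a PIL image and emit a (3, image_size, image_size) tensor;
    # the eval path resizes by ~1.14x (the classic 256/224 ratio) before the
    # center crop.
    from PIL import Image
    img = Image.new('RGB', (320, 240), color=(127, 127, 127))
    train_tf = get_transforms(image_size=224, is_training=True)
    eval_tf = get_transforms(image_size=224, is_training=False)
    return train_tf(img).shape, eval_tf(img).shape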
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/data/transforms.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/models/efficientnet.py | import torch.nn as nn
from ..nn.convolution import EfficientNetBlock
class EfficientNetB0(nn.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.stem = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.SiLU(inplace=True)
)
self.blocks = nn.Sequential(
EfficientNetBlock(32, 16, 3, 1, 1),
EfficientNetBlock(16, 24, 3, 2, 6),
EfficientNetBlock(24, 24, 3, 1, 6),
EfficientNetBlock(24, 40, 5, 2, 6),
EfficientNetBlock(40, 40, 5, 1, 6),
EfficientNetBlock(40, 80, 3, 2, 6),
EfficientNetBlock(80, 80, 3, 1, 6),
EfficientNetBlock(80, 112, 5, 1, 6),
EfficientNetBlock(112, 112, 5, 1, 6),
EfficientNetBlock(112, 192, 5, 2, 6),
EfficientNetBlock(192, 192, 5, 1, 6),
EfficientNetBlock(192, 320, 3, 1, 6),
)
self.head = nn.Sequential(
nn.Conv2d(320, 1280, 1, bias=False),
nn.BatchNorm2d(1280),
nn.SiLU(inplace=True),
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(1280, num_classes)
)
def forward(self, x):
x = self.stem(x)
x = self.blocks(x)
x = self.head(x)
return x | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/models/efficientnet.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/models/resnet.py | import torch.nn as nn
from ..nn.convolution import ResNetBlock
def ResNet18(num_classes=1000, in_channels=3):
from ..nn.convolution import ResNet
return ResNet(ResNetBlock, [2, 2, 2, 2], num_classes, in_channels)
def ResNet34(num_classes=1000, in_channels=3):
from ..nn.convolution import ResNet
return ResNet(ResNetBlock, [3, 4, 6, 3], num_classes, in_channels)
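
def _resnet18_shape_check(num_classes=10):
    # Hedged sketch, not in the original file: assuming the ResNet backbone in
    # ..nn.convolution ends in adaptive pooling (as torchvision-style ResNets
    # do), a single forward pass confirms the classifier width.
    import torch
    model = ResNet18(num_classes=num_classes)
    logits = model(torch.randn(2, 3, 64, 64))
    return logits.shape  # expected: torch.Size([2, num_classes])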
def ResNet50(num_classes=1000, in_channels=3):
from ..nn.layers import BottleneckBlock
from ..nn.convolution import ResNet
return ResNet(BottleneckBlock, [3, 4, 6, 3], num_classes, in_channels) | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/models/resnet.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/models/vit.py | import torch.nn as nn
from ..nn.attention import VisionTransformerBlock
def VisionTransformer(
img_size=224,
patch_size=16,
in_channels=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
dropout=0.1
):
return VisionTransformerBlock(
img_size=img_size,
patch_size=patch_size,
in_channels=in_channels,
embed_dim=embed_dim,
num_heads=num_heads,
num_layers=depth,
num_classes=num_classes,
dropout=dropout
) | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/models/vit.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/nas/evaluator.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Subset
import time
from typing import Tuple
from .search_space import SearchSpace, Architecture
class ModelEvaluator:
def __init__(
self,
train_loader: DataLoader,
val_loader: DataLoader,
device: str = 'cuda',
epochs: int = 5,
quick_eval: bool = True
):
self.train_loader = train_loader
self.val_loader = val_loader
self.device = device
self.epochs = epochs
self.quick_eval = quick_eval
def evaluate(self, architecture: Architecture, search_space: SearchSpace) -> Tuple[float, float]:
try:
model = search_space.build_model(architecture)
model = model.to(self.device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
if self.quick_eval:
accuracy = self._quick_evaluate(model, criterion, optimizer)
else:
accuracy = self._full_evaluate(model, criterion, optimizer)
complexity = search_space.estimate_complexity(architecture)
params = complexity['params']
flops = complexity['flops']
param_penalty = params / 1e7
flop_penalty = flops / 1e9
fitness = accuracy - 0.1 * param_penalty - 0.05 * flop_penalty
return fitness, accuracy
except Exception as e:
print(f"Error evaluating architecture: {e}")
return 0.0, 0.0
def _quick_evaluate(self, model: nn.Module, criterion: nn.Module, optimizer: torch.optim.Optimizer) -> float:
model.train()
num_batches = min(50, len(self.train_loader))
for epoch in range(self.epochs):
for batch_idx, (inputs, targets) in enumerate(self.train_loader):
if batch_idx >= num_batches:
break
inputs = inputs.to(self.device)
targets = targets.to(self.device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
model.eval()
correct = 0
total = 0
num_val_batches = min(20, len(self.val_loader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(self.val_loader):
if batch_idx >= num_val_batches:
break
inputs = inputs.to(self.device)
targets = targets.to(self.device)
outputs = model(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
accuracy = 100.0 * correct / total if total > 0 else 0.0
return accuracy
def _full_evaluate(self, model: nn.Module, criterion: nn.Module, optimizer: torch.optim.Optimizer) -> float:
for epoch in range(self.epochs):
model.train()
for inputs, targets in self.train_loader:
inputs = inputs.to(self.device)
targets = targets.to(self.device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
model.eval()
correct = 0
total = 0
with torch.no_grad():
for inputs, targets in self.val_loader:
inputs = inputs.to(self.device)
targets = targets.to(self.device)
outputs = model(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
accuracy = 100.0 * correct / total if total > 0 else 0.0
return accuracy
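
def _proxy_evaluation_example():
    # Hedged sketch (not in the original file): ProxyEvaluator, defined below,
    # scores a random architecture without any training, which is how quick
    # NAS smoke runs stay cheap. The search-space config is an assumption.
    space = SearchSpace({'num_layers': 8, 'num_blocks': 2})
    arch = space.random_architecture()
    fitness, accuracy = ProxyEvaluator(device='cpu').evaluate(arch, space)
    return fitness, accuracy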
class ProxyEvaluator:
def __init__(self, device: str = 'cuda'):
self.device = device
def evaluate(self, architecture: Architecture, search_space: SearchSpace) -> Tuple[float, float]:
model = search_space.build_model(architecture)
model = model.to(self.device)
complexity = search_space.estimate_complexity(architecture)
params = complexity['params']
flops = complexity['flops']
num_layers = len([g for g in architecture.genome if g.get('type') != 'pooling'])
estimated_accuracy = 60.0 + torch.rand(1).item() * 20.0
estimated_accuracy = min(95.0, estimated_accuracy - params / 1e8)
fitness = estimated_accuracy - 0.1 * (params / 1e7) - 0.05 * (flops / 1e9)
return fitness, estimated_accuracy | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nas/evaluator.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
geekcomputers/Python:ML/src/python/neuralforge/nas/evolution.py | import torch
import random
import numpy as np
from typing import List, Dict, Any
from tqdm import tqdm
from .search_space import SearchSpace, Architecture
from .evaluator import ModelEvaluator
class EvolutionarySearch:
def __init__(
self,
search_space: SearchSpace,
evaluator: ModelEvaluator,
population_size: int = 20,
generations: int = 50,
mutation_rate: float = 0.1,
crossover_rate: float = 0.5,
tournament_size: int = 3
):
self.search_space = search_space
self.evaluator = evaluator
self.population_size = population_size
self.generations = generations
self.mutation_rate = mutation_rate
self.crossover_rate = crossover_rate
self.tournament_size = tournament_size
self.population = []
self.best_architecture = None
self.history = []
def initialize_population(self):
print(f"Initializing population of {self.population_size} architectures...")
self.population = []
for i in range(self.population_size):
arch = self.search_space.random_architecture()
self.population.append(arch)
print("Population initialized successfully")
def evaluate_population(self):
print("Evaluating population...")
for arch in tqdm(self.population, desc="Evaluating architectures"):
if arch.fitness == 0.0:
fitness, accuracy = self.evaluator.evaluate(arch, self.search_space)
arch.fitness = fitness
arch.accuracy = accuracy
complexity = self.search_space.estimate_complexity(arch)
arch.params = complexity['params']
arch.flops = complexity['flops']
def tournament_selection(self) -> Architecture:
tournament = random.sample(self.population, self.tournament_size)
return max(tournament, key=lambda x: x.fitness)
def select_parents(self) -> List[Architecture]:
parent1 = self.tournament_selection()
parent2 = self.tournament_selection()
return [parent1, parent2]
def create_offspring(self, parents: List[Architecture]) -> Architecture:
if random.random() < self.crossover_rate:
offspring = self.search_space.crossover(parents[0], parents[1])
else:
offspring = Architecture(parents[0].genome.copy())
if random.random() < self.mutation_rate:
offspring = self.search_space.mutate(offspring, self.mutation_rate)
return offspring
def evolve_generation(self):
self.population.sort(key=lambda x: x.fitness, reverse=True)
elite_size = max(1, self.population_size // 10)
new_population = self.population[:elite_size]
while len(new_population) < self.population_size:
parents = self.select_parents()
offspring = self.create_offspring(parents)
new_population.append(offspring)
self.population = new_population
def search(self) -> Architecture:
print(f"Starting evolutionary search for {self.generations} generations...")
self.initialize_population()
self.evaluate_population()
for generation in range(self.generations):
print(f"\n=== Generation {generation + 1}/{self.generations} ===")
self.population.sort(key=lambda x: x.fitness, reverse=True)
best_arch = self.population[0]
if self.best_architecture is None or best_arch.fitness > self.best_architecture.fitness:
self.best_architecture = best_arch
avg_fitness = np.mean([arch.fitness for arch in self.population])
avg_accuracy = np.mean([arch.accuracy for arch in self.population])
print(f"Best fitness: {best_arch.fitness:.4f}")
print(f"Best accuracy: {best_arch.accuracy:.2f}%")
print(f"Avg fitness: {avg_fitness:.4f}")
print(f"Avg accuracy: {avg_accuracy:.2f}%")
print(f"Best params: {best_arch.params:,}")
self.history.append({
'generation': generation + 1,
'best_fitness': best_arch.fitness,
'best_accuracy': best_arch.accuracy,
'avg_fitness': avg_fitness,
'avg_accuracy': avg_accuracy,
})
if generation < self.generations - 1:
self.evolve_generation()
self.evaluate_population()
print(f"\nSearch completed! Best architecture: {self.best_architecture}")
return self.best_architecture
def get_top_k_architectures(self, k: int = 5) -> List[Architecture]:
self.population.sort(key=lambda x: x.fitness, reverse=True)
return self.population[:k] | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nas/evolution.py",
"license": "MIT License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
geekcomputers/Python:ML/src/python/neuralforge/nas/search_space.py | import torch
import torch.nn as nn
from typing import List, Dict, Any, Optional
import random
import numpy as np
class Architecture:
    def __init__(self, genome: List[Dict[str, Any]]):
self.genome = genome
self.fitness = 0.0
self.accuracy = 0.0
self.params = 0
self.flops = 0
def __repr__(self):
return f"Architecture(fitness={self.fitness:.4f}, acc={self.accuracy:.2f}%, params={self.params})"
class SearchSpace:
def __init__(self, config: Dict[str, Any]):
self.config = config
self.layer_types = ['conv3x3', 'conv5x5', 'conv7x7', 'depthwise', 'bottleneck', 'identity']
self.activation_types = ['relu', 'gelu', 'silu', 'mish']
self.pooling_types = ['max', 'avg', 'none']
self.channels = [32, 64, 128, 256, 512]
self.num_layers = config.get('num_layers', 20)
self.num_blocks = config.get('num_blocks', 5)
def random_architecture(self) -> Architecture:
genome = []
for block_idx in range(self.num_blocks):
num_layers_in_block = random.randint(2, 5)
for layer_idx in range(num_layers_in_block):
layer_gene = {
'type': random.choice(self.layer_types),
'channels': random.choice(self.channels),
'activation': random.choice(self.activation_types),
'use_bn': random.choice([True, False]),
'dropout': random.uniform(0.0, 0.3),
}
genome.append(layer_gene)
pooling_gene = {
'type': 'pooling',
'pooling_type': random.choice(self.pooling_types),
}
genome.append(pooling_gene)
return Architecture(genome)
def build_model(self, architecture: Architecture, input_channels: int = 3, num_classes: int = 1000) -> nn.Module:
layers = []
current_channels = input_channels
for gene in architecture.genome:
if gene.get('type') == 'pooling':
if gene['pooling_type'] == 'max':
layers.append(nn.MaxPool2d(2))
elif gene['pooling_type'] == 'avg':
layers.append(nn.AvgPool2d(2))
else:
layer_type = gene['type']
out_channels = gene['channels']
activation = gene['activation']
use_bn = gene['use_bn']
dropout = gene['dropout']
if layer_type == 'conv3x3':
layers.append(nn.Conv2d(current_channels, out_channels, 3, padding=1))
elif layer_type == 'conv5x5':
layers.append(nn.Conv2d(current_channels, out_channels, 5, padding=2))
elif layer_type == 'conv7x7':
layers.append(nn.Conv2d(current_channels, out_channels, 7, padding=3))
elif layer_type == 'depthwise':
layers.append(nn.Conv2d(current_channels, current_channels, 3, padding=1, groups=current_channels))
layers.append(nn.Conv2d(current_channels, out_channels, 1))
elif layer_type == 'bottleneck':
mid_channels = out_channels // 4
layers.append(nn.Conv2d(current_channels, mid_channels, 1))
if use_bn:
layers.append(nn.BatchNorm2d(mid_channels))
layers.append(self._get_activation(activation))
layers.append(nn.Conv2d(mid_channels, mid_channels, 3, padding=1))
if use_bn:
layers.append(nn.BatchNorm2d(mid_channels))
layers.append(self._get_activation(activation))
layers.append(nn.Conv2d(mid_channels, out_channels, 1))
elif layer_type == 'identity':
if current_channels != out_channels:
layers.append(nn.Conv2d(current_channels, out_channels, 1))
else:
layers.append(nn.Identity())
if use_bn and layer_type != 'bottleneck':
layers.append(nn.BatchNorm2d(out_channels))
if layer_type != 'bottleneck':
layers.append(self._get_activation(activation))
if dropout > 0:
layers.append(nn.Dropout2d(dropout))
current_channels = out_channels
layers.append(nn.AdaptiveAvgPool2d(1))
layers.append(nn.Flatten())
layers.append(nn.Linear(current_channels, num_classes))
model = nn.Sequential(*layers)
return model
def _get_activation(self, activation: str) -> nn.Module:
if activation == 'relu':
return nn.ReLU(inplace=True)
elif activation == 'gelu':
return nn.GELU()
elif activation == 'silu':
return nn.SiLU(inplace=True)
elif activation == 'mish':
return nn.Mish(inplace=True)
else:
return nn.ReLU(inplace=True)
def mutate(self, architecture: Architecture, mutation_rate: float = 0.1) -> Architecture:
new_genome = []
for gene in architecture.genome:
if random.random() < mutation_rate:
if gene.get('type') == 'pooling':
gene = gene.copy()
gene['pooling_type'] = random.choice(self.pooling_types)
else:
gene = gene.copy()
gene['type'] = random.choice(self.layer_types)
gene['channels'] = random.choice(self.channels)
gene['activation'] = random.choice(self.activation_types)
new_genome.append(gene)
return Architecture(new_genome)
def crossover(self, parent1: Architecture, parent2: Architecture) -> Architecture:
min_len = min(len(parent1.genome), len(parent2.genome))
crossover_point = random.randint(1, min_len - 1)
child_genome = parent1.genome[:crossover_point] + parent2.genome[crossover_point:]
return Architecture(child_genome)
def estimate_complexity(self, architecture: Architecture, input_size: int = 224) -> Dict[str, float]:
total_params = 0
total_flops = 0
current_channels = 3
current_size = input_size
for gene in architecture.genome:
if gene.get('type') == 'pooling':
current_size = current_size // 2
else:
out_channels = gene['channels']
if gene['type'] in ['conv3x3', 'conv5x5', 'conv7x7']:
kernel_size = int(gene['type'][-3])
params = current_channels * out_channels * kernel_size * kernel_size
flops = params * current_size * current_size
elif gene['type'] == 'depthwise':
params = current_channels * 9 + current_channels * out_channels
flops = current_channels * 9 * current_size * current_size + current_channels * out_channels * current_size * current_size
                elif gene['type'] == 'bottleneck':
                    mid_channels = out_channels // 4
                    # 1x1 reduce + 3x3 (mid * mid * 9) + 1x1 expand
                    params = (current_channels * mid_channels
                              + mid_channels * mid_channels * 9
                              + mid_channels * out_channels)
                    flops = params * current_size * current_size
                else:
                    # 'identity' genes cost a 1x1 projection only when channels change
                    params = current_channels * out_channels if current_channels != out_channels else 0
                    flops = params * current_size * current_size
                total_params += params
                total_flops += flops
current_channels = out_channels
return {'params': total_params, 'flops': total_flops} | {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nas/search_space.py",
"license": "MIT License",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
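A minimal end-to-end sketch of the evolutionary loop the methods above support. The `SearchSpace()` constructor, the import path, and the `random_architecture()` sampler name are assumptions (the top of this file appears earlier in the document); `mutate`, `crossover`, `build_model`, and `estimate_complexity` are the methods shown above.

import torch
from neuralforge.nas.search_space import SearchSpace  # assumed import path

space = SearchSpace()                    # assumed zero-argument constructor
parent_a = space.random_architecture()   # hypothetical name for the sampler above
parent_b = space.random_architecture()

# One-point crossover between parents, then per-gene mutation
child = space.crossover(parent_a, parent_b)
child = space.mutate(child, mutation_rate=0.2)

# Cheap proxy metrics before paying for a real forward pass
stats = space.estimate_complexity(child, input_size=224)
print(f"~{stats['params']:.3g} params, ~{stats['flops']:.3g} FLOPs")

# Materialize and smoke-test the candidate network
model = space.build_model(child, input_channels=3, num_classes=10)
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 10])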
geekcomputers/Python:ML/src/python/neuralforge/nn/activations.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super().__init__()
    def forward(self, x):
        # tanh approximation of GELU; 0.7978845608 ~= sqrt(2 / pi)
        return 0.5 * x * (1.0 + torch.tanh(0.7978845608 * (x + 0.044715 * torch.pow(x, 3))))
class Swish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class Mish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.tanh(F.softplus(x))
class HardSwish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0
class HardSigmoid(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.relu6(x + 3.0) / 6.0
class FReLU(nn.Module):
def __init__(self, channels, kernel_size=3):
super().__init__()
self.conv = nn.Conv2d(channels, channels, kernel_size, padding=kernel_size // 2, groups=channels)
self.bn = nn.BatchNorm2d(channels)
def forward(self, x):
tx = self.bn(self.conv(x))
return torch.max(x, tx)
class GLU(nn.Module):
def __init__(self, dim=-1):
super().__init__()
self.dim = dim
def forward(self, x):
a, b = x.chunk(2, dim=self.dim)
return a * torch.sigmoid(b)
class ReGLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
a, b = x.chunk(2, dim=-1)
return a * F.relu(b)
class GEGLU(nn.Module):
def __init__(self):
super().__init__()
self.gelu = GELU()
def forward(self, x):
a, b = x.chunk(2, dim=-1)
return a * self.gelu(b)
class SiLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class ELU(nn.Module):
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def forward(self, x):
return torch.where(x > 0, x, self.alpha * (torch.exp(x) - 1))
class SELU(nn.Module):
def __init__(self):
super().__init__()
self.alpha = 1.6732632423543772848170429916717
self.scale = 1.0507009873554804934193349852946
def forward(self, x):
return self.scale * torch.where(x > 0, x, self.alpha * (torch.exp(x) - 1))
class PReLU(nn.Module):
def __init__(self, num_parameters=1, init=0.25):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_parameters) * init)
    def forward(self, x):
        # Reshape the per-channel weight so it broadcasts over dim 1 (channels)
        # for inputs shaped (B, C, ...), instead of the trailing dimension
        weight = self.weight
        if weight.numel() > 1 and x.dim() > 2:
            weight = weight.view(1, -1, *([1] * (x.dim() - 2)))
        return torch.where(x > 0, x, weight * x)
class LeakyReLU(nn.Module):
def __init__(self, negative_slope=0.01):
super().__init__()
self.negative_slope = negative_slope
def forward(self, x):
return F.leaky_relu(x, self.negative_slope)
class Softplus(nn.Module):
def __init__(self, beta=1):
super().__init__()
self.beta = beta
def forward(self, x):
        return F.softplus(x, beta=self.beta)
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nn/activations.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
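A short numerical sanity check for the activations above: several of them are exact re-implementations of torch built-ins, so they should agree to floating-point tolerance. The import path is an assumption.

import torch
import torch.nn.functional as F
from neuralforge.nn.activations import Swish, Mish, HardSigmoid  # assumed path

x = torch.randn(4, 8)

# Swish is x * sigmoid(x), identical to SiLU
assert torch.allclose(Swish()(x), F.silu(x), atol=1e-6)

# Mish is x * tanh(softplus(x))
assert torch.allclose(Mish()(x), F.mish(x), atol=1e-6)

# HardSigmoid is relu6(x + 3) / 6
assert torch.allclose(HardSigmoid()(x), F.hardsigmoid(x), atol=1e-6)
print("custom activations match the torch built-ins")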
geekcomputers/Python:ML/src/python/neuralforge/nn/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional
class MultiHeadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.1, bias=True):
super().__init__()
assert embed_dim % num_heads == 0
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scale = self.head_dim ** -0.5
self.qkv = nn.Linear(embed_dim, embed_dim * 3, bias=bias)
self.proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
attn = attn.masked_fill(mask == 0, float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.dropout(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.dropout(x)
return x
class CrossAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.1):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scale = self.head_dim ** -0.5
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.out_proj = nn.Linear(embed_dim, embed_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
B, N_q, C = query.shape
N_k = key.shape[1]
q = self.q_proj(query).reshape(B, N_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
k = self.k_proj(key).reshape(B, N_k, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
v = self.v_proj(value).reshape(B, N_k, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
attn = attn.masked_fill(mask == 0, float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.dropout(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N_q, C)
x = self.out_proj(x)
return x
class FeedForward(nn.Module):
def __init__(self, embed_dim, hidden_dim, dropout=0.1, activation='gelu'):
super().__init__()
self.fc1 = nn.Linear(embed_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, embed_dim)
self.dropout = nn.Dropout(dropout)
if activation == 'gelu':
self.activation = nn.GELU()
elif activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'silu':
self.activation = nn.SiLU()
else:
self.activation = nn.GELU()
def forward(self, x):
x = self.fc1(x)
x = self.activation(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class TransformerBlock(nn.Module):
def __init__(self, embed_dim, num_heads, mlp_ratio=4.0, dropout=0.1, drop_path=0.0):
super().__init__()
self.norm1 = nn.LayerNorm(embed_dim)
self.attn = MultiHeadAttention(embed_dim, num_heads, dropout)
self.norm2 = nn.LayerNorm(embed_dim)
self.mlp = FeedForward(embed_dim, int(embed_dim * mlp_ratio), dropout)
from .modules import DropPath
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x, mask=None):
x = x + self.drop_path(self.attn(self.norm1(x), mask))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class TransformerEncoder(nn.Module):
def __init__(self, embed_dim, num_heads, num_layers, mlp_ratio=4.0, dropout=0.1):
super().__init__()
self.layers = nn.ModuleList([
TransformerBlock(embed_dim, num_heads, mlp_ratio, dropout)
for _ in range(num_layers)
])
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x, mask=None):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class VisionTransformerBlock(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_channels=3, embed_dim=768,
num_heads=12, num_layers=12, num_classes=1000, dropout=0.1):
super().__init__()
self.patch_size = patch_size
self.num_patches = (img_size // patch_size) ** 2
self.patch_embed = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))
self.dropout = nn.Dropout(dropout)
self.encoder = TransformerEncoder(embed_dim, num_heads, num_layers, dropout=dropout)
self.head = nn.Linear(embed_dim, num_classes)
nn.init.trunc_normal_(self.pos_embed, std=0.02)
nn.init.trunc_normal_(self.cls_token, std=0.02)
def forward(self, x):
B = x.shape[0]
x = self.patch_embed(x).flatten(2).transpose(1, 2)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat([cls_tokens, x], dim=1)
x = x + self.pos_embed
x = self.dropout(x)
x = self.encoder(x)
x = x[:, 0]
x = self.head(x)
return x
class SelfAttention2D(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.query = nn.Conv2d(in_channels, in_channels // 8, 1)
self.key = nn.Conv2d(in_channels, in_channels // 8, 1)
self.value = nn.Conv2d(in_channels, in_channels, 1)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
B, C, H, W = x.size()
query = self.query(x).view(B, -1, H * W).permute(0, 2, 1)
key = self.key(x).view(B, -1, H * W)
value = self.value(x).view(B, -1, H * W)
attention = F.softmax(torch.bmm(query, key), dim=-1)
out = torch.bmm(value, attention.permute(0, 2, 1))
out = out.view(B, C, H, W)
return self.gamma * out + x
class LocalAttention(nn.Module):
def __init__(self, embed_dim, window_size=7, num_heads=8):
super().__init__()
self.embed_dim = embed_dim
        self.window_size = window_size  # stored but not applied yet; attention below is global
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scale = self.head_dim ** -0.5
self.qkv = nn.Linear(embed_dim, embed_dim * 3)
self.proj = nn.Linear(embed_dim, embed_dim)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = F.softmax(attn, dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
return x
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nn/attention.py",
"license": "MIT License",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
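A minimal shape-check sketch for the attention modules above: `MultiHeadAttention` with a causal mask, and a tiny `VisionTransformerBlock` on a small image. The import path is an assumption.

import torch
from neuralforge.nn.attention import MultiHeadAttention, VisionTransformerBlock  # assumed path

B, N, C = 2, 16, 64
attn = MultiHeadAttention(embed_dim=C, num_heads=8)

# Causal mask shaped (1, 1, N, N) broadcasts over batch and heads;
# masked_fill zeroes out attention to future positions
mask = torch.tril(torch.ones(N, N)).view(1, 1, N, N)
with torch.no_grad():
    out = attn(torch.randn(B, N, C), mask=mask)
print(out.shape)  # expected: torch.Size([2, 16, 64])

# 32x32 input with 8x8 patches -> 16 patches + 1 class token
vit = VisionTransformerBlock(img_size=32, patch_size=8, embed_dim=64,
                             num_heads=4, num_layers=2, num_classes=10)
with torch.no_grad():
    logits = vit(torch.randn(2, 3, 32, 32))
print(logits.shape)  # expected: torch.Size([2, 10])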
geekcomputers/Python:ML/src/python/neuralforge/nn/convolution.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional
class ResNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, in_channels=3):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512, num_classes)
def _make_layer(self, block, out_channels, blocks, stride=1):
downsample = None
if stride != 1 or self.in_channels != out_channels:
downsample = nn.Sequential(
nn.Conv2d(self.in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
layers = []
layers.append(block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for _ in range(1, blocks):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class EfficientNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, expand_ratio, se_ratio=0.25):
super().__init__()
self.stride = stride
self.use_residual = (stride == 1 and in_channels == out_channels)
hidden_dim = in_channels * expand_ratio
self.use_expansion = expand_ratio != 1
if self.use_expansion:
self.expand_conv = nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, 1, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.SiLU(inplace=True)
)
self.depthwise_conv = nn.Sequential(
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, kernel_size // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.SiLU(inplace=True)
)
se_channels = max(1, int(in_channels * se_ratio))
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(hidden_dim, se_channels, 1),
nn.SiLU(inplace=True),
nn.Conv2d(se_channels, hidden_dim, 1),
nn.Sigmoid()
)
self.project_conv = nn.Sequential(
nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
identity = x
if self.use_expansion:
x = self.expand_conv(x)
x = self.depthwise_conv(x)
se_weight = self.se(x)
x = x * se_weight
x = self.project_conv(x)
if self.use_residual:
x = x + identity
return x
class UNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, down=True):
super().__init__()
self.down = down
if down:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
self.pool = nn.MaxPool2d(2)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, 2, stride=2)
def forward(self, x, skip=None):
if self.down:
x = self.conv(x)
pool = self.pool(x)
return x, pool
else:
x = self.up(x)
if skip is not None:
x = torch.cat([x, skip], dim=1)
x = self.conv(x)
return x
class ConvNeXtBlock(nn.Module):
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.norm = nn.LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim)) if layer_scale_init_value > 0 else None
from .modules import DropPath
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
identity = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2)
x = identity + self.drop_path(x)
return x
class DilatedConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dilation_rates=[1, 2, 4, 8]):
super().__init__()
self.convs = nn.ModuleList([
nn.Sequential(
nn.Conv2d(in_channels, out_channels // len(dilation_rates), 3, padding=d, dilation=d),
nn.BatchNorm2d(out_channels // len(dilation_rates)),
nn.ReLU(inplace=True)
)
for d in dilation_rates
])
def forward(self, x):
return torch.cat([conv(x) for conv in self.convs], dim=1)
class PyramidPoolingModule(nn.Module):
def __init__(self, in_channels, out_channels, pool_sizes=[1, 2, 3, 6]):
super().__init__()
self.stages = nn.ModuleList([
nn.Sequential(
nn.AdaptiveAvgPool2d(size),
nn.Conv2d(in_channels, out_channels // len(pool_sizes), 1),
nn.BatchNorm2d(out_channels // len(pool_sizes)),
nn.ReLU(inplace=True)
)
for size in pool_sizes
])
def forward(self, x):
h, w = x.size(2), x.size(3)
features = [x]
for stage in self.stages:
pooled = stage(x)
features.append(F.interpolate(pooled, size=(h, w), mode='bilinear', align_corners=False))
return torch.cat(features, dim=1)
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nn/convolution.py",
"license": "MIT License",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
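A quick construction sketch for the blocks above: a ResNet-18-style network from `ResNetBlock` ([2, 2, 2, 2] blocks per stage) and a single MBConv-style `EfficientNetBlock` with its residual path active. The import path is an assumption.

import torch
from neuralforge.nn.convolution import ResNet, ResNetBlock, EfficientNetBlock  # assumed path

# ResNet-18 layout: four stages of two basic blocks each
resnet18 = ResNet(ResNetBlock, [2, 2, 2, 2], num_classes=10)
with torch.no_grad():
    logits = resnet18(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 10])

# stride == 1 and in_channels == out_channels enables the residual connection
mbconv = EfficientNetBlock(in_channels=32, out_channels=32,
                           kernel_size=3, stride=1, expand_ratio=4)
with torch.no_grad():
    y = mbconv(torch.randn(1, 32, 56, 56))
print(y.shape)  # expected: torch.Size([1, 32, 56, 56])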
geekcomputers/Python:ML/src/python/neuralforge/nn/layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
use_bn=True, activation='relu', drop_rate=0.0):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=not use_bn)
self.bn = nn.BatchNorm2d(out_channels) if use_bn else nn.Identity()
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'gelu':
self.activation = nn.GELU()
elif activation == 'silu':
self.activation = nn.SiLU(inplace=True)
elif activation == 'mish':
self.activation = nn.Mish(inplace=True)
else:
self.activation = nn.Identity()
self.dropout = nn.Dropout2d(drop_rate) if drop_rate > 0 else nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.activation(x)
x = self.dropout(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels, kernel_size=3, drop_rate=0.0):
super().__init__()
self.conv1 = ConvBlock(channels, channels, kernel_size, padding=kernel_size // 2, drop_rate=drop_rate)
self.conv2 = ConvBlock(channels, channels, kernel_size, padding=kernel_size // 2, activation='none')
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.conv2(x)
x = x + residual
x = self.activation(x)
return x
class BottleneckBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, expansion=4):
super().__init__()
mid_channels = out_channels // expansion
self.conv1 = ConvBlock(in_channels, mid_channels, kernel_size=1, padding=0)
self.conv2 = ConvBlock(mid_channels, mid_channels, kernel_size=3, stride=stride, padding=1)
self.conv3 = ConvBlock(mid_channels, out_channels, kernel_size=1, padding=0, activation='none')
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
residual = self.shortcut(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x + residual
x = self.activation(x)
return x
class InvertedResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, expand_ratio=6):
super().__init__()
hidden_dim = in_channels * expand_ratio
self.use_residual = stride == 1 and in_channels == out_channels
layers = []
if expand_ratio != 1:
layers.append(ConvBlock(in_channels, hidden_dim, kernel_size=1, padding=0))
        layers.extend([
            # Depthwise 3x3 (groups=hidden_dim) + linear 1x1 projection, matching
            # the MobileNetV2 inverted-residual structure the class name implies
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=stride, padding=1,
                      groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_dim, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels)
        ])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_residual:
return x + self.conv(x)
return self.conv(x)
class DenseLayer(nn.Module):
def __init__(self, in_channels, growth_rate, drop_rate=0.0):
super().__init__()
self.bn1 = nn.BatchNorm2d(in_channels)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_channels, growth_rate * 4, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(growth_rate * 4)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(growth_rate * 4, growth_rate, kernel_size=3, padding=1, bias=False)
self.dropout = nn.Dropout2d(drop_rate) if drop_rate > 0 else nn.Identity()
def forward(self, x):
out = self.conv1(self.relu1(self.bn1(x)))
out = self.conv2(self.relu2(self.bn2(out)))
out = self.dropout(out)
return torch.cat([x, out], 1)
class DenseBlock(nn.Module):
def __init__(self, num_layers, in_channels, growth_rate, drop_rate=0.0):
super().__init__()
layers = []
for i in range(num_layers):
layers.append(DenseLayer(in_channels + i * growth_rate, growth_rate, drop_rate))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class TransitionLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.bn = nn.BatchNorm2d(in_channels)
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = self.conv(self.relu(self.bn(x)))
x = self.pool(x)
return x
class SEBlock(nn.Module):
def __init__(self, channels, reduction=16):
super().__init__()
self.squeeze = nn.AdaptiveAvgPool2d(1)
self.excitation = nn.Sequential(
nn.Linear(channels, channels // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channels // reduction, channels, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
se = self.squeeze(x).view(b, c)
se = self.excitation(se).view(b, c, 1, 1)
return x * se.expand_as(x)
class DepthwiseSeparableConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super().__init__()
self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, groups=in_channels, bias=False)
self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.depthwise(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pointwise(x)
x = self.bn2(x)
x = self.relu(x)
return x
| {
"repo_id": "geekcomputers/Python",
"file_path": "ML/src/python/neuralforge/nn/layers.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
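A small sketch exercising the reusable layers above, assuming the import path below. The `DenseBlock` check works through the channel arithmetic: output channels = in_channels + num_layers * growth_rate.

import torch
from neuralforge.nn.layers import DenseBlock, SEBlock, DepthwiseSeparableConv  # assumed path

x = torch.randn(2, 64, 28, 28)

# Each DenseLayer concatenates 32 new channels: 64 + 4 * 32 = 192
dense = DenseBlock(num_layers=4, in_channels=64, growth_rate=32)
print(dense(x).shape)  # expected: torch.Size([2, 192, 28, 28])

# Squeeze-and-excitation rescales channels; shape is unchanged
se = SEBlock(channels=64, reduction=16)
print(se(x).shape)  # expected: torch.Size([2, 64, 28, 28])

# Depthwise 3x3 followed by a 1x1 pointwise projection to 128 channels
dw = DepthwiseSeparableConv(64, 128)
print(dw(x).shape)  # expected: torch.Size([2, 128, 28, 28])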