id (int64) | prompt (string) | docstring (string, nullable) |
|---|---|---|
156,568 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
def myip(request):
import socket
# Connecting a UDP socket sends no packets; the OS just picks the outbound
# interface, and getsockname() then reveals that interface's local IP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    s.connect(('8.8.8.8', 80))
    ip = s.getsockname()[0]
finally:
    s.close()
return JsonResponse({"ip": ip}) | null |
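The UDP-connect trick above generates no traffic: connect() on a datagram socket merely asks the kernel to choose an outbound interface. A standalone sketch of the same idea (the 8.8.8.8 target is arbitrary; any routable address works):

import socket

def local_ip(probe_addr=('8.8.8.8', 80)):
    # No packet is sent; the OS only selects a source address
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(probe_addr)
        return s.getsockname()[0]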
156,569 | from django.http import JsonResponse
from utils.fbmsg import FBMsg
from django.contrib import auth
from django.contrib.auth.models import User
import json
from userprofile.models import Users
from staff.models import ListModel as staff
class FBMsg(object):
def ret():
def err_contact_name():
def err_contact_mobile():
def err_contact_comments():
def err_order_same():
def err_order_no():
def err_order_fail():
def err_ret():
def err_data():
def err_tc():
def err_tc_empty():
def err_delete():
def err_code1():
def err_status():
def err_user_name():
def err_auth():
def err_user_same():
def error_referer():
def err_password1_empty():
def err_password2_empty():
def err_password_not_same():
def err_psw():
def err_dev():
def err_register_more():
def err_openid():
def err_more_user():
def err_req_day():
def err_req_shipping_list():
def err_req_stock_list():
def err_req_baseinfo_list():
def err_goods_code():
def err_authid():
def ret_auth():
def err_bad():
def err_auth_open():
def err_goods_code():
def err_po_num_empty():
def err_po_num():
def err_po_qty_type():
def err_po_qty():
def err_same_po_num():
def err_lot_num():
def err_lot_num_empty():
def err_lot_num_format():
def err_po_supplier():
def err_po_supplier_empty():
def err_po_goods_code():
def err_po_status_empty():
def err_po_status_less():
def err_po_status_same():
def err_po_status_more():
def err_po_status_big():
def err_po_status_delete():
def err_po_status_patch():
def err_po_actual_delivery_stock_patch():
def err_po_actual_delivery_stock_more():
def err_po_actual_delivery_stock_zero():
def err_po_actual_delivery_stock_moreall():
def err_po_actual_delivery_stock_again():
def err_sort_stock_bin_name():
def err_sort_stock_bin_name_error():
def err_sort_stock_qty():
def err_sort_stock_qty_empty():
def err_sort_stock_qty_zero():
def err_sort_stock_qty_more():
def err_sort_stock_bin_type():
def wms_ret():
def wms_same():
def wms_err():
def wms_errfile():
def wms_time():
def wms_vip_get():
def wms_vip():
def wms_dev():
def wms_user_owner():
def wms_warehouse_more():
def wms_company_more():
def wms_binproperty():
def wms_binsize():
def wms_no_user():
def wms_po_status_1():
def wms_po_empty():
def wms_po_status_predelivery():
def wms_po_status_predelivery_detail():
def wms_po_status_preload_detail():
def wms_po_qty_up_more():
def wms_po_qty_dup_more():
def wms_po_qty_all_up_more():
def wms_so_picked_more():
def wms_dongtai():
def wms_capcha():
def wms_capcha_l():
def wms_capcha_n():
class Users(models.Model):
def login(request, *args, **kwargs):
post_data = json.loads(request.body.decode())
data = {
"name": post_data.get('name'),
"password": post_data.get('password'),
}
ip = request.META.get('HTTP_X_FORWARDED_FOR') or request.META.get('REMOTE_ADDR')
if User.objects.filter(username=str(data['name'])).exists():
user = auth.authenticate(username=str(data['name']), password=str(data['password']))
if user is None:
err_ret = FBMsg.err_ret()
data.pop('password', None)  # never echo the password back to the client
err_ret['data'] = data
return JsonResponse(err_ret)
else:
auth.login(request, user)
user_detail = Users.objects.filter(user_id=user.id).first()
staff_id = staff.objects.filter(openid=user_detail.openid, staff_name=str(user_detail.name)).first().id
data = {
"name": data['name'],
'openid': user_detail.openid,
"user_id": staff_id
}
ret = FBMsg.ret()
ret['ip'] = ip
ret['data'] = data
return JsonResponse(ret)
else:
err_ret = FBMsg.err_ret()
err_ret['ip'] = ip
data.pop('password', None)  # never echo the password back to the client
err_ret['data'] = data
return JsonResponse(err_ret) | null |
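A quick way to exercise a JSON view like login above is Django's test client; the /login/ path is hypothetical and stands in for whatever the project's urls.py maps to the view:

from django.test import Client

client = Client()
response = client.post('/login/',
                       data='{"name": "admin", "password": "secret"}',
                       content_type='application/json')
print(response.json())  # an FBMsg-style dict with ip/data keys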
156,570 | from django.http import JsonResponse
from userprofile.models import Users
from utils.fbmsg import FBMsg
from utils.md5 import Md5
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.contrib import auth
from django.utils import timezone
from django.contrib.auth.models import User
from staff.models import ListModel as staff
import json, random, os
from django.conf import settings
from scanner.models import ListModel as scanner
def randomPhone():
randomcity = ["shanghai", "nanjing", "hangzhou", "beijing", "chongqing", "shenzhen", "guangzhou", "suzhou", "hefei",
"chengdu", "kunming", "wuhan"]
randomcolor = ["Red", "Orange", "Yellow", "Green", "Blue", "Indigo", "Purple"]
randomclass = ["Electronics", "Computers", "Smart Home", "Arts & Crafts", "Automotive", "Baby", "Health", "Kitchen",
"Industrial", "Luggage", "Movies", "Software"]
randomunit = ["Box", "Package", "Piece", "Pallet"]
randomname = ["Aaron", "Abbott", "Abel", "Baird", "Baldwin", "Bancroft", "Caesar", "Calvin", "Camille", "chengdu",
"Daisy", "Dale", "Dana", "Earl", "Eartha", "Ed", "Fabian", "Faithe", "Fanny", "Gabriel", "Gabrielle",
"Gail", "Hale", "Haley", "Hamiltion", "Ian", "Ida", "Ina", "Jack", "Jacob", "Jacqueline", "Kama",
"Karen", "Katherine", "Lambert", "Lance", "Larry", "Mabel", "Madeline", "Madge", "Nancy", "Naomi",
"Nat", "Octavia", "Odelette", "Odelia", "Paddy", "Pag", "Page", "Queena", "Quennel", "Quentin",
"Rachel", "Rae", "Ralap", "Sabina", "Sabrina", "Sally", "Tab", "Tabitha", "Tammy", "Ula", "Ulysses",
"Una", "Valentina", "Valentine", "Valentine", "Wade", "Walker", "Wallis", "Xanthe", "Xavier", "Xaviera",
"Yale", "Yedda", "Yehudi", "Zachary", "Zebulon", "Zenobia"
]
randomshape = ["Square", "Rectangle", "Cone", "Cylinder", "Irregular"]
randomspecs = ["1 x 10", "3 x 3", "5 x 5", "6 x 6"]
def randomStaffType():
randombinsize = ["Big", "Floor", "Tiny", "Small"]
class Users(models.Model):
class FBMsg(object):
def ret():
def err_contact_name():
def err_contact_mobile():
def err_contact_comments():
def err_order_same():
def err_order_no():
def err_order_fail():
def err_ret():
def err_data():
def err_tc():
def err_tc_empty():
def err_delete():
def err_code1():
def err_status():
def err_user_name():
def err_auth():
def err_user_same():
def error_referer():
def err_password1_empty():
def err_password2_empty():
def err_password_not_same():
def err_psw():
def err_dev():
def err_register_more():
def err_openid():
def err_more_user():
def err_req_day():
def err_req_shipping_list():
def err_req_stock_list():
def err_req_baseinfo_list():
def err_goods_code():
def err_authid():
def ret_auth():
def err_bad():
def err_auth_open():
def err_goods_code():
def err_po_num_empty():
def err_po_num():
def err_po_qty_type():
def err_po_qty():
def err_same_po_num():
def err_lot_num():
def err_lot_num_empty():
def err_lot_num_format():
def err_po_supplier():
def err_po_supplier_empty():
def err_po_goods_code():
def err_po_status_empty():
def err_po_status_less():
def err_po_status_same():
def err_po_status_more():
def err_po_status_big():
def err_po_status_delete():
def err_po_status_patch():
def err_po_actual_delivery_stock_patch():
def err_po_actual_delivery_stock_more():
def err_po_actual_delivery_stock_zero():
def err_po_actual_delivery_stock_moreall():
def err_po_actual_delivery_stock_again():
def err_sort_stock_bin_name():
def err_sort_stock_bin_name_error():
def err_sort_stock_qty():
def err_sort_stock_qty_empty():
def err_sort_stock_qty_zero():
def err_sort_stock_qty_more():
def err_sort_stock_bin_type():
def wms_ret():
def wms_same():
def wms_err():
def wms_errfile():
def wms_time():
def wms_vip_get():
def wms_vip():
def wms_dev():
def wms_user_owner():
def wms_warehouse_more():
def wms_company_more():
def wms_binproperty():
def wms_binsize():
def wms_no_user():
def wms_po_status_1():
def wms_po_empty():
def wms_po_status_predelivery():
def wms_po_status_predelivery_detail():
def wms_po_status_preload_detail():
def wms_po_qty_up_more():
def wms_po_qty_dup_more():
def wms_po_qty_all_up_more():
def wms_so_picked_more():
def wms_dongtai():
def wms_capcha():
def wms_capcha_l():
def wms_capcha_n():
class Md5(object):
def md5(s):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class ListModel(models.Model):
class TransportationFeeListModel(models.Model):
def register(request, *args, **kwargs):
post_data = json.loads(request.body.decode())
data = {
"name": post_data.get('name'),
"password1": post_data.get('password1'),
"password2": post_data.get('password2')
}
ip = request.META.get('HTTP_X_FORWARDED_FOR') or request.META.get('REMOTE_ADDR')
# Validate the registration payload with guard clauses before creating records
if Users.objects.filter(name=str(data['name']), developer=1, is_delete=0).exists():
err_user_same = FBMsg.err_user_same()
err_user_same['ip'] = ip
err_user_same['data'] = data['name']
return JsonResponse(err_user_same)
if data.get('password1') is None or str(data['password1']) == '':
err_password1_empty = FBMsg.err_password1_empty()
err_password1_empty['ip'] = ip
err_password1_empty['data'] = data['name']
return JsonResponse(err_password1_empty)
if data.get('password2') is None or str(data['password2']) == '':
err_password2_empty = FBMsg.err_password2_empty()
err_password2_empty['ip'] = ip
err_password2_empty['data'] = data['name']
return JsonResponse(err_password2_empty)
if str(data['password1']) != str(data['password2']):
err_password_not_same = FBMsg.err_password_not_same()
err_password_not_same['ip'] = ip
err_password_not_same['data'] = data['name']
return JsonResponse(err_password_not_same)
transaction_code = Md5.md5(data['name'])
user = User.objects.create_user(username=str(data['name']),
password=str(data['password1']))
Users.objects.create(user_id=user.id, name=str(data['name']),
openid=transaction_code, appid=Md5.md5(data['name'] + '1'),
t_code=Md5.md5(str(timezone.now())),
developer=1, ip=ip)
auth.login(request, user)
check_code = random.randint(1000, 9999)
staff.objects.create(staff_name=str(data['name']),
staff_type='Admin',
check_code=check_code,
openid=transaction_code)
user_id = staff.objects.filter(openid=transaction_code, staff_name=str(data['name']),
staff_type='Admin', check_code=check_code).first().id
# Create the per-user media folders; exist_ok tolerates reruns and existing dirs
for subdir in ('', 'win32', 'linux', 'darwin'):
    os.makedirs(os.path.join(settings.BASE_DIR, 'media', transaction_code, subdir), exist_ok=True)
ret = FBMsg.ret()
ret['ip'] = ip
data['openid'] = transaction_code
data['name'] = str(data['name'])
data['user_id'] = user_id
data.pop('password1', '')
data.pop('password2', '')
ret['data'] = data
from company.models import ListModel as company
company.objects.create(openid=transaction_code,
company_name='GreaterWMS',
company_city=str(random.choice(randomcity)),
company_address='People’s Square # 666 Room 1F',
company_contact=str(randomPhone()),
company_manager='Elvis.Shi',
creater='DemoData'
)
from warehouse.models import ListModel as warehouse
warehouse.objects.create(openid=transaction_code,
warehouse_name='Center Warehouse',
warehouse_city=str(random.choice(randomcity)),
warehouse_address='People’s Square # 666 Room 2F',
warehouse_contact=str(randomPhone()),
warehouse_manager='Tim.Yao',
creater='DemoData'
)
from supplier.models import ListModel as supplier
supplier_data_list = []
for supplier_data in range(1, 42):
demo_data = supplier(openid=transaction_code,
supplier_name='Supplier Name-' + str(supplier_data),
supplier_city=str(random.choice(randomcity)),
supplier_address='Address-' + str(supplier_data),
supplier_contact=str(randomPhone()),
supplier_manager=str(random.choice(randomname)),
creater='DemoData'
)
supplier_data_list.append(demo_data)
supplier.objects.bulk_create(supplier_data_list, batch_size=100)
from customer.models import ListModel as customer
customer_data_list = []
for customer_data in range(1, 42):
demo_data = customer(openid=transaction_code,
customer_name='Customer Name-' + str(customer_data),
customer_city=str(random.choice(randomcity)),
customer_address='Address-' + str(customer_data),
customer_contact=str(randomPhone()),
customer_manager=str(random.choice(randomname)),
creater='DemoData'
)
customer_data_list.append(demo_data)
customer.objects.bulk_create(customer_data_list, batch_size=100)
staff_data_list = []
for staff_data in randomname:
demo_data = staff(openid=transaction_code,
staff_name=staff_data,
staff_type=str(randomStaffType()),
check_code=random.randint(1000, 9999)
)
staff_data_list.append(demo_data)
staff.objects.bulk_create(staff_data_list, batch_size=100)
from driver.models import ListModel as driver
driver_data_list = []
for driver_data in range(1, 42):
demo_data = driver(openid=transaction_code,
driver_name='Driver Name-' + str(driver_data),
license_plate="".join(random.choice("0123456789") for i in range(8)),
contact=str(randomPhone()),
creater='DemoData'
)
driver_data_list.append(demo_data)
driver.objects.bulk_create(driver_data_list, batch_size=100)
from capital.models import ListModel as capital
capital_data_list = []
for capital_data in range(1, 42):
demo_data = capital(openid=transaction_code,
capital_name='Capital Name-' + str(capital_data),
capital_qty=random.randint(1, 100),
capital_cost=random.randint(100, 10000),
creater='DemoData'
)
capital_data_list.append(demo_data)
capital.objects.bulk_create(capital_data_list, batch_size=100)
from binsize.models import ListModel as binsize
binsize_data_list = [
binsize(openid=transaction_code,
bin_size='Big',
bin_size_w=1100,
bin_size_d=1200,
bin_size_h=1800,
creater='DemoData'
),
binsize(openid=transaction_code,
bin_size='Floor',
bin_size_w=10000,
bin_size_d=10000,
bin_size_h=10000,
creater='DemoData'
),
binsize(openid=transaction_code,
bin_size='Small',
bin_size_w=800,
bin_size_d=1000,
bin_size_h=1200,
creater='DemoData'
),
binsize(openid=transaction_code,
bin_size='Tiny',
bin_size_w=200,
bin_size_d=250,
bin_size_h=300,
creater='DemoData'
)
]
binsize.objects.bulk_create(binsize_data_list, batch_size=100)
from binset.models import ListModel as binset
bin_specs = [
    ('A010101', 'Normal'), ('A010102', 'Normal'), ('A010103', 'Normal'),
    ('B010101', 'Inspection'), ('B010102', 'Inspection'), ('B010103', 'Inspection'),
    ('B020101', 'Holding'), ('B020102', 'Holding'), ('B020103', 'Holding'),
    ('B030101', 'Damage'), ('B030102', 'Damage'), ('B030103', 'Damage')
]
binset_data_list = []
# Build the demo bins and register each bar code with the scanner in one pass
for index, (bin_name, bin_property) in enumerate(bin_specs, start=1):
    bar_code = Md5.md5(str(index))
    binset_data_list.append(binset(openid=transaction_code,
                                   bin_name=bin_name,
                                   bin_size=str(random.choice(randombinsize)),
                                   bin_property=bin_property,
                                   empty_label=True,
                                   creater='DemoData',
                                   bar_code=bar_code))
    scanner.objects.create(openid=transaction_code, mode="BINSET",
                           code=bin_name, bar_code=bar_code)
binset.objects.bulk_create(binset_data_list, batch_size=100)
from goodsunit.models import ListModel as goodsunit
demo_data = []
for goods_unit in randomunit:
demo_data.append(goodsunit(openid=transaction_code, goods_unit=goods_unit,
creater='DemoData'))
goodsunit.objects.bulk_create(demo_data, batch_size=100)
from goodsclass.models import ListModel as goodsclass
demo_data = []
for goods_class in randomclass:
demo_data.append(goodsclass(openid=transaction_code, goods_class=goods_class,
creater='DemoData'))
goodsclass.objects.bulk_create(demo_data, batch_size=100)
from goodscolor.models import ListModel as goodscolor
demo_data = []
for goods_color in randomcolor:
demo_data.append(goodscolor(openid=transaction_code, goods_color=goods_color,
creater='DemoData'))
goodscolor.objects.bulk_create(demo_data, batch_size=100)
from goodsbrand.models import ListModel as goodsbrand
goodsbrand_data_list = []
for goodsbrand_data in range(1, 42):
demo_data = goodsbrand(openid=transaction_code,
goods_brand='Brand Name-' + str(goodsbrand_data),
creater='DemoData'
)
goodsbrand_data_list.append(demo_data)
goodsbrand.objects.bulk_create(goodsbrand_data_list, batch_size=100)
from goodsshape.models import ListModel as goodsshape
demo_data = []
for goods_shape in randomshape:
demo_data.append(goodsshape(openid=transaction_code, goods_shape=goods_shape,
creater='DemoData'))
goodsshape.objects.bulk_create(demo_data, batch_size=100)
from goodsspecs.models import ListModel as goodsspecs
demo_data = []
for goods_specs in randomspecs:
demo_data.append(goodsspecs(openid=transaction_code, goods_specs=goods_specs,
creater='DemoData'))
goodsspecs.objects.bulk_create(demo_data, batch_size=100)
from goodsorigin.models import ListModel as goodsorigin
goodsorigin_data_list = []
for city in randomcity:
demo_data = goodsorigin(openid=transaction_code,
goods_origin=city,
creater='DemoData'
)
goodsorigin_data_list.append(demo_data)
goodsorigin.objects.bulk_create(goodsorigin_data_list, batch_size=100)
from goods.models import ListModel as goods
goods_data_list = []
for goods_data in range(1, 42):
bar_code = Md5.md5("A0000" + str(goods_data))
goods_w = round(random.uniform(10, 1000), 2)
goods_d = round(random.uniform(10, 1000), 2)
goods_h = round(random.uniform(10, 1000), 2)
goods_cost = round(random.uniform(10, 1000), 2)
goods_price = round(random.uniform(10, 1000), 2)
# Re-roll the price until it is strictly greater than the cost
while goods_price <= goods_cost:
    goods_price = round(random.uniform(10, 1000), 2)
demo_data = goods(openid=transaction_code,
goods_code="A0000" + str(goods_data),
goods_desc="Goods Desc-" + str(goods_data),
goods_supplier='Supplier Name-' + str(random.randint(1, 42)),
goods_weight=random.randint(100, 10000),
goods_w=goods_w,
goods_d=goods_d,
goods_h=goods_h,
unit_volume=round((int(goods_w) * int(goods_d) * int(goods_h)) / 1000000000, 4),
goods_unit=random.choice(randomunit),
goods_class=random.choice(randomclass),
goods_brand='Brand Name-' + str(random.randint(1, 42)),
goods_color=random.choice(randomcolor),
goods_shape=random.choice(randomshape),
goods_specs=random.choice(randomspecs),
goods_origin=random.choice(randomcity),
goods_cost=goods_cost,
goods_price=goods_price,
bar_code=bar_code,
creater='DemoData'
)
goods_data_list.append(demo_data)
scanner.objects.create(openid=transaction_code, mode="GOODS",
code="A0000" + str(goods_data),
bar_code=bar_code)
goods.objects.bulk_create(goods_data_list, batch_size=100)
from payment.models import TransportationFeeListModel as freight
freight_data_list = []
for sender in randomcity:
for receiver in randomcity:
demo_data = freight(openid=transaction_code,
send_city=sender,
receiver_city=receiver,
weight_fee=random.randint(10, 20),
volume_fee=random.randint(100, 200),
min_payment=random.randint(250, 300),
transportation_supplier="Supplier",
creater="DemoData"
)
freight_data_list.append(demo_data)
freight.objects.bulk_create(freight_data_list, batch_size=100)
return JsonResponse(ret) | null |
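Every demo-data block in register follows the same pattern: accumulate unsaved model instances, then write them with one INSERT per batch. A distilled sketch (seed and rows are illustrative names):

def seed(model_cls, rows, batch_size=100):
    # bulk_create bypasses per-object save() and save signals in exchange for speed
    objs = [model_cls(**row) for row in rows]
    model_cls.objects.bulk_create(objs, batch_size=batch_size)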
156,571 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'bin_name',
'bin_size',
'bin_property',
'empty_label',
'creater',
'create_time',
'update_time'
] | null |
156,572 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('bin_name', u'库位名称'),
('bin_size', u'库位尺寸'),
('bin_property', u'库位属性'),
('empty_label', u'空库位标识'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,573 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('bin_name', u'Bin Name'),
('bin_size', u'Bin Size'),
('bin_property', u'Bin Property'),
('empty_label', u'Empty Label'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
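These header helpers pair with rest_framework_csv: file_headers fixes the column order and the *_data_header dicts relabel columns per language, via the renderer_context keys 'header' and 'labels' that CSVStreamingRenderer reads. A hedged wiring sketch with an illustrative view:

from rest_framework.generics import ListAPIView

class BinsetCSVView(ListAPIView):  # illustrative name
    renderer_classes = [CSVStreamingRenderer]

    def get_renderer_context(self):
        context = super().get_renderer_context()
        context['header'] = file_headers()
        context['labels'] = en_data_header()  # or cn_data_header()
        return context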
156,574 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'supplier_name',
'supplier_city',
'supplier_address',
'supplier_contact',
'supplier_manager',
'supplier_level',
'creater',
'create_time',
'update_time'
] | null |
156,575 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('supplier_name', u'供应商名称'),
('supplier_city', u'供应商城市'),
('supplier_address', u'详细地址'),
('supplier_contact', u'联系电话'),
('supplier_manager', u'负责人'),
('supplier_level', u'供应商等级'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间'),
]) | null |
156,576 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('supplier_name', u'Supplier Name'),
('supplier_city', u'Supplier City'),
('supplier_address', u'Supplier Address'),
('supplier_contact', u'Supplier Contact'),
('supplier_manager', u'Supplier Manager'),
('supplier_level', u'Supplier Level'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time'),
]) | null |
156,577 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'capital_name',
'capital_qty',
'capital_cost',
'creater',
'create_time',
'update_time'
] | null |
156,578 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('capital_name', u'资产名称'),
('capital_qty', u'资产数量'),
('capital_cost', u'资产成本'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,579 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('capital_name', u'Capital Name'),
('capital_qty', u'Capital Qty'),
('capital_cost', u'Capital Cost'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,580 | from django.apps import AppConfig
from django.db.models.signals import post_migrate
def init_category():
"""
:return:None
"""
try:
from .models import TypeListModel as ls
existing = ls.objects.filter(openid__iexact='init_data')
if not (existing.exists() and existing.count() == 7):
    # Re-seed the seven built-in staff types when they are missing or incomplete
    existing.delete()
    init_data = [
        ls(id=1, openid='init_data', staff_type='Manager', creater='GreaterWMS'),
        ls(id=2, openid='init_data', staff_type='Supplier', creater='GreaterWMS'),
        ls(id=3, openid='init_data', staff_type='Customer', creater='GreaterWMS'),
        ls(id=4, openid='init_data', staff_type='Supervisor', creater='GreaterWMS'),
        ls(id=5, openid='init_data', staff_type='Inbound', creater='GreaterWMS'),
        ls(id=6, openid='init_data', staff_type='Outbound', creater='GreaterWMS'),
        ls(id=7, openid='init_data', staff_type='StockControl', creater='GreaterWMS')
    ]
    ls.objects.bulk_create(init_data, batch_size=100)
except Exception:
pass
def do_init_data(sender, **kwargs):
init_category() | null |
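The imports here (AppConfig, post_migrate) point at the usual signal wiring, which this snippet does not show; a typical sketch, with a hypothetical app label:

class StaffConfig(AppConfig):
    name = 'staff'  # hypothetical app label

    def ready(self):
        # Re-seed the built-in staff types after each migrate of this app
        post_migrate.connect(do_init_data, sender=self)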
156,581 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'staff_name',
'staff_type',
'create_time',
'update_time'
] | null |
156,582 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('staff_name', u'员工用户名'),
('staff_type', u'员工类型'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,583 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('staff_name', u'Staff Name'),
('staff_type', u'Staff Type'),
('create_time', u'Create Time'),
('update_time', u'Update Time'),
]) | null |
156,584 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'customer_name',
'customer_city',
'customer_address',
'customer_contact',
'customer_manager',
'customer_level',
'creater',
'create_time',
'update_time'
] | null |
156,585 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('customer_name', u'客户名称'),
('customer_city', u'客户城市'),
('customer_address', u'详细地址'),
('customer_contact', u'联系电话'),
('customer_manager', u'负责人'),
('customer_level', u'客户等级'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间'),
]) | null |
156,586 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('customer_name', u'Customer Name'),
('customer_city', u'Customer City'),
('customer_address', u'Customer Address'),
('customer_contact', u'Customer Contact'),
('customer_manager', u'Customer Manager'),
('customer_level', u'Customer Level'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time'),
]) | null |
156,587 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'driver_name',
'license_plate',
'contact',
'creater',
'create_time',
'update_time'
] | null |
156,588 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('driver_name', u'司机姓名'),
('license_plate', u'车牌号'),
('contact', u'联系方式'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,589 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('driver_name', u'Driver Name'),
('license_plate', u'License Plate'),
('contact', u'Contact'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,590 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_file_headers():
return [
'dn_code',
'dn_status',
'total_weight',
'total_volume',
'customer',
'creater',
'back_order_label',
'create_time',
'update_time'
] | null |
156,591 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_cn_data_header():
return dict([
('dn_code', u'发货单单号'),
('dn_status', u'发货单状态'),
('total_weight', u'总重量'),
('total_volume', u'总体积'),
('customer', u'客户'),
('creater', u'创建人'),
('back_order_label', u'欠货订单标识'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,592 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_en_data_header():
return dict([
('dn_code', u'DN Code'),
('dn_status', u'DN Status'),
('total_weight', u'Total Weight'),
('total_volume', u'Total Volume'),
('customer', u'Customer'),
('creater', u'Creator'),
('back_order_label', u'Back Order Label'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,593 | from rest_framework_csv.renderers import CSVStreamingRenderer
def detail_file_headers():
return [
'dn_code',
'dn_status',
'goods_code',
'goods_desc',
'goods_qty',
'pick_qty',
'picked_qty',
'intransit_qty',
'delivery_actual_qty',
'delivery_shortage_qty',
'delivery_more_qty',
'delivery_damage_qty',
'goods_weight',
'goods_volume',
'customer',
'creater',
'back_order_label',
'create_time',
'update_time'
] | null |
156,594 | from rest_framework_csv.renderers import CSVStreamingRenderer
def detail_cn_data_header():
return dict([
('dn_code', u'发货单单号'),
('dn_status', u'发货单状态'),
('goods_code', u'发货单货物名称'),
('goods_desc', u'发货单货物描述'),
('goods_qty', u'发货单数量'),
('pick_qty', u'需要拣货数量'),
('picked_qty', u'已拣货数量'),
('intransit_qty', u'在途库存'),
('delivery_actual_qty', u'实际到货'),
('delivery_shortage_qty', u'到货短少'),
('delivery_more_qty', u'多到货'),
('delivery_damage_qty', u'到货破损'),
('goods_weight', u'商品重量'),
('goods_volume', u'商品体积'),
('customer', u'客户'),
('creater', u'创建人'),
('back_order_label', u'欠货订单标识'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,595 | from rest_framework_csv.renderers import CSVStreamingRenderer
def detail_en_data_header():
return dict([
('dn_code', u'DN Code'),
('dn_status', u'DN Status'),
('goods_code', u'Goods Code'),
('goods_desc', u'Goods Description'),
('goods_qty', u'Goods Qty'),
('pick_qty', u'Pick Qty'),
('picked_qty', u'Picked Qty'),
('intransit_qty', u'Intransit Qty'),
('delivery_actual_qty', u'Delivery Actual Qty'),
('delivery_shortage_qty', u'Delivery Shortage Qty'),
('delivery_more_qty', u'Delivery More Qty'),
('delivery_damage_qty', u'Delivery Damage Qty'),
('goods_weight', u'Goods Weight'),
('goods_volume', u'Goods Volume'),
('customer', u'Customer'),
('creater', u'Creator'),
('back_order_label', u'Back Order Label'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,596 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
return [
'send_city',
'receiver_city',
'weight_fee',
'volume_fee',
'min_payment',
'transportation_supplier',
'creater',
'create_time',
'update_time'
] | null |
156,597 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
return dict([
('send_city', u'始发城市'),
('receiver_city', u'到货城市'),
('weight_fee', u'单公斤运费'),
('volume_fee', u'每立方米运费'),
('min_payment', u'最小运费'),
('transportation_supplier', u'承运商'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,598 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
return dict([
('send_city', u'Send City'),
('receiver_city', u'Receiver City'),
('weight_fee', u'Weight Fee'),
('volume_fee', u'Volume Fee'),
('min_payment', u'Min Payment'),
('transportation_supplier', u'Transportation Supplier'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,599 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_file_headers():
return [
'asn_code',
'asn_status',
'total_weight',
'total_volume',
'total_cost',
'supplier',
'creater',
'create_time',
'update_time'
] | null |
156,600 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_cn_data_header():
return dict([
('asn_code', u'ASN单号'),
('asn_status', u'ASN状态'),
('total_weight', u'总重量'),
('total_volume', u'总体积'),
('total_cost', u'总成本'),
('supplier', u'供应商'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,601 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_en_data_header():
return dict([
('asn_code', u'ASN Code'),
('asn_status', u'ASN Status'),
('total_weight', u'Total Weight'),
('total_volume', u'Total Volume'),
('total_cost', u'Total Cost'),
('supplier', u'Supplier'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,602 | from rest_framework_csv.renderers import CSVStreamingRenderer
def detail_file_headers():
return [
'asn_code',
'asn_status',
'supplier',
'goods_code',
'goods_desc',
'goods_qty',
'goods_actual_qty',
'sorted_qty',
'goods_shortage_qty',
'goods_more_qty',
'goods_damage_qty',
'goods_weight',
'goods_volume',
'goods_cost',
'creater',
'create_time',
'update_time'
] | null |
156,603 | from rest_framework_csv.renderers import CSVStreamingRenderer
def detail_cn_data_header():
return dict([
('asn_code', u'ASN单号'),
('asn_status', u'ASN状态'),
('supplier', u'供应商'),
('goods_code', u'商品编码'),
('goods_desc', u'商品描述'),
('goods_qty', u'订单数量'),
('goods_actual_qty', u'实际到货数量'),
('sorted_qty', u'已分拣数量'),
('goods_shortage_qty', u'少到货数量'),
('goods_more_qty', u'多到货数量'),
('goods_damage_qty', u'破损数量'),
('goods_weight', u'商品重量'),
('goods_volume', u'商品体积'),
('goods_cost', u'商品成本'),
('creater', u'创建人'),
('create_time', u'创建时间'),
('update_time', u'更新时间')
]) | null |
156,604 | from rest_framework_csv.renderers import CSVStreamingRenderer
def detail_en_data_header():
return dict([
('asn_code', u'ASN Code'),
('asn_status', u'ASN Status'),
('supplier', u'Supplier'),
('goods_code', u'Goods Code'),
('goods_desc', u'Goods Description'),
('goods_qty', u'Goods Qty'),
('goods_actual_qty', u'Goods Actual Qty'),
('sorted_qty', u'Sorted Qty'),
('goods_shortage_qty', u'Goods Shortage Qty'),
('goods_more_qty', u'Goods More Qty'),
('goods_damage_qty', u'Goods Damage Qty'),
('goods_weight', u'Goods Weight'),
('goods_volume', u'Goods Volume'),
('goods_cost', u'Goods Cost'),
('creater', u'Creator'),
('create_time', u'Create Time'),
('update_time', u'Update Time')
]) | null |
156,605 | import jwt
import datetime
from jwt import exceptions
from django.conf import settings
JWT_SALT = "ds()udsjo@jlsdosjf)wjd_#(#)$"
def create_token(payload):
headers = {
"typ": "JWT",
"alg": "HS256"
}
# Expiry must lie in the future; utcnow() alone would mark the token as
# already expired. The one-hour lifetime is an illustrative choice.
payload['exp'] = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
result = jwt.encode(payload=payload, key=JWT_SALT, algorithm="HS256", headers=headers)
return result | null |
156,606 | import jwt
import datetime
from jwt import exceptions
from django.conf import settings
JWT_SALT = "ds()udsjo@jlsdosjf)wjd_#(#)$"
def parse_payload(token):
result = {"status": False, "data": None, "error": None}
try:
# PyJWT expects `algorithms` as a list; signature verification is on by default
verified_payload = jwt.decode(token, JWT_SALT, algorithms=["HS256"])
result["status"] = True
result['data'] = verified_payload
except exceptions.ExpiredSignatureError:
result['error'] = 'Token Expired'
except jwt.DecodeError:
result['error'] = 'Token Authentication Failed'
except jwt.InvalidTokenError:
result['error'] = 'Illegal Token'
return result | null |
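A round trip through the two helpers, assuming both live in the same module:

token = create_token({'user_id': 42})
result = parse_payload(token)
if result['status']:
    print(result['data']['user_id'])  # 42
else:
    print(result['error'])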
156,607 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj or select_obj:
raise APIException({'detail': 'Bad Data cannot be stored'})
else:
return data | null |
156,608 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def qty_0_data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj or select_obj:
raise APIException({'detail': 'Bad Data cannot be stored'})
else:
if data > 0:
return data
else:
raise APIException({'detail': 'Qty Must > 0'}) | null |
156,609 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def qty_data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj or select_obj:
raise APIException({'detail': 'Bad Data cannot be stored'})
else:
if data >= 0:
return data
else:
raise APIException({'detail': 'Qty Must >= 0'}) | null |
156,610 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
class Users(models.Model):
user_id = models.IntegerField(default=0, verbose_name="Admin ID")
name = models.CharField(max_length=80, verbose_name='Staff Name')
vip = models.BigIntegerField(default=1, verbose_name='VIP Level')
openid = models.CharField(max_length=100, verbose_name='OPENID')
appid = models.CharField(max_length=100, verbose_name='APPID')
is_delete = models.BooleanField(default=False, verbose_name='Delete Label')
developer = models.BooleanField(default=True, verbose_name='Developer Label')
t_code = models.CharField(max_length=100, verbose_name='Transaction Code')
ip = models.CharField(max_length=100, verbose_name='Register IP')
vip_time = models.DateTimeField(auto_now_add=True)
link_to = models.BooleanField(default=False, verbose_name='Link To')
link_to_id = models.BigIntegerField(default=0, verbose_name='Link To ID')
avatar = models.CharField(max_length=100, default='/static/img/user.jpg', verbose_name='Staff Avatar')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create Time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update Time')
class Meta:
db_table = 'user_profile'
verbose_name = 'User Profile'
verbose_name_plural = "User Profile"
ordering = ['-id']
def openid_validate(data):
if Users.objects.filter(openid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'}) | null |
156,611 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
class Users(models.Model):
def appid_validate(data):
if Users.objects.filter(appid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'}) | null |
156,612 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def asn_data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj or select_obj:
raise APIException({'detail': 'Bad Data cannot be stored'})
else:
asn_last_code = re.findall(r'\d+', str(data), re.IGNORECASE)
if str(asn_last_code[0]) == '00000001':
data = 'ASN' + '00000001'
else:
data = 'ASN' + str(int(asn_last_code[0]) + 1).zfill(8)
return data | null |
156,613 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def dn_data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj or select_obj:
raise APIException({'detail': 'Bad Data cannot be stored'})
else:
dn_last_code = re.findall(r'\d+', str(data), re.IGNORECASE)
if str(dn_last_code[0]) == '00000001':
data = 'DN' + '00000001'
else:
data = 'DN' + str(int(dn_last_code[0]) + 1).zfill(8)
return data | null |
156,614 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def sumOfList(items, size):
# Recursively sum the first `size` elements; `items` avoids shadowing the built-in list
if size == 0:
    return 0
return items[size - 1] + sumOfList(items, size - 1) | null |
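The recursion is O(size) deep and will hit Python's default recursion limit near a thousand elements; the built-in sum over a slice gives the same result without that risk:

assert sumOfList([1, 2, 3, 4], 4) == sum([1, 2, 3, 4][:4]) == 10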
156,615 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def is_number(data):
try:
float(data)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(data)
return True
except (TypeError, ValueError):
pass
return False | null |
156,616 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def secret_bar_code(data):
return base64.b64encode(str(data).encode()).decode() | null |
156,617 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def verify_bar_code(data):
return json.loads(base64.b64decode(str(data).encode()).decode().replace('\'', '\"')) | null |
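Note that base64 is a reversible encoding, not encryption, so these two helpers obscure rather than protect the payload. They round-trip a dict whose repr survives the quote swap:

payload = {'goods_code': 'A00001', 'qty': 3}
assert verify_bar_code(secret_bar_code(payload)) == payload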
156,618 | from userprofile.models import Users
import re, base64, json
from rest_framework.exceptions import APIException
def transportation_calculate(weight, volume, weight_fee, volume_fee, min_fee):
weight_cost = weight * weight_fee
volume_cost = volume * volume_fee
# Charge the larger of the weight-based and volume-based cost, floored at min_fee
data = round(max(weight_cost, volume_cost, min_fee), 2)
return data | null |
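A worked call: 120 kg and 0.8 m³ at 15 per kg and 150 per m³ with a 500 minimum charges max(1800, 120, 500):

assert transportation_calculate(120, 0.8, 15, 150, 500) == 1800.0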
156,619 | import re
def api_tags(data):
lang = re.findall(r'zh-CN', str(data), re.IGNORECASE)
if lang:
return [
{
"name": "asn",
"description": "到货通知书"
},
{
"name": "binproperty",
"description": "库位属性"
},
{
"name": "binset",
"description": "库位设置"
},
{
"name": "binsize",
"description": "库位尺寸"
},
{
"name": "capital",
"description": "固定资产"
},
{
"name": "chat",
"description": "即时聊天"
},
{
"name": "company",
"description": "公司信息"
},
{
"name": "customer",
"description": "客户信息"
},
{
"name": "cyclecount",
"description": "动态盘点"
},
{
"name": "dashboard",
"description": "仪表盘"
},
{
"name": "dn",
"description": "发货单"
},
{
"name": "driver",
"description": "司机信息"
},
{
"name": "goods",
"description": "商品信息"
},
{
"name": "goodsbrand",
"description": "商品品牌"
},
{
"name": "goodsclass",
"description": "商品类别"
},
{
"name": "goodscolor",
"description": "商品颜色"
},
{
"name": "goodsorigin",
"description": "商品产地"
},
{
"name": "goodsshape",
"description": "商品形状"
},
{
"name": "goodsspecs",
"description": "商品规格"
},
{
"name": "goodsunit",
"description": "商品单位"
},
{
"name": "payment",
"description": "费用支出"
},
{
"name": "scanner",
"description": "扫描PDA"
},
{
"name": "shopid",
"description": "电商扩展"
},
{
"name": "staff",
"description": "员工信息"
},
{
"name": "stock",
"description": "库存信息"
},
{
"name": "supplier",
"description": "供应商信息"
},
{
"name": "uploadfile",
"description": "上传中心"
},
{
"name": "warehouse",
"description": "仓库信息"
}
]
else:
return [
{
"name": "asn",
"description": "Arrive Manifest"
},
{
"name": "binproperty",
"description": "Bin Property"
},
{
"name": "binset",
"description": "Bin Set"
},
{
"name": "binsize",
"description": "Bin Size"
},
{
"name": "capital",
"description": "Capital"
},
{
"name": "chat",
"description": "Chat"
},
{
"name": "company",
"description": "Company Info"
},
{
"name": "customer",
"description": "Customer Info"
},
{
"name": "cyclecount",
"description": "Cycle Count"
},
{
"name": "dashboard",
"description": "Dashboard"
},
{
"name": "dn",
"description": "Shipping Notice"
},
{
"name": "driver",
"description": "Driver Info"
},
{
"name": "goods",
"description": "Goods List"
},
{
"name": "goodsbrand",
"description": "Goods Brand"
},
{
"name": "goodsclass",
"description": "Goods Class"
},
{
"name": "goodscolor",
"description": "Goods Color"
},
{
"name": "goodsorigin",
"description": "Goods Origin"
},
{
"name": "goodsshape",
"description": "Goods Shape"
},
{
"name": "goodsspecs",
"description": "Goods Specs"
},
{
"name": "goodsunit",
"description": "Goods Unit"
},
{
"name": "payment",
"description": "Payment"
},
{
"name": "scanner",
"description": "Scanner PDA"
},
{
"name": "shopid",
"description": "E-comments"
},
{
"name": "staff",
"description": "Staff Info"
},
{
"name": "stock",
"description": "Stock Info"
},
{
"name": "supplier",
"description": "Supplier Info"
},
{
"name": "uploadfile",
"description": "Upload Center"
},
{
"name": "warehouse",
"description": "Warehouse Info"
}
] | null |
156,620 | from rest_framework.views import exception_handler
from rest_framework.response import Response
from django.db import DatabaseError
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
response = exception_handler(exc, context)
# Now add the HTTP status code to the response.
if response is not None:
response.data['status_code'] = response.status_code
# Re-wrap the response, keeping the original HTTP status code
response = Response(response.data, status=response.status_code)
else:
if isinstance(exc, DatabaseError):
pass
# response = Response({'detail': 'Database Error'})
else:
pass
# response = Response({'detail': 'Other Error'})
return response | null |
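DRF picks the handler up from settings; the dotted path below is a placeholder for whichever module the function lives in:

REST_FRAMEWORK = {
    'EXCEPTION_HANDLER': 'utils.datasolve.custom_exception_handler',  # hypothetical path
}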
156,621 | import os
import fire
import torch
import readline
from accelerate import infer_auto_device_map, dispatch_model
from accelerate.utils import get_balanced_memory
from transformers import AutoTokenizer, AutoModelForCausalLM
def get_model(model):
# Stub out weight initialization: the random values are overwritten by the
# pretrained checkpoint anyway, so skipping them speeds up loading
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.float16)
return model | null |
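The accelerate imports above suggest the usual multi-GPU dispatch step, which this snippet does not show; a hedged sketch with an illustrative checkpoint id:

model = get_model("huggyllama/llama-7b")  # any causal-LM checkpoint
max_memory = get_balanced_memory(model, dtype=torch.float16)
device_map = infer_auto_device_map(model, max_memory=max_memory, dtype=torch.float16)
model = dispatch_model(model, device_map=device_map)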
156,622 | import os
import bz2
import ctypes
import base64
import fire
import torch
import readline
from typing import List
from torch.nn import Linear
from torch.nn.parameter import Parameter
from accelerate import infer_auto_device_map, dispatch_model
from accelerate.utils import get_balanced_memory
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.utils import logging
def quantize(model, weight_bit_width, empty_init=False, **kwargs):
"""Replace fp16 linear with quantized linear"""
def _quantized(linear):
    # Mirror the fp16 Linear layer with a QuantizedLinear of the same shape
    return QuantizedLinear(
        weight_bit_width=weight_bit_width,
        weight_tensor=linear.weight.to(torch.cuda.current_device()),
        bias_tensor=linear.bias,
        in_features=linear.in_features,
        out_features=linear.out_features,
        bias=True,
        dtype=torch.half,
        device=linear.weight.device,
        empty_init=empty_init
    )
for layer in model.model.layers:
    layer.self_attn.q_proj = _quantized(layer.self_attn.q_proj)
    layer.self_attn.k_proj = _quantized(layer.self_attn.k_proj)
    layer.self_attn.v_proj = _quantized(layer.self_attn.v_proj)
    layer.self_attn.o_proj = _quantized(layer.self_attn.o_proj)
    layer.mlp.gate_proj = _quantized(layer.mlp.gate_proj)
    layer.mlp.down_proj = _quantized(layer.mlp.down_proj)
    layer.mlp.up_proj = _quantized(layer.mlp.up_proj)
return model
def get_model(model, wbit):
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.float16)
model = quantize(model, wbit)
return model | null |
156,623 | import os
import bz2
import ctypes
import base64
import fire
import torch
import readline
from typing import List
from torch.nn import Linear
from torch.nn.parameter import Parameter
from accelerate import infer_auto_device_map, dispatch_model
from accelerate.utils import get_balanced_memory
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.utils import logging
def compress_int4_weight(weight: torch.Tensor):  # (n, m)
# `kernels` and `round_up` are provided by the surrounding module; the CUDA
# kernel packs two int4 values into each int8 element, halving the row width
with torch.cuda.device(weight.device):
n, m = weight.size(0), weight.size(1)
assert m % 2 == 0
m = m // 2
out = torch.empty(n, m, dtype=torch.int8, device="cuda")
stream = torch.cuda.current_stream()
gridDim = (n, 1, 1)
blockDim = (min(round_up(m, 32), 1024), 1, 1)
kernels.int4WeightCompression(
gridDim,
blockDim,
0,
stream,
[ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
)
return out | null |
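For intuition, a CPU-only sketch of one plausible packing, assuming each column pair lands in the (high nibble, low nibble) of an int8 — the actual CUDA kernel's layout may differ:

def compress_int4_weight_cpu(weight: torch.Tensor) -> torch.Tensor:
    # weight stores int4 values (range -8..7) in int8 elements
    high = weight[:, 0::2] << 4    # first of each pair into the high nibble
    low = weight[:, 1::2] & 0x0F   # second of each pair into the low nibble
    return (high | low).to(torch.int8)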
156,624 | import os
import bz2
import ctypes
import base64
import fire
import torch
import readline
from typing import List
from torch.nn import Linear
from torch.nn.parameter import Parameter
from accelerate import infer_auto_device_map, dispatch_model
from accelerate.utils import get_balanced_memory
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.utils import logging
def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
if source_bit_width == 8:
func = kernels.int8WeightExtractionHalf
elif source_bit_width == 4:
func = kernels.int4WeightExtractionHalf
else:
raise ValueError("Unsupported bit-width: {}".format(source_bit_width))
with torch.cuda.device(weight.device):
n, m = weight.size(0), weight.size(1)
out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda")
stream = torch.cuda.current_stream()
gridDim = (n, 1, 1)
blockDim = (min(round_up(m, 32), 1024), 1, 1)
func(
gridDim,
blockDim,
0,
stream,
[
ctypes.c_void_p(weight.data_ptr()),
ctypes.c_void_p(scale_list.data_ptr()),
ctypes.c_void_p(out.data_ptr()),
ctypes.c_int32(n),
ctypes.c_int32(m),
],
)
return out | null |
156,625 | import logging
import os
import time
import fire
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, GenerationConfig
def get_model(model_name_or_path):
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
quantize_config = BaseQuantizeConfig.from_pretrained(
model_name_or_path
)
model = AutoGPTQForCausalLM.from_quantized(
model_name_or_path,
use_safetensors=True,
device_map="auto",
use_triton=False,
inject_fused_attention=False,
quantize_config=quantize_config,
)
model.config.pretraining_tp = 1
model.eval()
return model | null |
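Typical use of this loader, with an illustrative quantized checkpoint id:

tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-GPTQ")  # illustrative id
model = get_model("TheBloke/Llama-2-7B-GPTQ")
inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=16)[0]))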
156,626 | import os
import time
from pathlib import Path
from threading import Thread
from typing import Any, Dict, Optional, Union
import fire
import torch
from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config
from torch.nn import CrossEntropyLoss
import transformers
from transformers import (
GenerationConfig,
LlamaTokenizer,
PretrainedConfig,
PreTrainedModel,
TextIteratorStreamer,
)
from transformers.modeling_outputs import CausalLMOutputWithPast
def progress_rep(module, num_modules):
yield 100 * module / num_modules | null |
156,627 | import os
import time
from pathlib import Path
from threading import Thread
from typing import Any, Dict, Optional, Union
import fire
import torch
from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config
from torch.nn import CrossEntropyLoss
import transformers
from transformers import (
GenerationConfig,
LlamaTokenizer,
PretrainedConfig,
PreTrainedModel,
TextIteratorStreamer,
)
from transformers.modeling_outputs import CausalLMOutputWithPast
def generate_stream(model: transformers.PreTrainedModel, tokenizer: transformers.PreTrainedTokenizer,
input_ids: torch.Tensor, attention_mask: torch.Tensor,
generation_config: transformers.GenerationConfig):
streamer = TextIteratorStreamer(
tokenizer,
skip_prompt=True,
skip_special_tokens=True,
spaces_between_special_tokens=False,
)
kwargs = generation_config.to_dict()
def eval_generate(**args):
with torch.inference_mode(mode=True):
model.eval()
model.generate(**args)
kwargs['input_ids'] = input_ids
kwargs['attention_mask'] = attention_mask
kwargs['streamer'] = streamer
Thread(target=eval_generate, kwargs=kwargs).start()
return streamer | null |
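The returned TextIteratorStreamer is itself an iterator, so a caller can print text as it is generated:

streamer = generate_stream(model, tokenizer, input_ids, attention_mask, generation_config)
for chunk in streamer:
    print(chunk, end='', flush=True)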
156,628 | from setuptools import find_packages, setup
from setuptools.command.install import install
class DownloadNLTK(install):
def run(self):
self.do_egg_install()
import nltk
nltk.download('punkt')
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
if '--' in version:
# the `extras_require` doesn't accept options.
version = version.split('--')[0].strip()
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def get_version():
version_file = 'opencompass/__init__.py'
with open(version_file, 'r', encoding='utf-8') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def do_setup():
    # `readme` was defined elsewhere in the original file; read it here so
    # the snippet is self-contained (assumes a README.md next to setup.py).
    with open('README.md', encoding='utf-8') as f:
        readme = f.read()
    setup(
name='opencompass',
version=get_version(),
description='A comprehensive toolkit for large model evaluation',
# url="",
# author="",
long_description=readme,
long_description_content_type='text/markdown',
cmdclass={'download_nltk': DownloadNLTK},
setup_requires=['nltk==3.8'],
python_requires='>=3.8.0',
install_requires=parse_requirements('requirements/runtime.txt'),
packages=find_packages(exclude=[
'test*',
'paper_test*',
]),
keywords=['AI', 'NLP', 'in-context learning'],
classifiers=[
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
]) | null |
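As a sketch of what `parse_requirements` produces, consider a hypothetical requirements file:

# requirements/runtime.txt (illustrative contents):
#   torch>=1.13
#   nltk==3.8
#   tqdm
print(parse_requirements('requirements/runtime.txt'))
# -> ['torch>=1.13', 'nltk==3.8', 'tqdm']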
156,629 | import argparse
import getpass
import os
import os.path as osp
from datetime import datetime
from mmengine.config import Config
from opencompass.partitioners import NaivePartitioner, SizePartitioner
from opencompass.registry import PARTITIONERS, RUNNERS
from opencompass.runners import DLCRunner, LocalRunner, SlurmRunner
from opencompass.utils import LarkReporter, Summarizer, get_logger
def parse_slurm_args(slurm_parser):
"""These args are all for slurm launch."""
slurm_parser.add_argument('-p',
'--partition',
help='Slurm partition name',
default=None,
type=str)
slurm_parser.add_argument('-q',
'--quotatype',
help='Slurm quota type',
default=None,
type=str)
def parse_dlc_args(dlc_parser):
"""These args are all for dlc launch."""
dlc_parser.add_argument('--aliyun-cfg',
help='The config path for aliyun config',
default='~/.aliyun.cfg',
type=str)
def parse_args():
parser = argparse.ArgumentParser(description='Run an evaluation task')
parser.add_argument('config', help='Train config file path')
# add mutually exclusive args `--slurm` `--dlc`, defaults to local runner
# if "infer" or "eval" not specified
launch_method = parser.add_mutually_exclusive_group()
launch_method.add_argument('--slurm',
action='store_true',
default=False,
help='Whether to force tasks to run with srun. '
'If True, `--partition(-p)` must be set. '
'Defaults to False')
launch_method.add_argument('--dlc',
action='store_true',
default=False,
help='Whether to force tasks to run on dlc. If '
'True, `--aliyun-cfg` must be set. Defaults'
' to False')
# add general args
parser.add_argument('--debug',
                        help='Debug mode, in which the scheduler will run '
                        'tasks in a single process, and output will not be '
'redirected to files',
action='store_true',
default=False)
parser.add_argument('--dry-run',
help='Dry run mode, in which the scheduler will not '
'actually run the tasks, but only print the commands '
'to run',
action='store_true',
default=False)
parser.add_argument('-m',
'--mode',
help='Running mode. You can choose "infer" if you '
'only want the inference results, or "eval" if you '
'already have the results and want to evaluate them, '
'or "viz" if you want to visualize the results.',
choices=['all', 'infer', 'eval', 'viz'],
default='all',
type=str)
parser.add_argument('-r',
'--reuse',
nargs='?',
type=str,
const='latest',
help='Reuse previous outputs & results, and run any '
'missing jobs presented in the config. If its '
'argument is not specified, the latest results in '
                        'the work_dir will be reused. The argument can also '
                        'be a specific timestamp, e.g. 20230516_144254')
parser.add_argument('-w',
'--work-dir',
help='Work path, all the outputs will be '
'saved in this path, including the slurm logs, '
                        'the evaluation results, the summary results, etc. '
'If not specified, the work_dir will be set to '
'./outputs/default.',
default=None,
type=str)
parser.add_argument('-l',
'--lark',
help='Report the running status to lark bot',
action='store_true',
default=False)
parser.add_argument('--max-partition-size',
help='The maximum size of an infer task. Only '
'effective when "infer" is missing from the config.',
type=int,
                        default=2000)
parser.add_argument(
'--gen-task-coef',
        help='The dataset cost measurement coefficient for generation tasks. '
'Only effective when "infer" is missing from the config.',
type=int,
default=20)
parser.add_argument('--max-num-workers',
help='Max number of workers to run in parallel. '
                        'Will be overridden by the "max_num_workers" argument '
'in the config.',
type=int,
default=32)
parser.add_argument(
'--retry',
help='Number of retries if the job failed when using slurm or dlc. '
        'Will be overridden by the "retry" argument in the config.',
type=int,
default=2)
# set srun args
slurm_parser = parser.add_argument_group('slurm_args')
parse_slurm_args(slurm_parser)
# set dlc args
dlc_parser = parser.add_argument_group('dlc_args')
parse_dlc_args(dlc_parser)
args = parser.parse_args()
if args.slurm:
assert args.partition is not None, (
'--partition(-p) must be set if you want to use slurm')
if args.dlc:
assert os.path.exists(args.aliyun_cfg), (
'When launching tasks using dlc, it needs to be configured '
'in "~/.aliyun.cfg", or use "--aliyun-cfg $ALiYun-CFG_Path"'
' to specify a new path.')
return args | null |
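Typical invocations of this parser, assuming the entry script is named run.py (the script name and config paths are not shown in the snippet):

# Local run in debug mode:
#   python run.py configs/eval_demo.py --debug
# Slurm launch (--partition is mandatory together with --slurm):
#   python run.py configs/eval_demo.py --slurm -p my_partition
# Reuse the latest previous outputs and only run the eval stage:
#   python run.py configs/eval_demo.py -m eval -r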
156,630 | import argparse
import getpass
import os
import os.path as osp
from datetime import datetime
from mmengine.config import Config
from opencompass.partitioners import NaivePartitioner, SizePartitioner
from opencompass.registry import PARTITIONERS, RUNNERS
from opencompass.runners import DLCRunner, LocalRunner, SlurmRunner
from opencompass.utils import LarkReporter, Summarizer, get_logger
The provided code snippet includes necessary dependencies for implementing the `exec_infer_runner` function. Write a Python function `def exec_infer_runner(tasks, args, cfg)` to solve the following problem:
execute infer runner according to args.
Here is the function:
def exec_infer_runner(tasks, args, cfg):
"""execute infer runner according to args."""
if args.slurm:
runner = SlurmRunner(dict(type='OpenICLInferTask'),
max_num_workers=args.max_num_workers,
partition=args.partition,
quotatype=args.quotatype,
retry=args.retry,
debug=args.debug,
lark_bot_url=cfg['lark_bot_url'])
elif args.dlc:
runner = DLCRunner(dict(type='OpenICLInferTask'),
max_num_workers=args.max_num_workers,
aliyun_cfg=Config.fromfile(args.aliyun_cfg),
retry=args.retry,
debug=args.debug,
lark_bot_url=cfg['lark_bot_url'])
else:
runner = LocalRunner(task=dict(type='OpenICLInferTask'),
max_num_workers=args.max_num_workers,
debug=args.debug,
lark_bot_url=cfg['lark_bot_url'])
runner(tasks) | execute infer runner according to args. |
156,631 | import argparse
import getpass
import os
import os.path as osp
from datetime import datetime
from mmengine.config import Config
from opencompass.partitioners import NaivePartitioner, SizePartitioner
from opencompass.registry import PARTITIONERS, RUNNERS
from opencompass.runners import DLCRunner, LocalRunner, SlurmRunner
from opencompass.utils import LarkReporter, Summarizer, get_logger
The provided code snippet includes necessary dependencies for implementing the `exec_eval_runner` function. Write a Python function `def exec_eval_runner(tasks, args, cfg)` to solve the following problem:
execute eval runner according to args.
Here is the function:
def exec_eval_runner(tasks, args, cfg):
"""execute infer runner according to args."""
if args.slurm:
runner = SlurmRunner(dict(type='OpenICLEvalTask'),
max_num_workers=args.max_num_workers,
partition=args.partition,
quotatype=args.quotatype,
retry=args.retry,
debug=args.debug,
lark_bot_url=cfg['lark_bot_url'])
elif args.dlc:
runner = DLCRunner(dict(type='OpenICLEvalTask'),
max_num_workers=args.max_num_workers,
aliyun_cfg=Config.fromfile(args.aliyun_cfg),
retry=args.retry,
debug=args.debug,
lark_bot_url=cfg['lark_bot_url'])
else:
runner = LocalRunner(task=dict(type='OpenICLEvalTask'),
max_num_workers=args.max_num_workers,
debug=args.debug,
lark_bot_url=cfg['lark_bot_url'])
    runner(tasks) | execute eval runner according to args.
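A sketch of how these two helpers might be wired together after partitioning; the partitioner setup is an assumption based on the imports, not code from the source.

cfg = Config.fromfile(args.config)  # assumes `args` from parse_args()
partitioner = NaivePartitioner(out_dir=osp.join(cfg['work_dir'], 'results'))
tasks = partitioner(cfg)
if args.mode in ('all', 'infer'):
    exec_infer_runner(tasks, args, cfg)
if args.mode in ('all', 'eval'):
    exec_eval_runner(tasks, args, cfg)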
156,632 | import argparse
import os.path as osp
import time
from typing import Optional
import mmengine
from mmengine.config import Config, ConfigDict
from mmengine.utils import mkdir_or_exist
from opencompass.registry import (ICL_EVALUATORS, MODELS, TASKS,
TEXT_POSTPROCESSORS)
from opencompass.tasks.base import BaseTask
from opencompass.utils import (build_dataset_from_cfg, get_infer_output_path,
get_logger, task_abbr_from_cfg)
def parse_args():
parser = argparse.ArgumentParser(description='Score Calculator')
parser.add_argument('config', help='Config file path')
args = parser.parse_args()
return args | null |
156,635 | import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import faiss
import numpy as np
import torch
import tqdm
from sentence_transformers import SentenceTransformer
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, BatchEncoding, PreTrainedTokenizerBase
from transformers.file_utils import PaddingStrategy
from opencompass.openicl.icl_dataset_reader import DatasetEncoder
from opencompass.openicl.icl_retriever import BaseRetriever
from opencompass.openicl.utils.logging import get_logger
from opencompass.registry import ICL_RETRIEVERS
class ListWrapper:

    def __init__(self, data: List[Any]):
        self.data = data

    def to(self, device):
        # Metadata entries are plain Python objects; there is nothing to
        # move to a device, so return the wrapped list unchanged.
        return self.data

def ignore_pad_dict(features):
    res_dict = {}
    if 'metadata' in features[0]:
        res_dict['metadata'] = ListWrapper(
            [x.pop('metadata') for x in features])
    return res_dict
156,640 | from opencompass.registry import TEXT_POSTPROCESSORS
def gsm8k_dataset_postprocess(text: str) -> str:
return text.split('#### ')[1].replace(',', '') | null |
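For example, on a raw GSM8K reference string ('#### ' marks the gold answer; commas are stripped from numbers):

print(gsm8k_dataset_postprocess('She pays 3*12=36 each month.\n#### 1,236'))
# -> '1236'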
156,641 | from opencompass.registry import TEXT_POSTPROCESSORS
def gsm8k_postprocess(text: str) -> str:
text = text.split('\n\n')[0]
text = text.split(' ')[::-1]
flag = False
ret = ''
for i in range(len(text)):
s = text[i]
for i in range(len(s)):
if s[i].isdigit():
flag = True
ret = s
break
if flag:
break
ret1 = ''
for i in range(len(ret)):
if ret[i].isdigit():
ret1 += ret[i]
return ret1 | null |
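The function walks the first paragraph backwards to find the last token containing a digit, then keeps only its digits:

print(gsm8k_postprocess('So the total is 42 dollars.\n\nIgnored text'))
# -> '42'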
156,642 | import json
from datasets import Dataset, DatasetDict
from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import (ICL_EVALUATORS, LOAD_DATASET,
TEXT_POSTPROCESSORS)
from .base import BaseDataset
def math_postprocess(text: str) -> str:
SUBSTITUTIONS = [('an ', ''), ('a ', ''), ('.$', '$'), ('\\$', ''),
(r'\ ', ''), (' ', ''), ('mbox', 'text'),
(',\\text{and}', ','), ('\\text{and}', ','),
('\\text{m}', '\\text{}'), ('\\le', '<')]
REMOVED_EXPRESSIONS = [
'square', 'ways', 'integers', 'dollars', 'mph', 'inches', 'ft',
'hours', 'km', 'units', '\\ldots', 'sue', 'points', 'feet', 'minutes',
'digits', 'cents', 'degrees', 'cm', 'gm', 'pounds', 'meters', 'meals',
'edges', 'students', 'childrentickets', 'multiples', '\\text{s}',
'\\text{.}', '\\text{\ns}', '\\text{}^2', '\\text{}^3', '\\text{\n}',
'\\text{}', r'\mathrm{th}', r'^\circ', r'^{\circ}', r'\;', r',\!',
'{,}', '"', '\\dots', '\n', '\r', '\f'
]
import re
def normalize_final_answer(final_answer: str) -> str:
"""Normalize a final answer to a quantitative reasoning question."""
# final_answer = final_answer.split('=')[-1]
for before, after in SUBSTITUTIONS:
final_answer = final_answer.replace(before, after)
for expr in REMOVED_EXPRESSIONS:
final_answer = final_answer.replace(expr, '')
# Extract answer that is in LaTeX math, is bold,
# is surrounded by a box, etc.
final_answer = re.sub(r'(\\text\{)(.*?)(\})', '\\2', final_answer)
final_answer = re.sub(r'(\\textbf\{)(.*?)(\})', '\\2', final_answer)
final_answer = re.sub(r'(\\overline\{)(.*?)(\})', '\\2', final_answer)
final_answer = re.sub(r'(\\boxed\{)(.*)(\})', '\\2', final_answer)
assert '\n' not in final_answer
assert '\r' not in final_answer
assert '\f' not in final_answer
if len(re.findall(r'finalansweris(.*)', final_answer)) > 0:
final_answer = re.findall(r'finalansweris(.*)', final_answer)[-1]
if len(re.findall(r'oxed\{(.*?)\}', final_answer)) > 0:
final_answer = re.findall(r'oxed\{(.*?)\}', final_answer)[-1]
if len(re.findall(r'\$(.*?)\$', final_answer)) > 0:
final_answer = re.findall(r'\$(.*?)\$', final_answer)[-1]
final_answer = final_answer.strip()
if 'rac' in final_answer and '\\frac' not in final_answer:
final_answer = final_answer.replace('rac', '\\frac')
# Normalize shorthand TeX:
# \fracab -> \frac{a}{b}
# \frac{abc}{bef} -> \frac{abc}{bef}
# \fracabc -> \frac{a}{b}c
# \sqrta -> \sqrt{a}
# \sqrtab -> sqrt{a}b
final_answer = re.sub(r'(frac)([^{])(.)', 'frac{\\2}{\\3}',
final_answer)
final_answer = re.sub(r'(sqrt)([^{])', 'sqrt{\\2}', final_answer)
final_answer = final_answer.replace('$', '')
# Normalize 100,000 -> 100000
if final_answer.replace(',', '').isdigit():
final_answer = final_answer.replace(',', '')
return final_answer
for maybe_ans in text.split('.'):
if 'final answer' in maybe_ans.lower():
return normalize_final_answer(maybe_ans)
return normalize_final_answer(text.split('.')[0])
# return normalize_final_answer(
# text.split('Final Answer: ', 1)[-1].split('\n\n')[0]) | null |
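A worked example of the normalization path above:

print(math_postprocess('We conclude the final answer is $\\frac{1}{2}$.'))
# -> '\\frac{1}{2}'  (the sentence containing 'final answer' is normalized
#    and the LaTeX between dollar signs is extracted)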
156,643 | import re
from datasets import DatasetDict, load_dataset
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from .base import BaseDataset
def flores_postprocess(text: str) -> str:
text = text.strip().split('\n')[0]
return text | null |
156,644 | import re
from datasets import DatasetDict, load_dataset
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from .base import BaseDataset
def flores_postprocess_chinese(text: str) -> str:
import jieba
truncated_text = text.strip().split('\n')[0]
cleaned_text = re.sub(r'\s+', ' ', truncated_text).strip()
cleaned_text = ' '.join(jieba.cut(cleaned_text))
return cleaned_text | null |
156,645 | import json
from datasets import Dataset
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from .base import BaseDataset
def cmrc_postprocess(text: str) -> str:
if '答案是' in text:
text = text.split('答案是')[1]
text = text.split("\n")[0]
# text = "".join(text.split("\n"))
text = text.strip()
return text | null |
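For example ('答案是' means 'the answer is'):

print(cmrc_postprocess('答案是北京。\n其他内容'))
# -> '北京。'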
156,646 | import json
import re
from datasets import Dataset
from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS, LOAD_DATASET
from .base import BaseDataset
# Reconstructed here so the assert below runs standalone; the original
# constant is defined elsewhere in the module and may list further types.
valid_gaokao_bench_question_types = [
    'single_choice', 'multi_choice', 'multi_question_choice',
    'five_out_of_seven'
]

class GaokaoBenchEvaluator(BaseEvaluator):
def __init__(self, question_type) -> None:
super().__init__()
assert question_type in valid_gaokao_bench_question_types
self.question_type = question_type
def do_predictions_postprocess(self, model_output, answer_lenth=None):
if self.question_type == 'single_choice':
model_answer = []
temp = re.findall(r'[A-D]', model_output[::-1])
if len(temp) != 0:
model_answer.append(temp[0])
elif self.question_type == 'multi_question_choice':
model_answer = []
temp = re.findall(r'【答案】\s*[::]*\s*[A-Z]', model_output)
if len(temp) == answer_lenth:
for t in temp:
model_answer.append(re.findall(r'[A-Z]', t)[0])
else:
temp = re.findall(r'[A-Z]', model_output)
if len(temp) > 0:
for k in range(min(len(temp), answer_lenth)):
model_answer.append(temp[k])
elif self.question_type == 'multi_choice':
model_answer = []
answer = ''
content = re.sub(r'\s+', '', model_output)
answer_index = content.find('【答案】')
if answer_index > 0:
temp = content[answer_index:]
if len(re.findall(r'[A-D]', temp)) > 0:
for t in re.findall(r'[A-D]', temp):
answer += t
else:
temp = content[-10:]
if len(re.findall(r'[A-D]', temp)) > 0:
for t in re.findall(r'[A-D]', temp):
answer += t
if len(answer) != 0:
model_answer.append(answer)
elif self.question_type == 'five_out_of_seven':
model_answer = []
temp = re.findall(r'[A-G]', model_output)
if len(temp) > 0:
for k in range(min(5, len(temp))):
model_answer.append(temp[k])
return model_answer
def ensure_same_length(self, pred, refr):
if len(pred) == len(refr):
return pred
return ['Z'] * len(refr)
def score(self, predictions, references):
if self.question_type not in [
'single_choice', 'multi_choice', 'multi_question_choice',
'five_out_of_seven'
]:
return {'score': 0}
elif self.question_type == 'multi_choice':
correct_score, total_score = 0, 0
for pred, refr in zip(predictions, references):
pred = self.do_predictions_postprocess(pred)
pred = self.ensure_same_length(pred, refr)
for p, r in zip(pred, refr):
if p == r:
correct_score += 2
else:
for i in p:
if i not in r:
break
else:
correct_score += 1
total_score += 2
return {'score': correct_score / total_score * 100}
else:
correct_score, total_score = 0, 0
for pred, refr in zip(predictions, references):
if self.question_type == 'multi_question_choice':
pred = self.do_predictions_postprocess(pred, len(refr))
else:
pred = self.do_predictions_postprocess(pred)
pred = self.ensure_same_length(pred, refr)
for p, r in zip(pred, refr):
if p == r:
correct_score += 1
total_score += 1
return {'score': correct_score / total_score * 100}
def _gaokao_register(question_type):
ICL_EVALUATORS.register_module(
name='GaokaoBenchEvaluator' + '_' + question_type,
module=lambda *args, **kwargs: GaokaoBenchEvaluator(
question_type=question_type, *args, **kwargs)) | null |
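A small scoring sketch for the single-choice type; the last A-D letter in the model output is taken as the prediction:

evaluator = GaokaoBenchEvaluator(question_type='single_choice')
preds = ['分析如下，所以选 B']   # hypothetical model output
refrs = [['B']]
print(evaluator.score(preds, refrs))  # -> {'score': 100.0}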
156,649 | import ast
import json
import os
import pandas as pd
import tiktoken
from tqdm import tqdm
from .constructions import ChatGPTSchema, ResultsForHumanSchema
from .utils import extract_answer, read_jsonl, save_jsonl
def convert_zero_shot(line, dataset_name):
    ...  # prompt-construction logic elided in the source

class ResultsForHumanSchema(object):

    def __init__(self,
                 index,
                 problem_input,
                 label,
                 model_input='',
                 model_output='',
                 parse_result='',
                 first_stage_output='',
                 second_stage_input='',
                 is_correct=False):
        ...  # field assignments elided in the source

    def to_dict(self):
        ...  # elided in the source

    def to_tsv(result_list, path):
        ...  # elided in the source

def read_jsonl(path):
    ...  # elided in the source; reads one JSON object per line
def load_dataset_as_result_schema(dataset_name, parent_path):
test_path = os.path.join(parent_path, dataset_name + '.jsonl')
loaded_jsonl = read_jsonl(test_path)
processed = []
for i, line in enumerate(loaded_jsonl):
problem_input = convert_zero_shot(line, dataset_name)
processed.append(
ResultsForHumanSchema(
index=i,
problem_input=problem_input,
label=line['label'] if line['label'] else line['answer'],
))
return processed | null |
156,655 | import os.path as osp
import tempfile
from typing import List
from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS, TEXT_POSTPROCESSORS
def humaneval_postprocess(text: str) -> str:
text = text.split('\n\n')[0]
if '```' in text:
text = text.split('```')[1]
if text.strip().startswith('def'):
text = '\n'.join(text.split('\n')[1:])
    # Re-indent the body with four spaces so it fits inside a function stub.
    if not text.startswith('    '):
        if text.startswith(' '):
            text = '    ' + text.lstrip()
        else:
            text = '\n'.join(['    ' + line for line in text.split('\n')])
return text | null |
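For example, a completion that repeats the function signature is reduced to its indented body:

print(humaneval_postprocess('def add(a, b):\n    return a + b'))
# -> '    return a + b'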
156,659 | import re
from opencompass.registry import TEXT_POSTPROCESSORS
def strategyqa_pred_postprocess(text: str) -> str:
text = text.split('\n\n')[0]
text = text.split('answer is ')[-1]
match = re.search(r'(yes|no)', text.lower())
if match:
return match.group(1)
return '' | null |
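For example:

print(strategyqa_pred_postprocess('So the answer is Yes.\n\nNext question'))
# -> 'yes'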
156,660 | import re
from opencompass.registry import TEXT_POSTPROCESSORS
def strategyqa_dataset_postprocess(text: str) -> str:
return 'yes' if str(text) == 'True' else 'no' | null |
156,661 | import json
from datasets import Dataset
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from .base import BaseDataset
def ReCoRD_postprocess(text: str) -> str:
text = text.strip().split('\n')[0].replace('Answer: ', '').strip()
return text | null |
156,662 | import json
from datasets import Dataset
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from .base import BaseDataset
def Xsum_postprocess(text: str) -> str:
text = text.strip().split('\n')[0].strip()
return text | null |
156,666 | from mmengine.logging import MMLogger
The provided code snippet includes necessary dependencies for implementing the `get_logger` function. Write a Python function `def get_logger(log_level='INFO') -> MMLogger` to solve the following problem:
Get the logger for OpenCompass. Args: log_level (str): The log level. Default: 'INFO'. Choices are 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
Here is the function:
def get_logger(log_level='INFO') -> MMLogger:
"""Get the logger for OpenCompass.
Args:
log_level (str): The log level. Default: 'INFO'. Choices are 'DEBUG',
'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
"""
return MMLogger.get_instance('OpenCompass',
logger_name='OpenCompass',
log_level=log_level) | Get the logger for OpenCompass. Args: log_level (str): The log level. Default: 'INFO'. Choices are 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'. |
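Usage is a one-liner; MMLogger keeps a single named instance, so repeated calls return the same logger object:

logger = get_logger(log_level='DEBUG')
logger.info('evaluation started')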
156,667 | from typing import Dict, List, Union
from datasets import Dataset, DatasetDict
def _check_type_list(obj, typelist: List):
for _type in typelist:
if _type is None:
if obj is None:
return obj
elif isinstance(obj, _type):
return obj
raise TypeError(
f'Expected an object in {[_.__name__ if _ is not None else None for _ in typelist]} type, but got {obj}' # noqa
) | null |
156,668 | from typing import Dict, List, Union
from datasets import Dataset, DatasetDict
def _check_dataset(obj) -> Union[Dataset, DatasetDict]:
if isinstance(obj, Dataset) or isinstance(obj, DatasetDict):
return obj
else:
raise TypeError(
f'Expected a datasets.Dataset or a datasets.DatasetDict object, but got {obj}' # noqa
) | null |
156,669 | from typing import Dict, List, Union
from datasets import Dataset, DatasetDict
def _check_list(obj) -> List:
if isinstance(obj, List):
return obj
else:
raise TypeError(f'Expected a List object, but got {obj}') | null |
156,670 | from typing import Dict, List, Union
from datasets import Dataset, DatasetDict
def _check_str(obj) -> str:
if isinstance(obj, str):
return obj
else:
raise TypeError(f'Expected a str object, but got {obj}') | null |
156,671 | from typing import Dict, List, Union
from datasets import Dataset, DatasetDict
def _check_dict(obj) -> Dict:
if isinstance(obj, Dict):
return obj
else:
raise TypeError(f'Expected a Dict object, but got {obj}') | null |
156,672 | import subprocess
def get_git_root() -> str:
cmd = ['git', 'rev-parse', '--show-toplevel']
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return result.stdout.decode('utf-8').strip() | null |
156,673 | import subprocess
def get_latest_commit(branch: str) -> str:
cmd = ['git', 'rev-parse', branch]
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return result.stdout.decode('utf-8').strip() | null |
156,674 | import re
from opencompass.registry import TEXT_POSTPROCESSORS
def general_postprocess(text: str) -> str:
# Cut off the first newline, period, or comma
truncated_text = re.split(r'[\n.,]', text, 1)[0]
# Remove punctuation
no_punctuation = re.sub(r'[^\w\s]', '', truncated_text)
# Remove article
no_articles = re.sub(r'\b(a|an|the)\b',
'',
no_punctuation,
flags=re.IGNORECASE)
# Remove duplicated blank spaces
cleaned_text = re.sub(r'\s+', ' ', no_articles).strip()
return cleaned_text | null |
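For example:

print(general_postprocess('The answer is an apple, obviously.\nMore text'))
# -> 'answer is apple'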
156,675 | import re
from opencompass.registry import TEXT_POSTPROCESSORS
def general_cn_postprocess(text: str) -> str:
truncated_text = re.split(r'[\n.,]', text, 1)[0]
no_punctuation = re.sub(r'[^\w\s]', '', truncated_text)
no_articles = re.sub(r'\b(a|an|the)\b',
'',
no_punctuation,
flags=re.IGNORECASE)
cleaned_text = re.sub(r'\s+', ' ', no_articles).strip()
    import jieba
    # jieba re-segments the original `text`, so the normalized string
    # built above is overwritten here.
    cleaned_text = ' '.join(jieba.cut(text))
return cleaned_text | null |
156,676 | import re
from opencompass.registry import TEXT_POSTPROCESSORS
def first_capital_postprocess(text: str) -> str:
for t in text:
if t.isupper():
return t
return '' | null |
156,677 | import re
from opencompass.registry import TEXT_POSTPROCESSORS
def first_capital_postprocess_multi(text: str) -> str:
match = re.search(r'([A-D]+)', text)
if match:
return match.group(1)
return '' | null |
156,679 | import os.path as osp
from typing import Dict
from mmengine.config import ConfigDict
def model_abbr_from_cfg(cfg: ConfigDict) -> str:
"""Generate model abbreviation from the model's confg."""
if 'abbr' in cfg:
return cfg['abbr']
model_abbr = cfg['type'] + '_' + '_'.join(
osp.realpath(cfg['path']).split('/')[-2:])
model_abbr = model_abbr.replace('/', '_')
return model_abbr
def dataset_abbr_from_cfg(cfg: ConfigDict) -> str:
"""Returns dataset abbreviation from the dataset's confg."""
if 'abbr' in cfg:
return cfg['abbr']
dataset_abbr = cfg['path']
if 'name' in cfg:
dataset_abbr += '_' + cfg['name']
dataset_abbr = dataset_abbr.replace('/', '_')
return dataset_abbr
The provided code snippet includes necessary dependencies for implementing the `task_abbr_from_cfg` function. Write a Python function `def task_abbr_from_cfg(task: Dict) -> str` to solve the following problem:
Returns task abbreviation from the task's config.
Here is the function:
def task_abbr_from_cfg(task: Dict) -> str:
"""Returns task abbreviation from the task's confg."""
return '[' + ','.join([
f'{model_abbr_from_cfg(model)}/'
f'{dataset_abbr_from_cfg(dataset)}'
for i, model in enumerate(task['models'])
for dataset in task['datasets'][i]
    ]) + ']' | Returns task abbreviation from the task's config.
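For instance, explicit `abbr` fields short-circuit the helpers, so a one-model/one-dataset task renders as:

task = {
    'models': [{'abbr': 'llama-7b'}],
    'datasets': [[{'abbr': 'gsm8k'}]],
}
print(task_abbr_from_cfg(task))  # -> '[llama-7b/gsm8k]'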
156,680 | import os.path as osp
from typing import Dict
from mmengine.config import ConfigDict
def model_abbr_from_cfg(cfg: ConfigDict) -> str:
def dataset_abbr_from_cfg(cfg: ConfigDict) -> str:
def get_infer_output_path(model_cfg: ConfigDict,
dataset_cfg: ConfigDict,
root_path: str = None,
file_extension: str = 'json') -> str:
# TODO: Rename this func
assert root_path is not None, 'default root_path is not allowed any more'
model_abbr = model_abbr_from_cfg(model_cfg)
dataset_abbr = dataset_abbr_from_cfg(dataset_cfg)
return osp.join(root_path, model_abbr, f'{dataset_abbr}.{file_extension}') | null |
156,682 | from __future__ import annotations
import hashlib
import json
from copy import deepcopy
from typing import Dict, List, Union
from mmengine.config import ConfigDict
The provided code snippet includes necessary dependencies for implementing the `get_prompt_hash` function. Write a Python function `def get_prompt_hash(dataset_cfg: Union[ConfigDict, List[ConfigDict]]) -> str` to solve the following problem:
Get the hash of the prompt configuration. Args: dataset_cfg (ConfigDict or list[ConfigDict]): The dataset configuration. Returns: str: The hash of the prompt configuration.
Here is the function:
def get_prompt_hash(dataset_cfg: Union[ConfigDict, List[ConfigDict]]) -> str:
"""Get the hash of the prompt configuration.
Args:
dataset_cfg (ConfigDict or list[ConfigDict]): The dataset
configuration.
Returns:
str: The hash of the prompt configuration.
"""
if isinstance(dataset_cfg, list):
if len(dataset_cfg) == 1:
dataset_cfg = dataset_cfg[0]
else:
hashes = ','.join([get_prompt_hash(cfg) for cfg in dataset_cfg])
hash_object = hashlib.sha256(hashes.encode())
return hash_object.hexdigest()
if 'reader_cfg' in dataset_cfg.infer_cfg:
# new config
reader_cfg = dict(type='DatasetReader',
input_columns=dataset_cfg.reader_cfg.input_columns,
output_column=dataset_cfg.reader_cfg.output_column)
dataset_cfg.infer_cfg.reader = reader_cfg
if 'train_split' in dataset_cfg.infer_cfg.reader_cfg:
dataset_cfg.infer_cfg.retriever[
'index_split'] = dataset_cfg.infer_cfg['reader_cfg'][
'train_split']
if 'test_split' in dataset_cfg.infer_cfg.reader_cfg:
dataset_cfg.infer_cfg.retriever[
'test_split'] = dataset_cfg.infer_cfg.reader_cfg.test_split
for k, v in dataset_cfg.infer_cfg.items():
dataset_cfg.infer_cfg[k]['type'] = v['type'].split('.')[-1]
d_json = json.dumps(dataset_cfg.infer_cfg.to_dict(), sort_keys=True)
hash_object = hashlib.sha256(d_json.encode())
return hash_object.hexdigest() | Get the hash of the prompt configuration. Args: dataset_cfg (ConfigDict or list[ConfigDict]): The dataset configuration. Returns: str: The hash of the prompt configuration. |
156,683 | import copy
from mmengine.config import ConfigDict
from opencompass.registry import LOAD_DATASET, MODELS
def build_dataset_from_cfg(dataset_cfg: ConfigDict) -> ConfigDict:
dataset_cfg = copy.deepcopy(dataset_cfg)
dataset_cfg.pop('infer_cfg', None)
dataset_cfg.pop('eval_cfg', None)
dataset_cfg.pop('abbr', None)
return LOAD_DATASET.build(dataset_cfg) | null |
156,684 | import copy
from mmengine.config import ConfigDict
from opencompass.registry import LOAD_DATASET, MODELS
def build_model_from_cfg(model_cfg: ConfigDict) -> ConfigDict:
model_cfg = copy.deepcopy(model_cfg)
model_cfg.pop('run_cfg', None)
model_cfg.pop('max_out_len', None)
model_cfg.pop('batch_size', None)
model_cfg.pop('abbr', None)
return MODELS.build(model_cfg) | null |
156,685 | import argparse
import copy
import json
import os.path as osp
import mmengine
from mmengine.config import Config, ConfigDict
from mmengine.utils import mkdir_or_exist
from tqdm import tqdm
from opencompass.registry import TEXT_POSTPROCESSORS
from opencompass.utils import build_dataset_from_cfg, get_infer_output_path
def parse_args():
parser = argparse.ArgumentParser(description='Run an evaluation task')
parser.add_argument('config', help='Train config file path')
parser.add_argument(
'-f',
'--force',
help='Force to run the task even if the results already exist',
action='store_true',
default=False)
parser.add_argument('-w',
'--work-dir',
help='Work path, all the outputs will be '
'saved in this path, including the slurm logs, '
                        'the evaluation results, the summary results, etc. '
'If not specified, the work_dir will be set to '
'./outputs/default.',
default=None,
type=str)
args = parser.parse_args()
return args | null |
156,687 | import argparse
import fnmatch
from typing import Dict
from mmengine.config import Config, ConfigDict
from opencompass.openicl.icl_inferencer import (CLPInferencer, GenInferencer,
PPLInferencer)
from opencompass.registry import ICL_PROMPT_TEMPLATES, ICL_RETRIEVERS
from opencompass.utils import (Menu, build_dataset_from_cfg,
build_model_from_cfg, dataset_abbr_from_cfg,
model_abbr_from_cfg)
def parse_args():
parser = argparse.ArgumentParser(description='Run an evaluation task')
parser.add_argument('config', help='Train config file path')
parser.add_argument('-n', '--non-interactive', action='store_true')
parser.add_argument('-a', '--all', action='store_true')
parser.add_argument('-p',
'--pattern',
type=str,
help='To match the dataset abbr.')
args = parser.parse_args()
return args | null |
156,688 | import argparse
import fnmatch
from typing import Dict
from mmengine.config import Config, ConfigDict
from opencompass.openicl.icl_inferencer import (CLPInferencer, GenInferencer,
PPLInferencer)
from opencompass.registry import ICL_PROMPT_TEMPLATES, ICL_RETRIEVERS
from opencompass.utils import (Menu, build_dataset_from_cfg,
build_model_from_cfg, dataset_abbr_from_cfg,
model_abbr_from_cfg)
def parse_model_cfg(model_cfg: ConfigDict) -> Dict[str, ConfigDict]:
model2cfg = {}
for model in model_cfg:
model2cfg[model_abbr_from_cfg(model)] = model
return model2cfg | null |
156,689 | import argparse
import fnmatch
from typing import Dict
from mmengine.config import Config, ConfigDict
from opencompass.openicl.icl_inferencer import (CLPInferencer, GenInferencer,
PPLInferencer)
from opencompass.registry import ICL_PROMPT_TEMPLATES, ICL_RETRIEVERS
from opencompass.utils import (Menu, build_dataset_from_cfg,
build_model_from_cfg, dataset_abbr_from_cfg,
model_abbr_from_cfg)
def parse_dataset_cfg(dataset_cfg: ConfigDict) -> Dict[str, ConfigDict]:
dataset2cfg = {}
for dataset in dataset_cfg:
dataset2cfg[dataset_abbr_from_cfg(dataset)] = dataset
return dataset2cfg | null |