index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
40,891
|
lakshmaiah94k/kanchi
|
refs/heads/master
|
/serializers.py
|
from rest_framework import serializers
from .models import StudentApp, StudentReg, Staff, Department
from django.contrib.auth.models import User
class StudentAppSerializer(serializers.ModelSerializer):
    """Serializes StudentApp records (applications with memo uploads)."""

    class Meta:
        model = StudentApp
        fields = ["id", "student_name", "email", "ssc_memo", "inter_memo"]

    def create(self, validated_data):
        """Persist a new StudentApp row from validated input."""
        new_application = StudentApp.objects.create(**validated_data)
        return new_application
class StudentRegSerializer(serializers.ModelSerializer):
    """Serializes StudentReg records (confirmed student registrations)."""

    class Meta:
        model = StudentReg
        fields = ["id", "student_apps", "student_name", "student_email", "student_father_name", "student_mother_name",
                  "student_mobile", "student_profile_photo", "department", "user"]

    def create(self, validated_data):
        """Persist a new StudentReg row from validated input."""
        return StudentReg.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply partial updates field by field.

        The linked auth ``user`` is deliberately left untouched, matching
        the original behaviour.
        """
        updatable_fields = (
            'student_apps',
            'student_name',
            'student_email',
            'student_father_name',
            'student_mother_name',
            'student_mobile',
            'student_profile_photo',
            'department',
        )
        for field in updatable_fields:
            # Fall back to the current value when the payload omits a field.
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
class StaffSerializer(serializers.ModelSerializer):
    """Serializes Staff records.

    NOTE(review): ``password`` is a plain CharField, so it is echoed back
    in API responses — consider ``write_only=True`` (verify callers first).
    """

    password = serializers.CharField()

    class Meta:
        model = Staff
        fields = ["id", "staff_name", "staff_email", "staff_father_name", "staff_mother_name",
                  "staff_profile_photo", "staff_mobile", "department", "user", "password"]

    def create(self, validated_data):
        """Persist a new Staff row from validated input."""
        return Staff.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply partial updates field by field, including the linked user."""
        updatable_fields = (
            'staff_name',
            'staff_email',
            'staff_father_name',
            'staff_mother_name',
            'staff_profile_photo',
            'staff_mobile',
            'department',
            'user',
        )
        for field in updatable_fields:
            # Fall back to the current value when the payload omits a field.
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
class DepartmentSerializer(serializers.ModelSerializer):
    # Read-only style serializer: exposes only the department display name.
    class Meta:
        model = Department
        fields = ["department_name"]
class UserSerializer(serializers.ModelSerializer):
    """Serializes Django auth users for sign-up."""

    class Meta:
        model = User
        fields = ["username", "password"]

    def create(self, validated_data):
        """Create the user via create_user() so the password gets hashed."""
        new_user = User.objects.create_user(**validated_data)
        return new_user
|
{"/api.py": ["/serializers.py"]}
|
40,892
|
lakshmaiah94k/kanchi
|
refs/heads/master
|
/api.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import StudentAppSerializer, StudentRegSerializer, StaffSerializer, DepartmentSerializer, UserSerializer
from .models import StudentReg, StudentApp, Staff, Department
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from rest_framework.permissions import IsAuthenticated
class StudentAppView(APIView):
    """List and create StudentApp records."""

    def get(self, request, format=None):
        """Return every student application."""
        applications = StudentApp.objects.all()
        serializer = StudentAppSerializer(applications, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create an application from the request payload."""
        serializer = StudentAppSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class StudentRegView(APIView):
    """List, create and update StudentReg records."""

    def get(self, request):
        """Return every registered student."""
        registrations = StudentReg.objects.all()
        serializer = StudentRegSerializer(registrations, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Register a student.

        Registration is only accepted when a *verified* StudentApp exists
        for the submitted email address.
        """
        serializer = StudentRegSerializer(data=request.data)
        if serializer.is_valid():
            apps = StudentApp.objects.filter(
                email=serializer.validated_data['student_email'],
                is_verified=True)
            if not apps.exists():
                # No verified application for this email -> reject.
                return Response(status=status.HTTP_400_BAD_REQUEST)
            # NOTE(review): this stores the matching StudentApp *instance*
            # in the 'student_email' slot, mirroring the original code —
            # confirm the model expects an object here (it looks like the
            # intended key may have been 'student_apps').
            serializer.validated_data["student_email"] = apps.first()
            # BUG FIX: the original called validated_data.pop() with no
            # arguments, which always raises TypeError on a dict; the call
            # could never have worked and has been removed.
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def put(self, request, pk):
        """Fully update the registration with primary key *pk*."""
        registration = StudentReg.objects.get(id=pk)
        serializer = StudentRegSerializer(instance=registration, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class DepartmentView(APIView):
    """Read-only listing of departments."""

    def get(self, request):
        """Return every department."""
        departments = Department.objects.all()
        serializer = DepartmentSerializer(departments, many=True)
        return Response(serializer.data)
class StaffView(APIView):
    """List, create, update and delete Staff records."""

    def get(self, request):
        """Return every staff member."""
        members = Staff.objects.all()
        serializer = StaffSerializer(members, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create a staff member from the request payload."""
        serializer = StaffSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def put(self, request, pk):
        """Fully update the staff member with primary key *pk*."""
        member = Staff.objects.get(id=pk)
        serializer = StaffSerializer(instance=member, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk):
        """Delete the staff member with primary key *pk*."""
        member = Staff.objects.get(id=pk)
        member.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserView(APIView):
    """Create Django auth users."""

    def post(self, request):
        """Create a user; the serializer hashes the password."""
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
{"/api.py": ["/serializers.py"]}
|
40,936
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/faafo/api/__init__.py
|
from pkg_resources import resource_filename
import flask
def create_app(**kwargs):
    """Build and configure the faafo Flask application.

    Accepts an optional ``config_dict`` keyword whose entries are merged
    into ``app.config`` before the database tables are created.
    """
    template_path = resource_filename(__name__, "templates")
    app = flask.Flask('faafo.api', template_folder=template_path)

    # Bind the SQLAlchemy extension to this application instance.
    from .models import get_db, get_api
    db = get_db()
    db.init_app(app)

    config_dict = kwargs.get('config_dict', None)
    with app.app_context():
        manager = get_api(db)
        if config_dict:
            app.config.update(config_dict)
        # Inside the context the extensions know which app is "current",
        # so table creation and the REST manager can be initialised here.
        db.create_all()
        manager.init_app(app)

    # Plain Flask views.
    from .service import mainPage
    app.register_blueprint(mainPage)
    return app
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,937
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/faafo/worker/service.py
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# based on http://code.activestate.com/recipes/577120-julia-fractals/
import base64
import copy
import hashlib
import json
import os
import random
import tempfile
import time
import socket
import subprocess as subp
import shlex
from kombu.mixins import ConsumerMixin
import requests
endpoint_url = 'TEMP'
class VideoConverter(object):
    """Converts a video file to MPEG-4/PCM using mencoder.

    The conversion is kicked off from the constructor, matching the
    original behaviour that the test suite relies on.
    """

    def __init__(self, source, destination):
        self.source = source              # path of the input video
        self.destination = destination    # path the converted file is written to
        self.convert()

    def convert(self):
        """Run mencoder synchronously on the source file."""
        # ROBUSTNESS FIX: build the command as an argv list instead of
        # splitting a whitespace-joined template string, so source and
        # destination paths containing spaces survive intact.  shell=False
        # semantics (a list) also avoid any shell-injection surface.
        cmd = [
            'mencoder', self.source,
            '-ovc', 'lavc',
            '-lavcopts', 'vcodec=mpeg4:vbitrate=3000',
            '-oac', 'pcm',
            '-o', self.destination,
        ]
        print('Convert command')
        print(' '.join(cmd))
        print("Converting video file")
        subp.call(cmd)
class Worker(ConsumerMixin):
    '''
    Kombu consumer that reports finished conversion jobs back to the API.
    '''

    def __init__(self, connection, task_queue, endpoint):
        self.connection = connection
        self.endpoint = endpoint
        self.task_queue = task_queue

    def get_consumers(self, Consumer, channel):
        # A single consumer on the task queue accepting JSON payloads.
        consumer = Consumer(queues=self.task_queue,
                            accept=['json'],
                            callbacks=[self.process])
        return [consumer]

    def process(self, task, message):
        """Mark the task as done (status 2), PUT it to the API, ack it."""
        result = {
            'uuid': task['uuid'],
            'url': socket.gethostname(),
            'status' : 2, # Ready
        }
        # NOTE(berendt): only necessary when using requests < 2.4.2
        headers = {'Content-type': 'application/json',
                   'Accept': 'text/plain'}
        target = "%s/v1/queue/%s" % (self.endpoint, str(task['uuid']))
        requests.put(target, json.dumps(result), headers=headers)
        message.ack()
        return result
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,938
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/faafo/client.py
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from pprint import pprint
import time
import shutil
MAX_WAIT_CYCLES = 10
WAIT_BETWEEN_CYCLES = 0.1
OUTPUTFILE = "output.avi"
def wait_to_transfer(url):
    '''
    Poll *url* until the resource reports ready, without waiting forever.

    The service answers 200 while the job is still pending and 303 once it
    is ready.  At most MAX_WAIT_CYCLES polls are made.

    Returns the decoded JSON body of the last response.
    Raises RuntimeError on an unexpected status code.
    '''
    response = None
    # BUG FIX: the original condition used ``or`` (``waiting or cycles <
    # MAX``), so it always burned all MAX_WAIT_CYCLES polls and would loop
    # *forever* if the service kept answering 303.  The local flag also
    # shadowed the function's own name.
    for _cycle in range(MAX_WAIT_CYCLES):
        response = requests.get(url)
        if response.status_code == 303:
            # Ready — stop polling.
            break
        elif response.status_code == 200:
            # Still pending; back off briefly before the next poll.
            time.sleep(WAIT_BETWEEN_CYCLES)
        else:
            # BUG FIX: StandardError does not exist on Python 3 (the
            # original raise would itself NameError).
            raise RuntimeError(
                'unexpected status %d from %s' % (response.status_code, url))
    return response.json()
def send_file(url, file_to_send):
    """POST *file_to_send* to *url* as a multipart upload.

    BUG FIX: the original opened the file inline and never closed it,
    leaking the handle; the with-block guarantees closure once the
    request has been sent.
    """
    with open(file_to_send, 'rb') as handle:
        return requests.post(url, files={'file': handle})
def make_one_request(url, file_to_send):
    """Drive one full cycle: request a slot, upload, wait, download.

    Returns the local path of the converted video (OUTPUTFILE).
    Raises RuntimeError if the service's 202 answer carries no queue
    location.
    """
    session = requests.session()
    response = session.get(url)
    # 202 Accepted: the service handed out a queue location to poll.
    if response.status_code == 202:
        data = response.json()
        if 'location' not in data:
            # BUG FIX: StandardError does not exist on Python 3 (the
            # original raise would itself NameError).
            raise RuntimeError('202 response without a queue location')
        url_status = url + data['location']
        # Wait until the service is ready to receive the file.
        data = wait_to_transfer(url_status)
        # Upload the file, then wait for the conversion to finish.
        resp_upload = send_file(url_status, file_to_send)
        info_upload = resp_upload.json()
        url_wait_conversion = url + info_upload['download_wait']
        data = wait_to_transfer(url_wait_conversion)
        # Stream the converted video into the local output file.
        url_download = url + data['download']
        response_converted_video = session.get(url_download, stream = True)
        with open(OUTPUTFILE, "wb") as handle:
            shutil.copyfileobj(response_converted_video.raw, handle)
    # NOTE(review): as in the original, OUTPUTFILE is returned even when
    # the initial request was not accepted and nothing was downloaded —
    # callers should check that the file exists/is fresh.
    return OUTPUTFILE
def main():
    """Command-line entry point (not implemented yet)."""
    return None
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,939
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/faafo/api/models.py
|
from flask_sqlalchemy import SQLAlchemy
from flask_restless import APIManager
def get_db():
    """Return a fresh, not-yet-bound SQLAlchemy extension instance."""
    database = SQLAlchemy()
    return database
def get_api(db):
    """Create the Queue model plus a Flask-Restless manager exposing it.

    Intended to be called inside an application context; the returned
    manager is initialised against the app later via ``init_app``.
    """

    class Queue(db.Model):
        # One row per submitted conversion job.
        uuid = db.Column(db.String(36), primary_key=True)
        url = db.Column(db.String(256), nullable=True)
        status = db.Column(db.Integer, nullable=True)

        def __repr__(self):
            return '<Queue %s>' % self.uuid

    api_manager = APIManager(flask_sqlalchemy_db=db)
    # Full CRUD at /v1/queue.
    api_manager.create_api(
        Queue,
        methods=['GET', 'POST', 'DELETE', 'PUT'],
        collection_name = 'queue',
        url_prefix='/v1')
    return api_manager
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,940
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/test/test_client.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from sure import expect
import unittest
import sys
import tempfile
import os
from faafo import client
class TestRest(unittest.TestCase):
    """End-to-end client tests against a mocked HTTP service."""

    @httpretty.activate
    def test_rest(self):
        """Full request/upload/convert/download round trip."""
        url = "http://localhost:8090/"
        queue_location = "queue/12345"
        body = '{"location": "%s"}' % queue_location
        # The initial GET is accepted and hands out a queue location.
        httpretty.register_uri(
            httpretty.GET, url,
            body=body,
            status = 202,
            content_type="application/json"
        )
        # Queue polls: still pending (200) first, then ready (303).
        responses=[
            httpretty.Response(
                body='{"status": "pending"}',
                status=200,
                content_type="application/json",
            ),
            httpretty.Response(
                body='{"status": "ready"}',
                status=303,
                content_type="application/json",
            ),
        ]
        httpretty.register_uri(
            httpretty.GET, url + queue_location,
            responses = responses
        )
        # The upload phase answers with a second location to wait on.
        queue_location_conversion = "queue/45678"
        httpretty.register_uri(
            httpretty.POST, url + queue_location,
            status = 200,
            body='{"download_wait" : "%s"}' % queue_location_conversion,
            content_type="application/json"
        )
        # Conversion polls: pending first, then a download path.
        responses=[
            httpretty.Response(
                body='{"status": "pending"}',
                status=200,
                content_type="application/json",
            ),
            httpretty.Response(
                body='{"download": "mypath"}',
                status=303,
                content_type="application/json",
            ),
        ]
        httpretty.register_uri(
            httpretty.GET, url + queue_location_conversion,
            responses = responses
        )
        # The converted "video" itself.
        httpretty.register_uri(
            httpretty.GET, url + "mypath",
            body = 'RANDOM TEXT',
            content_type="application/json",
        )
        # Create the file to stream
        f = tempfile.NamedTemporaryFile(delete=False)
        path = f.name
        # BUG FIX: NamedTemporaryFile defaults to binary mode, so writing
        # a str raised TypeError on Python 3 — write bytes instead.
        f.write(b"Hello World!\n")
        f.close()
        response = client.make_one_request(url, path)
        with open(response, 'r') as myfile:
            data=myfile.read().replace('\n', '')
        self.assertEqual(data, 'RANDOM TEXT')
        os.unlink(path)
        os.unlink(response)

    @httpretty.activate
    def test_wait(self):
        """wait_to_transfer returns the body of the ready (303) answer."""
        url = "http://localhost:8090/"
        queue_location = "queue/12345"
        responses=[
            httpretty.Response(
                body='{"status": "pending"}',
                status=200,
                content_type="application/json",
            ),
            httpretty.Response(
                body='{"status": "ready"}',
                status=303,
                content_type="application/json",
            ),
        ]
        httpretty.register_uri(
            httpretty.GET, url + queue_location,
            responses = responses
        )
        data = client.wait_to_transfer(url + queue_location)
        self.assertDictEqual(data, {'status':'ready'})

    @httpretty.activate
    def test_upload(self):
        """send_file POSTs the file and relays the service's answer."""
        url = "http://localhost:8090/queue/12345"
        body = '{"status": "uploading"}'
        httpretty.register_uri(
            httpretty.POST, url,
            status = 200,
            body=body,
            content_type="application/json"
        )
        # Create the file to stream
        f = tempfile.NamedTemporaryFile(delete=False)
        path = f.name
        # BUG FIX: write bytes — the temp file is opened in binary mode.
        f.write(b"Hello World!\n")
        f.close()
        response = client.send_file(url, path)
        expect(response.text).to.equal(body)
        expect(httpretty.last_request().method).to.equal("POST")
        # How to test that we got the right answer?
        os.unlink(path)
if __name__ == '__main__':
unittest.main()
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,941
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/test/test_api.py
|
import flask
import unittest
import json
import uuid
import os
import tempfile
from faafo.api import create_app
class FlaskrTestCase(unittest.TestCase):
    """API tests run against a throwaway on-disk SQLite database."""

    def setUp(self):
        # One fresh SQLite file per test.
        self.db_fd , self.filepath_db = tempfile.mkstemp()
        options = {
            'config_dict' : {
                'DEBUG' : True,
                'SQLALCHEMY_DATABASE_URI' : 'sqlite:///' + self.filepath_db,
                'TESTING' : True,
                'SQLALCHEMY_TRACK_MODIFICATIONS' : False
            }
        }
        self.app = create_app(**options)

    def tearDown(self):
        # BUG FIX: the descriptor was only closed when the file still
        # existed, leaking it otherwise — close it unconditionally.
        os.close(self.db_fd)
        if os.path.isfile(self.filepath_db):
            os.unlink(self.filepath_db)

    def test_simple(self):
        """The index hands out 202 Accepted."""
        with self.app.test_client() as c:
            resp = c.get('/')
            self.assertEqual(resp.status_code, 202)

    def test_check_queue(self):
        """An unknown job id yields 404."""
        with self.app.test_client() as client:
            my_uuid = str(uuid.uuid4())
            resp = client.get('/queue/' + my_uuid)
            self.assertEqual(resp.status_code, 404)

    def test_post_db(self):
        """Create then update one queue row through the REST API."""
        with self.app.test_client() as client:
            my_uuid = str(uuid.uuid4())
            ref = {
                'uuid' : my_uuid,
                'url' : 'bajs',
                'status' : 1
            }
            headers = {'content-type': 'application/json'}
            # Create a new resource
            resp_post = client.post('/v1/queue' , data= json.dumps(ref),
                                    headers = headers)
            self.assertEqual(resp_post.status_code, 201)
            resp_get = client.get('/v1/queue/' + my_uuid)
            self.assertEqual(resp_get.status_code, 200)
            self.assertDictEqual(json.loads(resp_get.data), ref)
            # Update the resource
            ref2 = {
                'uuid' : my_uuid,
                'url' : 'bajs',
                'status' : 2
            }
            resp_put = client.put('/v1/queue/' + my_uuid ,
                                  data= json.dumps(ref2),
                                  headers = headers)
            self.assertEqual(resp_put.status_code, 200)
            resp_get2 = client.get('/v1/queue/' + my_uuid)
            self.assertEqual(resp_get2.status_code, 200)
            self.assertDictEqual(json.loads(resp_get2.data), ref2)
if __name__ == '__main__':
unittest.main()
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,942
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/faafo/queues.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import kombu
from oslo_config import cfg
task_exchange = kombu.Exchange('tasks', type='direct')
task_queue = kombu.Queue('normal', task_exchange, routing_key='normal')
queues_opts = [
cfg.StrOpt('transport-url',
default='amqp://guest:guest@localhost:5672//',
help='AMQP connection URL.')
]
cfg.CONF.register_opts(queues_opts)
def list_opts():
    """Entry point for oslo-config-generator."""
    # Deep-copy so the generator cannot mutate the registered options.
    opts_copy = copy.deepcopy(queues_opts)
    return [(None, opts_copy)]
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,943
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/faafo/api/service.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import flask
from flask import jsonify
from kombu import Connection
from kombu.pools import producers
from sqlalchemy.dialects import mysql
from faafo import queues
from faafo import version
mainPage = flask.Blueprint('index',__name__)
@mainPage.route('/', methods=['GET'])
def index():
    """Hand out a fresh job id and the queue path the client should poll."""
    # Random identifier for this conversion job.
    my_uuid = str(uuid.uuid4())
    payload = {'location' : 'queue/' + my_uuid}
    # 202 Accepted: the work has not been performed yet.
    return jsonify(payload), 202
@mainPage.route('/queue/<string:jobid>', methods=['GET'])
def check_worker_queue(jobid):
    """Report the state of job *jobid*.

    NOTE(review): the branch conditions are literal placeholders
    (``if True`` / ``elif False``), so only the 404 branch is ever taken
    until a real database lookup is wired in.
    """
    # Check in database for job.
    if True:
        # Job does not exists
        response = flask.jsonify({'code': 404,
                                  'status': 'job not found'})
        response.status_code = 404
    elif False:
        # Job is not ready to be processed
        response = flask.jsonify({'code': 200,
                                  'status': 'pending'})
        response.status_code = 200
    else:
        # Here the job is ready to be processed.
        response = flask.jsonify({'code': 303,
                                  'status': 'pending'})
        response.status_code = 303
    return response
def generate_fractal(**kwargs):
    """Publish a task result onto the AMQP task exchange.

    Expects ``kwargs['result']`` (the JSON-serialisable payload) and
    ``kwargs['connection']`` (a kombu Connection).
    """
    # BUG FIX: ``connection`` was a free variable defined nowhere in this
    # module, so every call raised NameError.  It is now taken from
    # kwargs — confirm against callers once one exists.
    connection = kwargs['connection']
    with producers[connection].acquire(block=True) as producer:
        producer.publish(kwargs['result'],
                         serializer='json',
                         exchange=queues.task_exchange,
                         declare=[queues.task_exchange],
                         routing_key='normal')
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
40,944
|
hcarlsso/CloudVideo
|
refs/heads/master
|
/test/test_video_conversion.py
|
import unittest
import tempfile
import os
import hashlib
import httpretty
import uuid
import kombu
from faafo.worker.service import VideoConverter
from faafo.worker.service import Worker
class TestVideoConversion(unittest.TestCase):
    """Checks the mencoder-based conversion end to end."""

    def test_simple(self):
        """Convert the bundled sample clip and compare its checksum."""
        fd_output, filepath_output = tempfile.mkstemp()
        here = os.path.dirname(__file__)
        path_input_video = os.sep.join([here, 'SampleVideo_1280x720_1mb.mkv'])
        # The constructor runs the conversion.
        VideoConverter(path_input_video, filepath_output)
        ref_sum = '23dd5b552d16b53de705e1762965da9a2a843427aec147aa1d22f48face6d88a'
        with open(filepath_output, 'rb') as converted:
            out_sum = hashlib.sha256(converted.read()).hexdigest()
        self.assertEqual(out_sum, ref_sum)
        os.close(fd_output)
        os.unlink(filepath_output)
class TestWorker(unittest.TestCase):
    """Checks that the worker reports results back over HTTP."""

    @httpretty.activate
    def test_prepare_for_job(self):
        """process() should PUT the result to the API and return it."""
        my_uuid = str(uuid.uuid4())
        base_url = "http://localhost:8090"
        httpretty.register_uri(
            httpretty.PUT,
            base_url + "/v1/queue/" + my_uuid,
            body='{}',
            status = 200,
            content_type="application/json"
        )
        task = {
            'uuid' : my_uuid
        }
        # In-memory broker so no real AMQP server is needed.
        conn = kombu.Connection("memory://")
        queue = conn.SimpleQueue('myqueue')
        queue.put('test')
        msg = queue.get(timeout=1)
        worker = Worker(conn, queue, base_url)
        res = worker.process(task, msg)
        expected = {
            'uuid' : my_uuid,
            'status' : 2,
            'url' : 'localhost'
        }
        self.assertDictEqual(res, expected)
if __name__ == '__main__':
unittest.main()
|
{"/faafo/api/__init__.py": ["/faafo/api/models.py", "/faafo/api/service.py"], "/test/test_api.py": ["/faafo/api/__init__.py"], "/test/test_video_conversion.py": ["/faafo/worker/service.py"]}
|
41,237
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/test.py
|
from __init__ import *
from reverse_image_features import *
# Load the pre-computed feature matrix: one row per image, column 0 is the
# image name, the remaining columns are the extracted features.
df = pd.read_csv('data/encode.csv', header= None) # 2569, 2049
print(df.shape)
Y = df[0]
# BUG FIX: np.float was removed in NumPy 1.24 — the builtin float is the
# documented replacement.
df_feats = df[[c for c in df.columns if c != 0]].values.astype(float)
print(df_feats.shape)
def knnmodel():
    """Fit a 5-NN ball-tree index on the feature matrix and cache it."""
    neighbours_index = NearestNeighbors(n_neighbors=5, n_jobs=8, algorithm='ball_tree')
    neighbours_index.fit(df_feats)
    joblib.dump(neighbours_index, 'data/knn.pkl')
# Train the index once and cache it on disk; later runs just load it.
if not os.path.exists('data/knn.pkl'):
    knnmodel()
knn = joblib.load('data/knn.pkl')
def get_neighbors(knn,imagepath):
    """Return (distances, indices) of the stored images nearest to the query."""
    query_features = get_features(imagepath)
    return knn.kneighbors(query_features, return_distance=True)
def get_names(neighbors):
    """Map neighbour indices back to image names.

    Each matched image is also copied into static/output for the demo page
    (assumed per-iteration, as in the original — TODO confirm).
    """
    # PERF FIX: Y.tolist() was rebuilt on every loop iteration; build the
    # lookup list once.
    labels = Y.tolist()
    result = []
    for idx in neighbors[1].flatten():
        result.append(labels[idx])
        shutil.copy(pjoin('data/all_imgs', result[-1]),'static/output')
    print(result)
    return result
#test_img_features = get_features('data/045.computer-keyboard/045_0003.jpg')
#get_names(get_neighbors(knn,'data/127.laptop-101/127_0008.jpg'))
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,238
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/misc/test_file.py
|
# Other functions taken from ignite_demo.py file and stored here
def rename_files(folder_path, name):
    """
    Rename every file in *folder_path* to ``<name><index>.<ext>``.
    :param folder_path: directory whose files are renamed in place
    :param name: common prefix for the new file names
    :return:
    """
    for index, filename in enumerate(listdir(folder_path)):
        extension = filename.split('.')[-1]
        src = pjoin(folder_path, filename)
        dest = pjoin(folder_path, name + str(index) + '.' + extension)
        shutil.move(src, dest)
def make_csvs():
    """
    Takes a list of folders and generates csv files with image name and Resnet50 features.
    :return: Nothing
    """
    for folder in ('all_images', 'DisposableGloves', 'ScrewDriver'):
        encode_folder('data/ignite_demo/' + folder,
                      'data/ignite_demo/' + folder + '.csv')
# make_csvs()
def azure_classify(img_path):
    """Classify an image with the Azure Custom Vision endpoint.

    Returns (best_tag, best_probability_percent).
    """
    # SECURITY NOTE: credentials are hard-coded here; they should be moved
    # to environment variables / a secrets store and rotated.
    classification_key = "54c71598e9434d5fa7853360c4a9e4ce"
    project_id = "858cb625-b783-405d-acf0-f7b99d077e57"
    iteration_name = "Iteration1"
    res = {}
    classification_endpoint = "https://southeastasia.api.cognitive.microsoft.com"
    predictor = CustomVisionPredictionClient(classification_key, endpoint=classification_endpoint)
    with open(img_path, "rb") as image_contents:
        results = predictor.classify_image(
            project_id,
            iteration_name,
            image_contents.read(),
            custom_headers={'Content-Type': 'application/octet-stream'})
    # Map tag -> probability as a percentage.
    for prediction in results.predictions:
        res[prediction.tag_name] = prediction.probability * 100
    ans = max(res, key=res.get)
    # BUG FIX: the original used max(res), i.e. the alphabetically largest
    # *tag name* (a string), as the probability; report the winning tag's
    # actual score instead.
    prob = res[ans]
    print(ans, prob)
    return ans, prob
def test_driver():
    """Run Azure detection + drawing over every bundled test image."""
    for image_name in listdir('data/ignite_demo/testimage'):
        image_path = pjoin('data/ignite_demo/testimage', image_name)
        print(image_path)
        detections = azure_detect_object(image_path)
        draw_(image_path, detections)
#this is taken from ignite_demo_app.py
@app.route('/decoder_test', methods=["POST", "GET"])
def decoder_test():
    '''testing bar code samples'''
    print("working")
    decodeobj_op = []
    bar_code_list = []
    # Reference list of sample barcodes we expect to find.
    barcode_samples = ['GRA-13G266','19811','923458','2DGP6','5LW89','GRA-10J217',
                       '5LW55','2AJK2','GRA-23NN72','2358','16953','MCM-59215A43',
                       'GRA-5KPJ1','923437','GRA-1YUF9','GRA-53KG69','MOT-0153-42C',
                       'AMAZ-45003','5176','AMZ-45005','MCM-5497a48','1765','100161',
                       '1492398','1491308','1491310','1412794','79201078','1387508',
                       '1387503','1387505','23670224','34860240','1387506']
    folder_path = r'C:\Users\ajayeswar.reddy\Downloads\bar_code_new'
    for file in os.listdir(folder_path):
        path_file = os.path.join(folder_path,file)
        decodeobj = decode(path_file)
        try:
            # Decoded payload looks like b'...'; strip the b'' wrapper.
            bar_code = str(decodeobj[0].data)
            bar_code = bar_code[2:-1]
            print(bar_code, "path of file:",path_file, "\n")
            bar_code_list.append(bar_code)
        except IndexError:
            print("Bar code not found")
            decodeobj_op.append(decodeobj)
            # BUG FIX: the original tested ``bar_code == 0`` (comparing a
            # str left over from a *previous* file — unbound on the first
            # miss) and removed the bare file name instead of its path.
            # Delete the image that yielded no barcode, by full path.
            os.remove(path_file)
    unnamed = list(set(barcode_samples)-set(bar_code_list))
    print(bar_code_list)
    print(len(bar_code_list))
    print(unnamed)
    return "Process done"
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,239
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/ignite_demo.py
|
import cv2
import shutil
import time
from os.path import join as pjoin
from os import listdir
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import pyzbar.pyzbar as pyzbar
import joblib
from reverse_image_features import *
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from azure.storage.blob import BlockBlobService
def train(csvpath):
    """Fit and cache a knn index plus its label vector for one csv."""
    df_feats, Y = csv_to_knn(csvpath)
    # Dataset name = csv file name without the ".csv" extension.
    dataset_name = csvpath.split("/")[-1][:-4]
    joblib.dump(Y, 'data/ignite_demo/Y_' + dataset_name + '.pkl')
    knnmodel(df_feats, 'data/ignite_demo/knn_' + dataset_name + '.pkl')
def csv_to_knn(csvpath):
    """Load a feature csv into (features, labels).

    Column 0 holds the image name; the remaining columns are numeric
    features.  Returns (float ndarray of shape (n, d), pandas Series of
    image names).
    """
    df = pd.read_csv(csvpath, header=None) # 2569, 2049
    Y = df[0]
    # BUG FIX: np.float was removed in NumPy 1.24 — the builtin float is
    # the documented replacement.
    df_feats = df[[c for c in df.columns if c != 0]].values.astype(float)
    return df_feats, Y
def make_csvs():
    """
    Takes a list of folders and generates csv files with image name and Resnet50 features.
    :return: Nothing
    """
    base = 'data/ignite_demo/'
    for folder in ('all_images', 'DisposableGloves', 'ScrewDriver'):
        encode_folder(base + folder, base + folder + '.csv')
# make_csvs()
def knnmodel(df_feats, path):
    """
    Fit a 10-NN ball-tree index over df_feats and pickle it to *path*.
    :param df_feats: 2-D feature matrix, one row per image
    :param path: destination file for the pickled model
    :return:
    """
    index = NearestNeighbors(n_neighbors=10, n_jobs=-2, algorithm='ball_tree')
    index.fit(df_feats)
    joblib.dump(index, path)
def get_neighbors(knn, imagepath):
    """Return (distances, indices) for the image at *imagepath*."""
    query = get_features(imagepath)
    return knn.kneighbors(query, return_distance=True)
def get_names(Y, neighbors,localbool):
    """Translate neighbour indices into image names.

    :param Y: label Series aligned with the training feature rows
    :param neighbors: (distances, indices) pair from kneighbors()
    :param localbool: truthy when serving the local Chrome demo, in which
        case each match is copied into static/output (assumed
        per-iteration, as in the original — TODO confirm)
    """
    # PERF FIX: Y.tolist() was recomputed on every iteration; do it once.
    labels = Y.tolist()
    result = []
    for idx in neighbors[1].flatten():
        result.append(labels[idx])
        # this is needed only for demo via chrome:
        if localbool:
            shutil.copy(pjoin('data/ignite_demo/all_images', result[-1]), 'static/output')
    return result
# Train the "all images" model once and cache it; subsequent imports of
# this module just load the pickles from disk.
if not os.path.exists('data/ignite_demo/knn_all_images.pkl'):
    print("Training")
    train('data/ignite_demo/all_images.csv')
knn = joblib.load('data/ignite_demo/knn_all_images.pkl')
Y = joblib.load('data/ignite_demo/Y_all_images.pkl')
def azure_detect_object(img_path):
    """
    detect gloves and screwdriver in given image
    :param img_path: Image path
    :return: list of (tag, probability, left, top, width, height) tuples
    """
    # SECURITY NOTE(review): hard-coded service credentials — these should
    # live in configuration, not in source.
    template_prediction_key = "54c71598e9434d5fa7853360c4a9e4ce"
    template_project_id = "54e7f828-d0c8-49d3-8802-9b402612b7c7"
    template_iteration_name = "Iteration2"
    template_prediction_endpoint = "https://southeastasia.api.cognitive.microsoft.com"
    predictor = CustomVisionPredictionClient(template_prediction_key, endpoint=template_prediction_endpoint)
    with open(img_path, "rb") as image_contents:
        results = predictor.detect_image(
            template_project_id,
            template_iteration_name,
            image_contents.read(),
            custom_headers={'Content-Type': 'application/octet-stream'})
    detections = []
    for prediction in results.predictions:
        box = prediction.bounding_box
        detections.append((prediction.tag_name,
                           prediction.probability,
                           box.left,
                           box.top,
                           box.width,
                           box.height))
    return detections
def draw_(imgpath, res, crops_path):
    """
    draw and crop the image based on Azure detection
    :param imgpath: imagepath
    :param res: detection coords
    :param crops_path: folder the per-detection crops are written into
    :return: list of [tag, probability, annotated-image path] entries
    """
    ims = []
    raw_im = cv2.imread(imgpath)
    for i, each in enumerate(res):
        tag, pa, x, y, w, h = list(each)
        # Skip weak detections (threshold appears to be empirical).
        if pa < 0.43:
            continue
        img = cv2.imread(imgpath)
        # NOTE(review): wi is shape[0] (rows/height) and he is shape[1]
        # (cols/width) despite the names — the crop arithmetic below
        # relies on exactly this pairing.
        wi, he = img.shape[0], img.shape[1]
        # Box coordinates from Azure are fractions of the image size.
        crop = img[int(y * wi): int((y + h) * wi), int(x * he):int((x + w) * he)]
        croppath = pjoin(crops_path,'crop'+ str(time.time()).replace(".","_") + "_" + str(i) + '.jpg')
        cv2.imwrite(croppath, crop)
        # Draw the box on both the per-detection copy and the shared image.
        cv2.rectangle(img, (int(x * he), int(y * wi)), (int((x + w) * he), int((y + h) * wi)), (255, 0, 0), 2)
        cv2.rectangle(raw_im, (int(x * he), int(y * wi)), (int((x + w) * he), int((y + h) * wi)), (255, 0, 0), 2)
        # The detect/ folder sits next to the crops folder.
        detect_path = "/".join(crops_path.split("/")[:-1])
        decpath = pjoin(detect_path,'detect/dec' + str(i) + '.jpg')
        cv2.imwrite(decpath, img)
        ims.append([tag, pa, decpath])
    # One image with every accepted box drawn on it, for the demo page.
    alldetect_path = 'static/input/in_dec.jpg'
    cv2.imwrite(alldetect_path, raw_im)
    return ims
def driver_crop_image(img_path, crops_path, filename, localbool=0, out_path=''):
    """Detect objects via Azure, crop them, and look up similar images.

    Falls back to a whole-image similarity search when detection produced
    no crops. Returns a list of {'crop_img': ..., 'out_img': [...]} dicts.
    """
    detections = azure_detect_object(img_path)
    draw_(img_path, detections, crops_path)
    results = []
    crop_files = os.listdir(crops_path)
    if not crop_files:
        # Nothing detected: search with the original upload instead.
        neighbors = get_names(Y, get_neighbors(knn, img_path), localbool)
        if localbool:
            results = [{'crop_img': 'input/' + filename, 'out_img': ['output/' + e for e in neighbors]}]
        else:
            results = [{'crop_img': 'input/' + filename, 'out_img': neighbors}]
    else:
        for crop_file in crop_files:
            matches = get_names(Y, get_neighbors(knn, pjoin(crops_path, crop_file)), localbool)
            results.append({'crop_img': 'crops/' + crop_file, 'out_img': matches})
    return results
def decode(img_path):
    """Read the image at *img_path* and return pyzbar's decoded barcodes."""
    frame = cv2.imread(img_path)
    return pyzbar.decode(frame)
# driver_class_whole_image('data/ignite_demo/ScrewDriver/screwdriver10.jpg')
# driver_class_whole_image('data/ignite_demo/DisposableGloves/gloves7.jpg')
# driver_crop_image('data/ignite_demo/ScrewDriver/screwdriver3.jpg')
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,240
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/misc/get_preloads.py
|
#from misc import *
# from misc_imports import *
from os.path import join as pjoin
from os import environ
from azure.storage.blob import BlockBlobService
# Credentials come from the environment; raises KeyError when missing.
BLOB_ACCOUNT_NAME = environ['BLOB_ACCOUNT_NAME']
BLOB_ACCOUNT_PASS = environ['BLOB_ACCOUNT_PASS']
# Service handle for the storage account that holds the preload artifacts.
block_blob_service = BlockBlobService(
    account_name=BLOB_ACCOUNT_NAME,
    account_key=BLOB_ACCOUNT_PASS)
preloads_path = 'data/preloads' # when seen from root path.
print('downloading preloads from blob')
# Mirror every blob of the 'contractspreloads' container into the local
# preloads folder, keeping the blob names as file names.
for blob in block_blob_service.list_blobs('contractspreloads'):
    #print(blob.name)
    block_blob_service.get_blob_to_path(
        'contractspreloads', blob.name, pjoin(preloads_path, blob.name))
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,241
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/ignite_demo_app.py
|
import json
from ignite_demo import *
from flask import Flask, request, render_template, jsonify
from azure.storage.blob import BlockBlobService
import time
import os
import pandas as pd
from search_utils import *
# Configuration: prefer a local config.py; otherwise fall back to env vars.
try:
    from config import *
    print('getting variables from config.py')
except BaseException:
    BLOB_ACCOUNT_NAME = os.environ['BLOB_ACCOUNT_NAME']
    BLOB_ACCOUNT_PASS = os.environ['BLOB_ACCOUNT_PASS']
    BLOB_CONTAINER_NAME = os.environ['BLOB_CONTAINER_NAME']
    print('getting environ variable')
# Blob service to get the files from
block_blob_service = BlockBlobService(
    account_name=BLOB_ACCOUNT_NAME,
    account_key=BLOB_ACCOUNT_PASS)
# blob container name
container_name = BLOB_CONTAINER_NAME
app = Flask(__name__, template_folder='templates')
# Absolute app root; scratch space lives in <root>/temp, data in <root>/data.
App_path = os.path.dirname(os.path.abspath(__file__))
TEMP_PATH = os.path.join(App_path, 'temp')
if not os.path.exists(TEMP_PATH):
    os.mkdir(TEMP_PATH)
data_path = os.path.join(App_path, 'data')
def make_df(filepath='data/ignite_demo/catalog.xlsx'):
    """Load the catalog spreadsheet and collapse its attribute columns into
    a single '$$$'-joined 'long description' column per row.

    :param filepath: Excel catalog path
    :return: DataFrame with only 'Image' and 'long description' columns
    """
    catalog = pd.read_excel(filepath)
    attribute_cols = catalog.columns[1:-1]
    catalog['long description'] = catalog[attribute_cols].apply(
        lambda row: '$$$'.join(row.dropna().astype(str)), axis=1)
    return catalog[['Image', 'long description']]
@app.route('/', methods=["POST", "GET"])
def home():
    """Serve the landing page."""
    landing = 'home.html'
    return render_template(landing)
@app.route('/upload', methods=["POST", "GET"])
def upload():
    """Handle a browser image upload: save it, run detection + similarity
    search, and render the results page."""
    # Reset the static working tree between requests (best effort).
    try:
        clean_one_dir('static')
        makedirs()
    except BaseException:
        pass
    global graph  # TF graph created at import time in ignite_demo
    static_path = pjoin(App_path, 'static')
    input_dir = pjoin(static_path, 'input')
    try:
        upload = request.files.getlist("file")[0]
    except IndexError:
        # No file in the form — just re-show the landing page.
        return render_template('home.html')
    print(upload.filename)
    filename = upload.filename
    if not allowedfile(filename):
        return render_template('home.html')
    filepath = pjoin(input_dir, filename)
    upload.save(filepath)
    localbool = 1  # 1 = neighbour images are served from local static/output
    with graph.as_default():
        results = driver_crop_image(filepath, 'static/crops', filename, localbool)
    # Prefix the neighbour file names so the template can resolve them.
    for each_result in results:
        each_result['out_img'] = ['output/' + e for e in each_result['out_img']]
    return render_template('ignite_dyn.html', infile='input/' + filename, seq=results)
@app.route('/decoder', methods=["POST", "GET"])
def decoder():
    """Decode barcodes in an uploaded image and render the result page."""
    # Start each request from a clean static tree.
    if len(os.listdir('static')) != 0:
        cleandir()
    makedirs()
    try:
        upload = request.files.getlist("file")[0]
    except IndexError:
        # No file in the form — re-show the landing page.
        return render_template('home.html')
    print(upload.filename)
    static_path = pjoin(App_path, 'static')
    input_dir = pjoin(static_path, 'input')
    filename = upload.filename
    if not allowedfile(filename):
        return render_template('home.html')
    filepath = pjoin(input_dir, filename)
    upload.save(filepath)
    decodeobj = decode(filepath)
    # Placeholder entry when pyzbar finds nothing, so the template renders.
    if len(decodeobj) == 0:
        decodeobj = [{'type': 'Barcode not found in image', 'data': 'Barcode not found in image'}]
    return render_template('decoder.html', infile='input/' + filename, obj=decodeobj)
@app.route('/blob_upload', methods=["POST", "GET"])
def blob_upload():
    """JSON API: fetch an image from blob storage, run detection + search,
    push the crops back to blob storage, and return the match results."""
    global graph  # TF graph created at import time in ignite_demo
    # Unique per-request scratch folder so concurrent requests don't clash.
    folder_name_inst = pjoin('temp', 'temp_' + str(time.time()).replace(".", "_"))
    in_path, out_path, _, detect_path, crops_path = makedirs_uniq(folder_name_inst)
    got_json = request.json
    '''
    Sample input Json
    {
    "ZipId":"1555658192865",
    "FileName":"gloves100.jpg",
    "BpcCode":99999,
    "UserId":0
    }
    '''
    try:
        zip_id = got_json['ZipId']
        BPC_codes = got_json['BpcCode']
        filename = got_json['FileName']
        # Rkey_list = got_json['FileList']
        user_id = got_json['UserId']
        # user_name = got_json['UserName']
        # Reject requests with falsy values for any provided key.
        for key, val in got_json.items():
            if not val:
                return jsonify({"error": "Missing " + str(key)}), 400
    except KeyError:
        clean_one_dir(folder_name_inst)
        return jsonify({"error": "Missing required parameters"}), 400
    except TypeError:
        # request.json is None when no JSON body was posted.
        return jsonify({"error": "Missing required parameters"}), 400
    # Blob layout: bpc/<code>/<zip>/image/<file>
    blob_file_path = "bpc/" + str(BPC_codes) + '/' + str(zip_id) + '/image/' + filename
    print(blob_file_path)
    local_filepath = pjoin(in_path, filename)
    try:
        block_blob_service.get_blob_to_path(
            container_name, blob_file_path, local_filepath)
    except BaseException:
        clean_one_dir(folder_name_inst)
        return jsonify({"error": "File not found in Blob"}), 400
    with graph.as_default():
        results = driver_crop_image(local_filepath, crops_path, filename, localbool=0, out_path=out_path)
    # Push each crop image back under the request's blob prefix.
    for each in results:
        crop_filename = each['crop_img']
        blob_crop_path = "bpc/" + str(BPC_codes) + '/' + str(zip_id) + '/' + crop_filename
        # uploading file to blob
        block_blob_service.create_blob_from_path(container_name, blob_crop_path, pjoin(folder_name_inst, crop_filename))
    clean_one_dir(folder_name_inst)
    return jsonify(results), 200
@app.route('/blob_decoder', methods=["POST", "GET"])
def blob_decoder():
    """JSON API: fetch a barcode photo from blob storage, decode it, and
    return the catalog entry matching the decoded string."""
    got_json = request.json
    if len(os.listdir('static')) != 0:
        cleandir()
    try:
        zip_id = got_json['ZipId']
        BPC_codes = got_json['BpcCode']
        filename = got_json['FileName']
        # Rkey_list = got_json['FileList']
        user_id = got_json['UserId']
        # user_name = got_json['UserName']
        # Reject requests with falsy values for any provided key.
        for key, val in got_json.items():
            if not val:
                return jsonify({"error": "Missing " + str(key)}), 400
    except KeyError:
        return jsonify({"error": "Missing required parameters"}), 400
    except TypeError:
        # request.json is None when no JSON body was posted.
        return jsonify({"error": "Missing required parameters"}), 400
    # Unique per-request scratch folder so concurrent requests don't clash.
    folder_name_inst = pjoin('temp', 'temp_' + str(time.time()).replace(".", "_"))
    in_path, out_path, _, detect_path, crops_path = makedirs_uniq(folder_name_inst)
    '''
    Sample input Json
    {
    "ZipId":"1555658192865",
    "FileName":"gloves100.jpg",
    "BpcCode":99999,
    "UserId":0
    }
    '''
    # Blob layout: bpc/<code>/<zip>/barcode/<file>
    blob_file_path = "bpc/" + str(BPC_codes) + '/' + str(zip_id) + '/barcode/' + filename
    local_filepath = pjoin(in_path, filename)
    try:
        block_blob_service.get_blob_to_path(
            container_name, blob_file_path, local_filepath)
    except BaseException:
        return jsonify({"error": "File not found in Blob"}), 400
    with graph.as_default():
        results = decode(local_filepath)
    try:
        bar_code = str(results[0].data)
    except IndexError:
        return jsonify({"error": "Bar code not found"}), 400
    # str(bytes) yields "b'...'" — strip the b'' wrapper.
    bar_code = bar_code[2:-1]
    clean_one_dir(folder_name_inst)
    df = make_df()
    res = search_text(bar_code, df)
    return jsonify({'input': bar_code, 'output': res}), 200
@app.route('/text_search', methods=["POST", "GET"])
def se_text():
    """Free-text catalog search; expects a JSON body {"Text": ...}."""
    payload = request.json
    query = payload["Text"]
    catalog = make_df()
    matches = search_text(query, catalog)
    return jsonify({'input': query, 'output': matches}), 200
def allowedfile(name):
    """Return True when *name* has an allowed image extension.

    Fixes the original ``name.split()`` (whitespace split), which accepted
    a filename only if the whole name was literally 'jpg'/'jpeg'/'png'.
    """
    # rsplit on the final dot so 'archive.v2.png' still checks 'png'.
    return '.' in name and name.rsplit('.', 1)[-1].lower() in ('jpg', 'jpeg', 'png')
def makedirs_uniq(path):
    """Create the standard working sub-folders under *path*.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of ``os.mkdir`` so
    creation is idempotent, race-free, and works when parents are missing.

    :param path: root folder (created if necessary)
    :return: tuple of (input, output, inputfiles, detect, crops) paths
    """
    folders = ['input', 'output', 'inputfiles', 'detect', 'crops']
    os.makedirs(path, exist_ok=True)
    subdirs = []
    for foldername in folders:
        sub = pjoin(path, foldername)
        os.makedirs(sub, exist_ok=True)
        subdirs.append(sub)
    return tuple(subdirs)
def makedirs():
    """Ensure the 'static' working tree exists (idempotent).

    ``os.makedirs(..., exist_ok=True)`` creates 'static' and each
    sub-folder in one call, avoiding the original exists/mkdir race.
    """
    for foldername in ['input', 'output', 'inputfiles', 'detect', 'crops']:
        os.makedirs(pjoin('static', foldername), exist_ok=True)
def cleandir(foldername=''):
    """Best-effort removal of the per-request folders under 'static'.

    Stops silently at the first failure (e.g. a missing folder), matching
    the original single try-block semantics.
    """
    try:
        for sub in ('input', 'output', 'inputfiles', 'detect', 'crops'):
            shutil.rmtree('static/' + sub)
    except BaseException:
        pass
def clean_one_dir(foldername):
    """Recursively delete *foldername*, ignoring filesystem errors.

    Narrowed from ``except BaseException`` so KeyboardInterrupt and
    SystemExit are no longer swallowed.
    """
    try:
        shutil.rmtree(foldername)
    except OSError:
        pass
def api():
    """Run the Flask development server on all interfaces."""
    app.run(host="0.0.0.0", port=4050, debug=True, threaded=True)
if __name__ == '__main__':
    # Launch the dev server when run as a script.
    api()
    # pass
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,242
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/search_utils.py
|
def match_strength(longdesc, text_str):
    """Fraction of '$$$'-separated attributes of *text_str* found
    (case-insensitively, ignoring spaces and commas) inside *longdesc*.

    The haystack normalisation is now done once instead of being
    re-applied on every loop iteration.

    :param longdesc: haystack description
    :param text_str: '$$$'-joined attribute string
    :return: matched_count / attribute_count, in [0.0, 1.0]
    """
    attributes = text_str.split('$$$')
    haystack = longdesc.replace(" ", "").replace(",", "").upper()
    hits = 0
    for attrib in attributes:
        needle = attrib.replace(" ", "").replace(",", "").upper()
        if needle in haystack:
            hits += 1
    return hits / len(attributes)
def max_match(match_dict):
    """Return the key whose match strength is highest (first wins ties)."""
    best_key = None
    best_val = None
    for key, val in match_dict.items():
        if best_val is None or val > best_val:
            best_key, best_val = key, val
    return best_key
def search_text(text, df):
    """
    Return the catalog image key that best matches *text*.

    :param text: query string ('$$$'-joined attributes)
    :param df: DataFrame with 'Image' and 'long description' columns
    :return: single-element list holding the best-matching image key
    """
    scores = {}
    for i, desc in enumerate(df['long description'].tolist()):
        scores[df.loc[i]['Image']] = match_strength(desc, text)
    match_key = max_match(scores)
    print(match_key)
    return [match_key]
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,243
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/misc/barcode_scannertest.py
|
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
from os import listdir
from os.path import join as pjoin
def decode(img_path):
    """Print type and data of every barcode pyzbar finds in *img_path*."""
    image = cv2.imread(img_path)
    for obj in pyzbar.decode(image):
        print("Type:", obj.type)
        print("Data: ", obj.data, "\n")
def test(folderpath):
    """Run decode() on every file in *folderpath*, separating outputs."""
    for filename in listdir(folderpath):
        print(filename)
        decode(pjoin(folderpath, filename))
        print("________________")
# test(folderpath="../data/barcode_samples")
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,244
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/reverse_image_features.py
|
import os
import keras
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
from keras.preprocessing import image
from keras.models import Model
from keras.models import load_model
import tensorflow as tf
import random
import time
import numpy as np
# from deployment import graph
# Load a cached ImageNet ResNet50 if present; otherwise download it from
# keras and cache it locally for subsequent runs.
if not os.path.exists('data/pre_trained_resnet50.h5'):
    # model = keras.applications.vgg16(weights='imagenet',include_top=True)
    model = keras.applications.ResNet50(weights='imagenet',include_top=True)
    #model.summary()
    model.save('data/pre_trained_resnet50.h5')
else:
    model = load_model('data/pre_trained_resnet50.h5')
# Capture the TF graph so request handlers in other threads can re-enter it.
graph = tf.get_default_graph()
def load_image(path):
    """Load *path* resized to the model's input size.

    :return: (PIL image, preprocessed batch array with a leading axis)
    """
    target = model.input_shape[1:3]
    img = image.load_img(path, target_size=target)
    arr = image.img_to_array(img)
    batch = preprocess_input(np.expand_dims(arr, axis=0))
    return img, batch
def feature_extract(model=model):
    """Return a Model mapping inputs to the ResNet 'avg_pool' features."""
    return Model(inputs=model.input, outputs=model.get_layer("avg_pool").output)
def get_features(img_path):
    """Return the avg_pool feature vector for the image at *img_path*.

    The feature-extractor sub-model is built once and memoised on the
    function object — the original rebuilt it on every call.
    Shape is presumably (1, 2048) given encode_folder's reshape — confirm.
    """
    img, x = load_image(img_path)
    extractor = getattr(get_features, '_extractor', None)
    if extractor is None:
        extractor = feature_extract()
        get_features._extractor = extractor
    return extractor.predict(x)
def encode_folder(folder_path, filepath):
    """Append one CSV line per image in *folder_path* to *filepath*:
    the file name followed by its 2048 feature values.

    The output file is now opened once for the whole folder instead of
    being re-opened in append mode for every image.
    """
    with open(filepath, 'a') as f:
        for each_file in sorted(os.listdir(folder_path)):
            feats = get_features(os.path.join(folder_path, each_file)).reshape((2048,))
            f.write(str(each_file) + ',' + ",".join(str(v) for v in feats) + '\n')
            print(each_file)  # progress indicator
# if __name__ == '__main__':
# get_features('data/045.computer-keyboard/045_0003.jpg')
# encode_folder('/data/visualsearch/unclean_data/caltech_256/all_imgs','data/encode.csv')
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,245
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/__init__.py
|
import os
import cv2 as cv
from flask import Flask,request,render_template
from os.path import join as pjoin
import keras
from keras import backend as K
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import joblib
import shutil
datapath = '/data/visualsearch'
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,246
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/misc/main.py
|
from __init__ import *
from keras_applications import resnet50
img_width =224
img_height = 224
def read_img_file(img_path):
    """Open *img_path* with PIL, converting to RGB when needed."""
    img = Image.open(img_path)
    return img if img.mode == 'RGB' else img.convert('RGB')
def resize_img_to_array(img, img_shape=(224, 224)):
    """Resize a PIL image to *img_shape* and return it as a numpy array."""
    return np.array(img.resize(img_shape))
def make_resnet_model(input_shape=(224, 224, 3)):
    """Build a frozen, headless ImageNet ResNet50 feature extractor.

    The default argument is now an immutable tuple instead of a mutable
    list (mutable defaults are shared across calls).

    :param input_shape: (height, width, channels) of the input tensor
    :return: keras Model with all layers frozen
    """
    model = ResNet50(input_shape=input_shape,
                     weights='imagenet',
                     include_top=False)
    for layer in model.layers:
        layer.trainable = False
    return model
def get_conv_feat(f, model):
    """Return the flattened conv-feature vector of the image file *f*.

    ``np.float`` (deprecated in NumPy 1.20, removed in 1.24) is replaced
    by the builtin ``float``, which is what the alias pointed to.
    """
    img = read_img_file(f)
    np_img = resize_img_to_array(img, img_shape=(img_width, img_height))
    X = preprocess_input(np.expand_dims(np_img, axis=0).astype(float))
    X_conv = model.predict(X)
    return X_conv[0].flatten()
def test(path):
    """Smoke test: run one image through a fresh ResNet and print the
    shape of the resulting conv-feature tensor.

    ``np.float`` (removed in NumPy 1.24) is replaced by builtin ``float``.
    """
    img = read_img_file(path)
    np_img = resize_img_to_array(img)
    model = make_resnet_model(input_shape=[224, 224, 3])
    X = preprocess_input(np.expand_dims(np_img, axis=0).astype(float))
    X_conv = model.predict(X)
    print(X_conv.shape)
if __name__ == '__main__':
    #print(os.listdir(datapath))
    #x=get_conv_feat('data/045.computer-keyboard/045_0001.jpg',make_resnet_conv())
    #print(x.shape)
    # Print the frozen ResNet50 architecture as a quick sanity check.
    make_resnet_model().summary()
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,247
|
cerofrais/reverseImageSearch
|
refs/heads/master
|
/app.py
|
from __init__ import *
from test import *
from yolov3_first import *
# Flask app serving the reverse-image-search demo UI.
app = Flask(__name__,template_folder='templates')
# Absolute app root; the data folder sits next to this file.
App_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(App_path,'data')
@app.route('/')
def home():
    """Serve the landing page."""
    landing = 'home.html'
    return render_template(landing)
@app.route('/upload', methods=["POST","GET"])
def upload():
    """Handle a browser image upload: run YOLO detection, then a
    nearest-neighbour search per detected crop, and render the results."""
    # Reset the static working tree between requests.
    if len(os.listdir('static')) != 0:
        cleandir()
    global graph  # TF graph created at import time
    with graph.as_default():
        if not os.path.exists('static/input'):
            os.mkdir('static/input')
        if not os.path.exists('static/output'):
            os.mkdir('static/output')
        if not os.path.exists('static/inputfiles'):
            os.mkdir('static/inputfiles')
        static_path = pjoin(App_path, 'static')
        input_dir = pjoin(static_path,'input')
        #for upload in request.files.getlist("file"):
        #print(request)
        # Only the first uploaded file is processed.
        upload = request.files.getlist("file")[0]
        print(upload.filename)
        filename = upload.filename
        if not allowedfile(filename):
            return render_template('home.html')
        filepath = pjoin(input_dir,filename)
        # print(zz)
        upload.save(filepath)
        # Writes detected crops into static/inputfiles.
        yolov3_detect(filepath)
        results = []
        # One result entry per detected crop: the crop plus its neighbours.
        for each in os.listdir('static/inputfiles'):
            result = {}
            result ['in_img'] = 'inputfiles/'+each
            result ['out_img'] = ['output/'+e for e in get_names(get_neighbors(knn,pjoin('static/inputfiles',each)))]
            results.append(result)
            # result[each] = get_names(get_neighbors(knn,pjoin('inputfiles',each)))
        print(results)
        # result = get_names(get_neighbors(knn, filepath))
        # result1 = result['crop0.jpg']
        # return render_template('dynamic.html', img_1 = 'input/'+filename, img_2 = 'output/'+result1[0],\
        # img_3 = 'output/'+result1[1],img_4 = 'output/'+result1[2], strout = " \n ".join(result))
        return render_template('dyn.html', infile= 'input/'+filename, seq = results)
def allowedfile(name):
    """Return True when *name* has an allowed image extension.

    Fixes the original ``name[-3:]`` check, which rejected '.jpeg'
    (four characters) and accepted dotless names merely ending in 'jpg'.
    """
    return '.' in name and name.rsplit('.', 1)[-1].lower() in ('jpg', 'jpeg', 'png')
def cleandir():
    """Remove the static working folders, ignoring missing ones.

    The original bare ``except:`` swallowed every exception (including
    KeyboardInterrupt) and aborted at the first failure; errors are now
    limited to filesystem failures and handled per folder.
    """
    for sub in ('static/input', 'static/output', 'static/inputfiles'):
        try:
            shutil.rmtree(sub)
        except OSError:
            pass
def api():
    """Run the Flask development server on all interfaces."""
    app.run(host="0.0.0.0", port=4040, debug=True)
if __name__ == '__main__':
    # Start the dev server when executed directly.
    api()
|
{"/test.py": ["/__init__.py", "/reverse_image_features.py"], "/ignite_demo.py": ["/reverse_image_features.py"], "/ignite_demo_app.py": ["/ignite_demo.py", "/search_utils.py"], "/misc/main.py": ["/__init__.py"], "/app.py": ["/__init__.py", "/test.py"]}
|
41,248
|
bchao1/minirenderer
|
refs/heads/master
|
/main.py
|
from .renderer.parser import
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,249
|
bchao1/minirenderer
|
refs/heads/master
|
/renderer/geometry.py
|
import numpy as np
class Vertex:
    """A 3-D point with an id, exposing 2-D/3-D views and a simple
    perspective projection."""

    def __init__(self, x, y, z, id):
        self._x = x
        self._y = y
        self._z = z
        self._id = id

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @property
    def z(self):
        return self._z

    @property
    def c(self):
        # NOTE(review): self._c is never assigned in this class, so
        # accessing .c raises AttributeError — kept as in the original.
        return self._c

    @property
    def id(self):
        return self._id

    @property
    def coor2D(self):
        """(x, y) pair, dropping depth."""
        return (self._x, self._y)

    @property
    def coor3D(self):
        """(x, y, z) triple."""
        return (self._x, self._y, self._z)

    def perspective2D(self, c):
        """
        c is the camera z position (in pixels).
        Default use coor2D where c = inf.
        """
        factor = 1 - self._z / c
        return (self._x / factor, self._y / factor)

    def __sub__(self, v):
        """Component-wise difference as a numpy array."""
        return np.subtract(self.coor3D, v.coor3D)
class Face:
    """A polygon referencing vertex ids, resolved through the class-level
    _vertices lookup (populated by the parser)."""

    _vertices = None  # shared id -> Vertex map

    def __init__(self, vids):
        self._vids = vids

    @property
    def vids(self):
        return self._vids

    @property
    def zmean(self):
        """ Simple wrap-around for painter's algorithm """
        depths = [self._vertices[vid].z for vid in self._vids]
        return np.mean(depths)

    @property
    def normal(self):
        """Unit normal of the plane through the first three vertices."""
        a = self._vertices[self._vids[0]]
        b = self._vertices[self._vids[1]]
        c = self._vertices[self._vids[2]]
        n = np.cross(b - a, c - a)
        norm = np.sqrt(np.sum(n ** 2)) + 1e-20  # guard
        return n / norm
def dist(t):
    """Length of the interval (lo, hi)."""
    lo, hi = t
    return hi - lo
def normalize(v):
    """Scale *v* to unit length (epsilon-guarded against zero vectors)."""
    magnitude = np.sqrt(np.sum(v ** 2)) + 1e-20
    return v / magnitude
def lookAt(camera, center, up):
    """Build the 4x4 world-to-camera view matrix for a camera at *camera*
    looking at *center* with up-vector *up*."""
    camera = np.array(camera)
    center = np.array(center)
    up = np.array(up)
    # Orthonormal camera basis.
    z = normalize(camera - center)
    x = normalize(np.cross(up, z))
    y = normalize(np.cross(z, x))
    rot = np.eye(4)
    rot[0, :3] = x
    rot[1, :3] = y
    rot[2, :3] = z
    trans = np.eye(4)
    trans[:3, 3] = -center
    return np.matmul(rot, trans)
if __name__ == '__main__':
v = Vertex(0, 0, 0, 0)
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,250
|
bchao1/minirenderer
|
refs/heads/master
|
/renderer/parser.py
|
import math
import numpy as np
from . import geometry
class WireframeParser:
    """Parse a Wavefront .obj wireframe and project it into camera space.

    Usage: readfile() first, then adjust_camera() — vertex objects and the
    x/y bounds only exist after adjust_camera() has been called.
    """
    def __init__(self):
        pass
    def readfile(self, filename):
        """Read vertices ('v' lines) and faces ('f' lines) from *filename*.

        Vertices are stored as raw homogeneous coordinates in self.coors;
        faces keep 1-based vertex ids.
        """
        self._vertices = {}
        self._faces = []
        self.coors = []
        with open(filename, 'r') as file:
            for line in file:
                if not line.strip():
                    continue
                line = line.strip().split(' ')
                line = [s for s in line if len(s) > 0]
                if line[0] == 'v':
                    coor = [float(c) for c in line[1:]] # x, y, z coordinates
                    coor.append(1) # dummy 4-th dimension
                    self.coors.append(coor)
                elif line[0] == 'f':
                    # 'f v1/vt1/vn1 ...' — keep only the vertex index.
                    vs = [int(v.split('/')[0]) for v in line[1:]]
                    f = geometry.Face(vs)
                    self._faces.append(f)
        self.coors = np.stack(self.coors) # raw coordinates
    def adjust_camera(self, camera, center, up):
        """Transform all vertices into the camera frame defined by
        (camera, center, up), rebuild the Vertex map, compute the x/y
        bounds, and depth-sort faces for painter's algorithm."""
        view = geometry.lookAt(camera, center, up)
        print(view)
        # Apply the 4x4 view matrix to every homogeneous coordinate.
        self.camera_coors = np.transpose(np.matmul(view, np.transpose(self.coors)))
        print(self.camera_coors.shape)
        self._min_x, self._min_y = math.inf, math.inf
        self._max_x, self._max_y = -math.inf, -math.inf
        for i, coor in enumerate(self.camera_coors):
            # .obj vertex ids are 1-based.
            vid = i + 1
            v = geometry.Vertex(*coor[:-1], vid)
            self._vertices[vid] = v
            self._min_x, self._max_x = min(self._min_x, v.x), max(self._max_x, v.x)
            self._min_y, self._max_y = min(self._min_y, v.y), max(self._max_y, v.y)
        # Faces resolve vertices through this shared class attribute.
        geometry.Face._vertices = self._vertices
        # Back-to-front order for painter's algorithm.
        self.faces.sort(key = lambda f: f.zmean)
    @property
    def vertices(self):
        return self._vertices
    @property
    def faces(self):
        return self._faces
    @property
    def x_range(self):
        # Only valid after adjust_camera() has run.
        return (self._min_x, self._max_x)
    @property
    def y_range(self):
        # Only valid after adjust_camera() has run.
        return (self._min_y, self._max_y)
    def get_canvas_size(self, scale):
        """Canvas (width, height) in pixels at the given world scale."""
        def d(t):
            return t[1] - t[0]
        return (int(scale * d(self.x_range)), int(scale * d(self.y_range)))
def test():
    """Smoke test: parse a sample .obj and print its y extent.

    The original called y_range right after readfile(), but the bounds
    are only computed by adjust_camera() — it raised AttributeError.
    """
    wf = WireframeParser()
    wf.readfile('../examples/airboat.obj')
    wf.adjust_camera([0, 0, 100], [0, 0, 0], [0, 1, 0])
    print(wf.y_range)
if __name__ == '__main__':
test()
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,251
|
bchao1/minirenderer
|
refs/heads/master
|
/renderer/canvas.py
|
from PIL import Image, ImageDraw, ImageOps
class WireframeCanvas:
    """Thin PIL wrapper: an RGB image plus line/polygon drawing helpers."""
    def __init__(self, imsize, fg_color, bg_color):
        # imsize: (width, height); colors are RGB tuples.
        self.imsize = imsize
        self.fg_color = fg_color
        self.bg_color = bg_color
        self.img = Image.new('RGB', self.imsize, self.bg_color)
        self.draw = ImageDraw.Draw(self.img)
    def drawline(self, c1, c2):
        """Draw a segment from c1 to c2 in the foreground color."""
        self.draw.line([c1, c2], fill=self.fg_color)
    def drawpoly(self, coors, outline=None, fill=None):
        """Draw a polygon through *coors* with optional outline/fill."""
        self.draw.polygon(coors, outline=outline, fill=fill)
    def postprocess(self):
        """Flip vertically (image y grows downward) and add a 10px border."""
        self.img = ImageOps.flip(self.img)
        self.img = ImageOps.expand(self.img, border=10, fill=self.bg_color)
    def show(self):
        self.img.show()
    def save(self, path):
        self.img.save(path)
    def close(self):
        self.img.close()
    @property
    def size(self):
        # Current (width, height), including any post-process border.
        return self.img.size
def test():
    """Smoke test: draw one line on a small canvas and show it.

    The original called WireframeCanvas() with no arguments, which raises
    TypeError — the constructor requires imsize, fg_color and bg_color.
    """
    wf = WireframeCanvas((400, 400), (0, 0, 0), (255, 255, 255))
    wf.drawline((100, 350), (150, 300))
    wf.show()
if __name__ == '__main__':
test()
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,252
|
bchao1/minirenderer
|
refs/heads/master
|
/renderer/colors.py
|
import random
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BG = (186, 218, 219)
def rand_color():
    """Return a uniformly random RGB triple.

    ``random.randint`` bounds are inclusive, so the upper limit must be
    255 — the original randint(0, 256) could emit an invalid channel 256.
    """
    return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def color_grad(color, intensity):
    """Scale each channel of *color* by *intensity*, truncating to int."""
    return tuple(int(channel * intensity) for channel in color)
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,253
|
bchao1/minirenderer
|
refs/heads/master
|
/gif.py
|
import numpy as np
from renderer.renderer import WireframeRenderer
from PIL import ImageOps
if __name__ == '__main__':
    # Render the object from 72 camera angles (5 degrees apart) orbiting
    # the y axis, then stitch the frames into an animated GIF.
    mode = 'triangle'
    obj = 'bunny'
    inpath = './examples/{}.obj'.format(obj)
    outpath = './images/{}_{}.png'.format(obj, mode)
    wf = WireframeRenderer()
    wf.readfile(inpath)
    im_list = []
    max_w, max_h = -np.inf, -np.inf
    for i in range(72):
        r = 100  # camera orbit radius
        theta = 5 * i
        rad = np.pi * theta / 180
        camera = [r * np.cos(rad), 0, r * np.sin(rad)]
        center = [0, 0, 0]
        up = [0, 1, 0]
        canvas = wf.render(mode, camera, center, up)
        w, h = canvas.size
        # Track the largest frame so all frames can be padded to one size.
        max_w = max(w, max_w)
        max_h = max(h, max_h)
        im_list.append(canvas.img)
    im_list = [ImageOps.pad(img, (max_w, max_h), color=(255,255,255)) for img in im_list]
    im_list[0].save('images/{}_render.gif'.format(obj), save_all=True, append_images=im_list[1:], duration=100, loop=0)
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,254
|
bchao1/minirenderer
|
refs/heads/master
|
/renderer/renderer.py
|
import numpy as np
from .canvas import WireframeCanvas
from .parser import WireframeParser
from . import geometry
from . import colors
class WireframeRenderer:
    """Render a parsed wireframe .obj onto a WireframeCanvas, either as an
    outline ('wireframe') or as flat-shaded polygons ('triangle')."""
    def __init__(self,
                 imsize=1000,
                 fg_color=colors.WHITE,
                 bg_color=colors.WHITE
                 ):
        self.parser = WireframeParser()
        self.imsize = imsize
        self.fg_color = fg_color
        self.bg_color = bg_color
        self.canvas = None
    def rescale(self, c, c_range):
        """Map world coordinate *c* into canvas pixels.

        NOTE(review): cmax is unpacked but unused — only the minimum is
        subtracted before scaling.
        """
        cmin, cmax = c_range
        return 1.0 * self.scale * (c - cmin)
    def get_scale(self, x_range, y_range):
        """Pixels per world unit so the larger extent fills imsize."""
        dx = geometry.dist(x_range)
        dy = geometry.dist(y_range)
        sz = max(dx, dy)
        scale = 1.0 * self.imsize / sz
        return scale
    def readfile(self, infile):
        self.parser.readfile(infile)
    def adjust_camera(self, camera, center, up):
        self.parser.adjust_camera(camera, center, up)
    def render(self, mode, camera, center, up):
        """Render one frame for the given camera and return the canvas."""
        # compute camera coordinates
        self.adjust_camera(camera, center, up)
        self.scale = self.get_scale(self.parser.x_range, self.parser.y_range)
        self.canvas_size = self.parser.get_canvas_size(self.scale)
        self.canvas = WireframeCanvas(self.canvas_size, self.fg_color, self.bg_color)
        light_dir = np.array([0, 0, 1]) # light direction, into the frame
        # Faces were depth-sorted by the parser (painter's algorithm).
        for f in self.parser.faces:
            f_canvas = [] # face coordinates to draw on canvas
            n = f.normal # compute normal vector of face
            I = np.dot(n, light_dir) # compute color intensity of polygons
            for vid in f.vids:
                x, y = self.parser.vertices[vid].coor2D # world coordinates
                x_canvas = self.rescale(x, self.parser.x_range) # transform to canvas coordinates
                y_canvas = self.rescale(y, self.parser.y_range)
                f_canvas.append((x_canvas, y_canvas))
            if mode == 'wireframe':
                self.canvas.drawpoly(f_canvas, outline=colors.BLACK)
            elif mode == 'triangle':
                self.canvas.drawpoly(f_canvas, fill=colors.color_grad(colors.WHITE, I))
        self.canvas.postprocess()
        return self.canvas
|
{"/gif.py": ["/renderer/renderer.py"], "/renderer/renderer.py": ["/renderer/canvas.py", "/renderer/parser.py"]}
|
41,258
|
KirillovProj/Ant-optimizing-algorithm
|
refs/heads/master
|
/area_generation.py
|
import random
import matplotlib.pyplot as plt
import numpy as np
'''
Generate random points on a plane. rangeX and rangeY set plane's width and length.
'''
def generate(quantity, rangeX=100, rangeY=100):
    """Return *quantity* random integer points on a rangeX x rangeY plane.

    The original defaults were the ``int`` type object itself, which would
    crash any caller relying on them; sane integer defaults replace them.

    :param quantity: number of points to generate
    :param rangeX: exclusive upper bound for x coordinates
    :param rangeY: exclusive upper bound for y coordinates
    :return: numpy array of shape (quantity, 2)
    """
    points = [(random.randrange(0, rangeX), random.randrange(0, rangeY))
              for _ in range(quantity)]
    return np.array(points)
'''
Matrix of distance between every single generated point
'''
def get_distance_matrix(generated):
    """Pairwise Euclidean distances between all generated points."""
    rows = []
    for a in generated:
        rows.append([np.linalg.norm(a - b) for b in generated])
    return np.array(rows)
'''
Shows plane with points, they're annotated by indexes and connected with lines reflecting the best path.
'''
def show_map(coordinates_list, best_path):
    """Plot the points (annotated with their indexes) and connect
    consecutive best-path nodes with black lines."""
    xs, ys = zip(*coordinates_list)
    plt.scatter(xs, ys)
    for idx in range(len(coordinates_list)):
        plt.annotate(idx, (xs[idx], ys[idx]))
    # zip with the shifted path yields exactly the consecutive edges,
    # replacing the original try/except IndexError at the last node.
    for a, b in zip(best_path, best_path[1:]):
        plt.plot([xs[a], xs[b]], [ys[a], ys[b]], 'k-')
    plt.show()
|
{"/main.py": ["/area_generation.py"]}
|
41,259
|
KirillovProj/Ant-optimizing-algorithm
|
refs/heads/master
|
/main.py
|
from area_generation import generate, show_map, get_distance_matrix
import random
import numpy as np
# Interactive configuration: point count, plane size and ACO iterations.
quantity = int(input('Input quantity of points: '))
max_length = int(input('Input max length between points: '))
iterations = int(input('Input number of iterations: '))
class Map:
'''
initialize our optimization system. Ant count is set to points' quantity by default,
evaporation rate means how fast pheromones evaporate 0<x<1. Less means faster.
Alpha and beta are coefficients that can change the weights of two key factors:
alpha is responsible for pheromone, beta is responsible for distance.
Starting pheromones and pheromones_per_run are pretty straightforward.
Different problems will require different values for these keys.
'''
def __init__(self, ant_count=quantity, evap_rate=0.6, starting_pheromone=0.5, alpha=1, beta=4, pheromones_per_run=100):
self.nodes = generate(quantity,max_length,max_length)
self.distances = get_distance_matrix(self.nodes)
self.pheromones = np.ones((quantity, quantity))*starting_pheromone
self.evaporation_rate = evap_rate
self.ant_count = ant_count
self.alpha = alpha
self.beta = beta
self.best_length = 0
self.phero_per_run = pheromones_per_run
'''
After every run-through pheromones evaporate, then most popular edges receive an additional batch of phero
'''
def evaporation(self):
self.pheromones*=self.evaporation_rate
self.pheromones+=self.pheromones_batch
'''
First we initialize phero batch matrix with zeros. Then we unleash ants. After every single ant we're adding
pheromones they secrete to the batch. When all ants within iteration done running, we start
the evaportation process.
'''
def iteration(self):
self.pheromones_batch = np.zeros((quantity,quantity))
for ant in range(self.ant_count):
self.get_ant_starting_pos()
self.choose_path()
pheromones_to_add = self.phero_per_run/self.path_length
for edge in self.path:
self.pheromones_batch[edge[0]][edge[1]]+=pheromones_to_add
self.evaporation()
'''
Simple func that gets new position for every ant. Because when all ants start from the same
position they may not find some good shortcuts even after many iterations.
'''
def get_ant_starting_pos(self):
self.position = random.randint(0, quantity - 1)
'''
At the start we initialize movement and path lists. Former will take indexes of points that ant run through,
latter will take tuples of indexes for paths, this helps us understand where to put pheromones.
Path_length will calculate overall length for the record.
We make list of indexes of available nodes and then immediately remove our starting point from it.
'''
    def choose_path(self):
        # movement: ordered node indexes of the tour; path: (from, to) edge
        # tuples, used later to decide where pheromones are deposited.
        self.movement, self.path = [], []
        self.path_length = 0
        available_nodes = list(range(quantity))
        available_nodes.remove(self.position)
        self.movement.append(self.position)
        '''
        While there are still nodes in our list we calculate the probability of choosing each available path
        (minus the nodes the ant has already visited). It uses the standard ant-colony-optimization formula that
        weighs pheromone level on an edge (alpha) against the distance between the points (beta).
        Then the ant chooses a path using a weighted random choice, and we update all linked variables.
        '''
        while available_nodes:
            probabilities = []
            # Normalizing denominator of the ACO transition probability; the
            # 100/distance term rewards shorter edges.
            denominator = sum((self.pheromones[self.position][i]**self.alpha)*((100/self.distances[self.position][i])**self.beta) for i in available_nodes)
            for node in available_nodes:
                numerator = (self.pheromones[self.position][node]**self.alpha)*((100/self.distances[self.position][node])**self.beta)
                probabilities.append(numerator/denominator)
            # Weighted random pick of the next node to visit.
            choice = random.choices(population=available_nodes, weights=probabilities)[0]
            self.path_length+=self.distances[self.position][choice]
            self.movement.append(choice)
            self.path.append((self.position,choice))
            self.position = choice
            available_nodes.remove(choice)
        '''
        If that's our first run-through or this run beat the record, we rewrite the recorded best.
        '''
        if not self.best_length or self.path_length<self.best_length:
            self.best_length = self.path_length
            self.best_path = self.movement
# Run the colony for the configured number of iterations, then report the
# best tour found. NOTE(review): assumes the module-level `iterations` is >= 1,
# otherwise `x.best_path` is never set and the print below raises AttributeError.
x = Map()
for i in range(iterations):
    x.iteration()
print(f'Best path: {x.best_path}')
print(f'Best length: {x.best_length}')
show_map(x.nodes, x.best_path)
|
{"/main.py": ["/area_generation.py"]}
|
41,265
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JobItem(scrapy.Item):
    """A scraped job posting, including fields filled in from a Google
    Places lookup (google_address, place_id, lat/long, ...).

    BUGFIX: ``website`` was declared twice in the original class body; the
    second assignment simply rebound the same field and has been removed.
    """
    url = scrapy.Field()
    title = scrapy.Field()
    company_name = scrapy.Field()
    experience_requirements = scrapy.Field()
    location = scrapy.Field()
    description = scrapy.Field()
    salary = scrapy.Field()
    industry = scrapy.Field()
    role = scrapy.Field()
    address = scrapy.Field()
    telephone = scrapy.Field()
    email_id = scrapy.Field()
    recruiter_name = scrapy.Field()
    reference_id = scrapy.Field()
    website = scrapy.Field()
    posted_date = scrapy.Field()
    contact_dump = scrapy.Field()
    premium = scrapy.Field()
    # Fields populated from the Google Places API response.
    google_address = scrapy.Field()
    place_id = scrapy.Field()
    international_phone_number = scrapy.Field()
    formatted_phone_number = scrapy.Field()
    google_url = scrapy.Field()
    latitude = scrapy.Field()
    longitude = scrapy.Field()
class CandidatescraperItem(scrapy.Item):
    """A scraped job-seeker (candidate) record: name, mobile number, where
    they were found and when they were scraped."""
    # define the fields for your item here like:
    name = scrapy.Field()
    mobile = scrapy.Field()
    location = scrapy.Field()
    sector = scrapy.Field()
    source = scrapy.Field()   # portal the candidate came from
    date = scrapy.Field()     # scrape date
class SectorItem(scrapy.Item):
    """A job posting scraped by the sector spider, normalized across the
    naukri / shine / olx portals (see the ``portal`` field)."""
    date = scrapy.Field()
    title = scrapy.Field()
    location = scrapy.Field()
    description = scrapy.Field()
    company_name = scrapy.Field()
    contact_person_name = scrapy.Field()
    number = scrapy.Field()     # contact phone number
    position = scrapy.Field()   # sector bucket, e.g. 'back_office' / 'accounts' / 'office_boy'
    role = scrapy.Field()
    portal = scrapy.Field()     # source portal: 'naukri' / 'shine' / 'olx'
    google_address = scrapy.Field()
    google_phone_number = scrapy.Field()
    station = scrapy.Field()
    email = scrapy.Field()
    url = scrapy.Field()
class ZaubaItem(scrapy.Item):
    """Company registration details — presumably scraped from a Zauba
    company-registry page (TODO confirm against the Zauba spider).
    Field names mirror the registry's column headings."""
    CIN = scrapy.Field()
    CompanyName = scrapy.Field()
    RoC = scrapy.Field()
    RegistrationNumber = scrapy.Field()
    DateofIncorporation = scrapy.Field()
    Email = scrapy.Field()
    Website = scrapy.Field()
    Address = scrapy.Field()
    Directors = scrapy.Field()
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,266
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/BabaJob_Mumbai_Teacher.py
|
import time
import scrapy
import logging
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
from scrapy.spiders import Rule
from crawler.items import CandidatescraperItem
class BabaJobSpider(InitSpider):
    """Spider that logs into babajob.com and scrapes teacher candidates in
    Mumbai.

    InitSpider contract: ``init_request`` runs before crawling, drives the
    login flow (``login`` -> ``check_login_response``), and only then are the
    ``start_urls`` fetched. ``parse`` walks the seeker grid and follows each
    candidate card into ``parse_job_details``.
    """
    name = "babajob_mumbai_teacher"
    allowed_domains=["babajob.com"]
    login_page = 'http://www.babajob.com/login'
    start_urls = [
        #"http://www.babajob.com/Hire-BPO-in-Thane-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Driver-in-Thane-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Helper-in-Thane-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Delivery-in-Thane-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Receptionist-in-Thane-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Other-in-Thane-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-DataEntry-in-Thane-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Cashier-in-Thane-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Sales-in-Thane-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Management-in-Thane-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Teacher-in-Thane-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Accountant-in-Thane-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Steward-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Maid-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Cook-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Nanny-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Guard-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Laborer-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Tailor-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Nurse-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Machinist-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Engineer-in-Thane-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Beautician-in-Thane-sort-dateDesc-in_last_days-2",
        #
        #"http://www.babajob.com/Hire-BPO-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Driver-in-Mumbai-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Helper-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Delivery-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Receptionist-in-Mumbai-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-Other-in-Mumbai-sort-dateDesc-in_last_days-2",
        #"http://www.babajob.com/Hire-DataEntry-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Cashier-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Sales-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Management-in-Mumbai-sort-dateDesc-in_last_days-2",
        "http://www.babajob.com/Hire-Teacher-in-Mumbai-sort-dateDesc-in_last_days-1-include_mobile_users",
        # "http://www.babajob.com/Hire-Accountant-in-Mumbai-sort-dateDesc-in_last_days-2",
        # "http://www.babajob.com/Hire-Steward-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Maid-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Cook-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Nanny-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Guard-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Laborer-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Tailor-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Nurse-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Machinist-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Engineer-in-Mumbai-sort-dateDesc-in_last_days-2",
        # # "http://www.babajob.com/Hire-Beautician-in-Mumbai-sort-dateDesc-in_last_days-2",
    ]
    def init_request(self):
        """This function is called before crawling starts: throttle requests
        and kick off the login flow."""
        print ("Init")
        self.download_delay=15
        return Request(url=self.login_page, callback=self.login)
    def login(self, response):
        """Generate a login request.

        SECURITY(review): credentials are hard-coded in source; move them to
        spider settings or environment variables.
        """
        print ("Login")
        return FormRequest.from_response(response,
                    formdata={'LoginText': '7303038426', 'Password': 'Nishit123'},
                    clickdata={'id':'updateprofile-submit'},
                    callback=self.check_login_response)
    def check_login_response(self, response):
        """Confirm login succeeded by looking for the account name in the page."""
        print ("CheckLogin")
        # BUGFIX: response.body is bytes under Python 3; testing a str needle
        # against it raises TypeError. Compare a bytes literal instead.
        if b"Nishit" in response.body:
            # Now the crawling can begin..
            print ("Logged in")
            return self.initialized()
        else:
            print ("Login Failed")
            # Something went wrong, we couldn't log in, so nothing happens.
    def parse(self, response):
        """Iterate the candidate cards on a listing page, schedule a detail
        request per candidate, then follow the 'next page' link."""
        print ("Parse")
        next = response.xpath('//a[@id="cp1_pagerSeekersGrid_btnNext"]/@href')
        for each in response.xpath('//div[@class="s-card-inner"]'):
            href = each.xpath('div/div[@class="col-sm-7 s-col-data"]/h2/div/a/@href')
            item = CandidatescraperItem()
            item['sector'] = response.xpath('//div[@id="dropdownMenu1"]/text()').extract_first()
            item['location'] = response.xpath('//div[@id="dropdownMenu2"]/text()').extract_first()
            item['source'] = {"BabaJob"}
            date = time.strftime("%d/%m/%Y")
            item['date']=date
            time.sleep(15)  # crude per-card rate limiting, on top of download_delay
            url = response.urljoin(href.extract_first())
            req = scrapy.Request(url, callback=self.parse_job_details,meta={'item': item})
            yield req
        # BUGFIX: xpath() returns a SelectorList, never None, so the original
        # `next is not None` check always paginated and urljoin(None) blew up
        # on the last page. Test for a non-empty result instead.
        if next:
            url = response.urljoin(next.extract_first())
            paginate_req = scrapy.Request(url, callback=self.parse)
            yield paginate_req
        else:
            return
    def parse_job_details(self, response):
        """Extract the candidate's name and mobile number from the detail page.

        The item is always yielded (via ``finally``), with or without a
        mobile number — this preserves the original behaviour.
        """
        item = response.meta['item']
        item['name']=response.xpath('//div[@id="cp1_nonDeletedUserContent"]/div/div/div/div/div/div/div/div[@class="col-sm-7"]/div/h1/text()').extract_first()
        try:
            onclick = response.xpath('//div[@id="cp1_shortlistButtonContainer"]/div/a/@onclick').extract_first()
            # The phone number sits between '+' and ',' inside the onclick JS.
            number=onclick[onclick.find('+'):onclick.find(',')]
            item['mobile']=number.replace("+91","") #11 is the index number of last digit
            print("item-------------------")
        except Exception:
            # onclick may be missing (no shortlist button): .find on None
            # raises AttributeError. Narrowed from the original bare except.
            return
        finally:
            yield item
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,267
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/BabaJobsSpider.py
|
import scrapy
import requests
import json
import pprint
import re
from datetime import datetime
from datetime import timedelta
from crawler.items import JobItem
class BabaJobsScrapy(scrapy.Spider):
    """Spider skeleton for babajob.com job listings.

    ``parse`` is currently a stub — the original listing-walk logic is kept
    below as commented-out code — but ``parse_job_details`` still works when
    scheduled with the meta keys it expects (``url``,
    ``experience_requirements`` and optionally ``premium``).
    """
    name = "babajobs"
    allowed_domains=["babajob.com"]
    start_urls = [
        # 'http://www.babajob.com/Jobs-DataEntry-in-Thane-page-1',
        # 'http://www.babajob.com/Jobs-OfficeBoy-in-Thane-page-1',
        # 'http://www.babajob.com/Jobs-Receptionist-in-Thane-page-1',
        # 'http://www.babajob.com/Jobs-OfficeClerk-in-Thane-page-1',
        # 'http://www.babajob.com/Jobs-Sales-in-Thane-page-1',
        # 'http://www.babajob.com/Jobs-DeliveryCollections-in-Thane-page-1',
        #
        # 'http://www.babajob.com/Jobs-DataEntry-in-Mumbai-page-1',
        # 'http://www.babajob.com/Jobs-OfficeBoy-in-Mumbai-page-1',
        # 'http://www.babajob.com/Jobs-Receptionist-in-Mumbai-page-1',
        # 'http://www.babajob.com/Jobs-OfficeClerk-in-Mumbai-page-1',
        # 'http://www.babajob.com/Jobs-Sales-in-Mumbai-page-1',
        # 'http://www.babajob.com/Jobs-DeliveryCollections-in-Mumbai-page-1',
    ]
    def parse(self, response):
        """Listing-page walker — currently disabled; original logic kept below."""
        pass
        # if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is not None:
        #
        #     count_string = re.findall(r'\d+',response.xpath('//h4[@style="color: #000; margin-top: 15px; margin-left: 17px;"]/span[@style="font-size: 13px"]/text()').extract_first())
        #     total_count = int(count_string[0])
        #     count_per_page = 0
        #
        #     for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
        #         href = each.xpath('div/div/h2[@class="s-card-title"]/a/@href')
        #         experience_requirements = each.xpath('div/div/ul/li/text()').extract()[1]
        #         count_per_page = count_per_page + 1
        #         url = response.urljoin(href.extract_first())
        #         req = scrapy.Request(url, callback=self.parse_job_details)
        #
        #         if 'sponsored' in each.xpath('div/div/h4[@itemtype="http://schema.org/Organization"]//text()').extract_first().strip().lower():
        #             req.meta['premium'] = True
        #             # posted_date_string = each.xpath('div/div/ul/li/p[@class="info-label-inline info-label-key"]/text()').extract_first()
        #             # posted_date_list = posted_date_string.split()
        #         else:
        #             req.meta['premium'] = False
        #             posted_date_string = each.xpath('div/div/ul/li/p[@class="info-label-value is-recent"]/text()').extract_first()
        #             posted_date_list = posted_date_string.split()
        #             if not posted_date_list[1] == 'hours':
        #                 continue
        #
        #         req.meta['url'] = url
        #         req.meta['experience_requirements'] = experience_requirements
        #         yield req
        #
        #
        #     nextUrl = ""
        #     flag = False
        #     try:
        #         if int(response.meta['total_items_iterated']) <= int(response.meta['total_count']):
        #             flag = True
        #             total_items_iterated = int(response.meta['total_items_iterated']) + count_per_page
        #     except:
        #         #first page
        #         flag = True
        #         total_items_iterated = count_per_page
        #     finally:
        #         if not flag:
        #             return
        #         else:
        #             url = response.url
        #             page_count = re.findall(r'\d+',url)[0]
        #             page_count = int(page_count)
        #             next_page = str(page_count + 1)
        #             nextUrl = re.sub(r'\d+', next_page, url)
        #             paginate_req = scrapy.Request(nextUrl, callback=self.parse)
        #             paginate_req.meta['total_count'] = total_count
        #             paginate_req.meta['total_items_iterated'] = total_items_iterated
        #             yield paginate_req
    def parse_job_details(self, response):
        """Build a JobItem from a job-detail page.

        Requires ``response.meta['url']`` and
        ``response.meta['experience_requirements']`` to be set by the caller.
        """
        url = response.meta['url'].split('?')[0]
        job = JobItem()
        job['url'] = response.meta['url']
        job['title'] = response.xpath('//div[@class="row"]/div[@class="col-sm-12"]/h1/text()').extract_first()
        job['posted_date'] = self._join(response.xpath('//div[@class="job-title-right"]/div[@class="date-posted"]//text()').extract())
        job['company_name'] = response.xpath('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Employer picture"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
        job['salary'] = response.xpath('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Salary"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
        job['location'] = self._strip(response.xpath('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Location"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first())
        # google_maps_url = response.xpath('//div[@id="mapRow"]/div[@class="col-sm-10 job-info-text"]/a/@href').extract_first()
        job['description'] = response.xpath('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Description"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
        job['experience_requirements'] = response.meta['experience_requirements']
        try:
            job['premium'] = response.meta['premium']
        except KeyError:
            job['premium'] = 'NA'
        # Fields this portal does not expose.
        job['contact_dump'] = 'NA'
        job['recruiter_name'] = 'NA'
        job['reference_id'] = 'NA'
        job['address'] = 'NA'
        job['industry'] = 'NA'
        job['role'] = 'NA'
        yield job
    def _rstrip(self, l):
        """Strip each string in *l* and drop embedded '\\r\\n,' sequences."""
        return [x.strip().replace("\r\n,","") for x in l]
    def _join(self, l, delimeter=' '):
        """Join the cleaned strings of *l* with *delimeter*."""
        return delimeter.join(self._rstrip(l)) # to remove \r\n characters
    def _fetch(self, data, key, subkey=None):
        """Return data[key][subkey] (or data[key] when no matching subkey),
        falling back to 'NA' when *key* is absent."""
        # Membership is tested on the dict directly instead of the original
        # `in data.keys()` dance — same semantics, idiomatic form.
        if key in data:
            if subkey is not None and subkey in data[key]:
                return data[key][subkey]
            else:
                return data[key]
        return 'NA'
    def _strip(self, s):
        """Strip a single string and drop embedded '\\r\\n,' sequences."""
        return s.strip().replace("\r\n,","")
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,268
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/Sector.py
|
import re
from datetime import datetime, timedelta
import scrapy
import requests
from crawler.items import SectorItem
# Seed URLs for the back-office sector, grouped by portal. Hyphenless
# duplicates (e.g. "backofficeexecutive") mirror the hyphenated searches.
BACK_OFFICE = [
    # ============== Naukri ==============
    "http://www.naukri.com/back-office-jobs-in-mumbai",
    "http://www.naukri.com/back-office-executive-jobs-in-mumbai",
    "http://www.naukri.com/back-office-assistant-jobs-in-mumbai",
    "http://www.naukri.com/data-entry-jobs-in-mumbai",
    "http://www.naukri.com/computer-operator-jobs-in-mumbai",
    "http://www.naukri.com/office-admin-jobs-in-mumbai",
    # BUGFIX: the next two entries were fused into one bogus URL
    # ("...receptionist-jobs-in-mumbaibackofficejobs-in-mumbai"); split back
    # into the two searches the list pattern clearly intends.
    "http://www.naukri.com/receptionist-jobs-in-mumbai",
    "http://www.naukri.com/backofficejobs-in-mumbai",
    "http://www.naukri.com/backofficeexecutive-jobs-in-mumbai",
    "http://www.naukri.com/backofficeassistant-jobs-in-mumbai",
    "http://www.naukri.com/dataentry-jobs-in-mumbai",
    "http://www.naukri.com/computeroperator-jobs-in-mumbai",
    "http://www.naukri.com/officeadmin-jobs-in-mumbai",
    # ============== Olx ==============
    "https://www.olx.in/mumbai/jobs/q-back-office/",
    "https://www.olx.in/mumbai/jobs/q-back-office-executive/",
    "https://www.olx.in/mumbai/jobs/q-back-office-assistant/",
    "https://www.olx.in/mumbai/jobs/q-data-entry/",
    "https://www.olx.in/mumbai/jobs/q-computer-operator/",
    "https://www.olx.in/mumbai/jobs/q-office-admin/",
    "https://www.olx.in/mumbai/jobs/q-receptionist/",
    "https://www.olx.in/mumbai/jobs/q-backoffice/",
    "https://www.olx.in/mumbai/jobs/q-backofficeexecutive/",
    "https://www.olx.in/mumbai/jobs/q-backofficeassistant/",
    "https://www.olx.in/mumbai/jobs/q-dataentry/",
    "https://www.olx.in/mumbai/jobs/q-computeroperator/",
    "https://www.olx.in/mumbai/jobs/q-officeadmin/",
    # ============== Shine ==============
    "http://www.shine.com/job-search/simple/back-office/mumbai/",
    "http://www.shine.com/job-search/simple/back-office-executive/mumbai/",
    "http://www.shine.com/job-search/simple/back-office-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/data-entry/mumbai/",
    "http://www.shine.com/job-search/simple/computer-operator/mumbai/",
    "http://www.shine.com/job-search/simple/office-admin/mumbai/",
    "http://www.shine.com/job-search/simple/receptionist/mumbai/",
]
# Seed URLs for the accounts/finance sector, grouped by portal.
# NOTE(review): "accountingjobs-in-mumbai" / "financejobs-in-mumbai" drop the
# hyphen before "jobs", unlike every other hyphenless variant — verify these
# are real naukri search slugs.
ACCOUNTS = [
    # ============== Naukri ==============
    "http://www.naukri.com/accounts-jobs-in-mumbai",
    "http://www.naukri.com/account-jobs-in-mumbai",
    "http://www.naukri.com/accountant-jobs-in-mumbai",
    "http://www.naukri.com/accounts-assistant-jobs-in-mumbai",
    "http://www.naukri.com/account-assistant-jobs-in-mumbai",
    "http://www.naukri.com/book-keeping-jobs-in-mumbai",
    "http://www.naukri.com/accounting-jobs-in-mumbai",
    "http://www.naukri.com/finance-jobs-in-mumbai",
    "http://www.naukri.com/finance-and-accounts-jobs-in-mumbai",
    "http://www.naukri.com/accountsassistant-jobs-in-mumbai",
    "http://www.naukri.com/accountassistant-jobs-in-mumbai",
    "http://www.naukri.com/bookkeeping-jobs-in-mumbai",
    "http://www.naukri.com/accountingjobs-in-mumbai",
    "http://www.naukri.com/financejobs-in-mumbai",
    "http://www.naukri.com/financeandaccounts-jobs-in-mumbai",
    # ============== Olx ==============
    "https://www.olx.in/mumbai/jobs/q-accounts/",
    "https://www.olx.in/mumbai/jobs/q-accountant/",
    "https://www.olx.in/mumbai/jobs/q-account/",
    "https://www.olx.in/mumbai/jobs/q-accounts-assistant/",
    "https://www.olx.in/mumbai/jobs/q-account-assistant/",
    "https://www.olx.in/mumbai/jobs/q-book-keeping/",
    "https://www.olx.in/mumbai/jobs/q-accounting/",
    "https://www.olx.in/mumbai/jobs/q-finance/",
    "https://www.olx.in/mumbai/jobs/q-finance-and-accounts/",
    "https://www.olx.in/mumbai/jobs/q-accountsassistant/",
    "https://www.olx.in/mumbai/jobs/q-accountassistant/",
    "https://www.olx.in/mumbai/jobs/q-bookkeeping/",
    "https://www.olx.in/mumbai/jobs/q-financeandaccounts/",
    # ============== Shine ==============
    "http://www.shine.com/job-search/simple/accounts/mumbai/",
    "http://www.shine.com/job-search/simple/accountant/mumbai/",
    "http://www.shine.com/job-search/simple/account/mumbai/",
    "http://www.shine.com/job-search/simple/accounts-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/account-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/book-keeping/mumbai/",
    "http://www.shine.com/job-search/simple/accounting/mumbai/",
    "http://www.shine.com/job-search/simple/finance/mumbai/",
    "http://www.shine.com/job-search/simple/finance-and-accounts/mumbai/",
    "http://www.shine.com/job-search/simple/accountsassistant/mumbai/",
    "http://www.shine.com/job-search/simple/accountassistant/mumbai/",
    "http://www.shine.com/job-search/simple/bookkeeping/mumbai/",
    "http://www.shine.com/job-search/simple/financeandaccounts/mumbai/",
]
# Seed URLs for the office-boy sector, grouped by portal.
# NOTE(review): "poen" is probably a typo for "peon" — confirm against the
# live naukri slug before changing it.
OFFICE_BOY = [
    # ============== Naukri ==============
    "http://www.naukri.com/poen-jobs-in-mumbai",
    "http://www.naukri.com/office-boy-jobs-in-mumbai",
    "http://www.naukri.com/office-boy-assistant-jobs-in-mumbai",
    # BUGFIX: a missing trailing comma after the next entry made Python
    # concatenate it with the following literal into one bogus URL.
    "http://www.naukri.com/helper-jobs-in-mumbai",
    "http://www.naukri.com/officeboy-jobs-in-mumbai",
    "http://www.naukri.com/officeboyassistant-jobs-in-mumbai",
    # ============== Olx ==============
    "https://www.olx.in/mumbai/jobs/q-poen/",
    "https://www.olx.in/mumbai/jobs/q-office-boy/",
    "https://www.olx.in/mumbai/jobs/q-office-assistant/",
    "https://www.olx.in/mumbai/jobs/q-helper/",
    "https://www.olx.in/mumbai/jobs/q-officeboy/",
    "https://www.olx.in/mumbai/jobs/q-officeassistant/",
    # ============== Shine ==============
    "http://www.shine.com/job-search/simple/poen/mumbai/",
    "http://www.shine.com/job-search/simple/office-boy/mumbai/",
    "http://www.shine.com/job-search/simple/office-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/helper/mumbai/",
    "http://www.shine.com/job-search/simple/officeboy/mumbai/",
    "http://www.shine.com/job-search/simple/officeassistant/mumbai/",
]
class SectorSpider(scrapy.Spider):
name = "sector"
allowed_domains = ["naukri.com", "shine.com", "olx.in"]
start_urls = []
def __init__(self, sector=None, **kwargs):
if sector is not None:
sectors = sector.split(',')
if "back_office" in sectors:
self.start_urls += BACK_OFFICE
elif "accounts" in sectors:
self.start_urls += ACCOUNTS
elif "office_boy" in sectors:
self.start_urls += OFFICE_BOY
else:
self.start_urls = BACK_OFFICE + ACCOUNTS + OFFICE_BOY
def parse(self, response):
if "naukri.com" in response.url:
return self.parse_naukri(response)
elif "shine.com" in response.url:
return self.parse_shine(response)
elif "olx.in" in response.url:
print('sending to olx parse')
return self.parse_olx(response)
else:
return
    def parse_shine(self, response):
        # Derive the sector bucket from the seed list the URL came from;
        # paginated pages carry it forward in request meta instead.
        if response.url in BACK_OFFICE:
            position = 'back_office'
        elif response.url in ACCOUNTS:
            position = 'accounts'
        elif response.url in OFFICE_BOY:
            position = 'office_boy'
        else:
            position = response.meta.get('position', '')
        rx = response.xpath
        # NOTE(review): XPath positional predicates are 1-based, so the i=0
        # probe never matches anything — the loop effectively scans items 1..49.
        for i in range(0, 50):
            # print(i)
            url = rx('.//*[@itemtype="http://schema.org/JobPosting"]['+str(i)+']/div[2]/a/@href').extract_first()
            date = rx('.//*[@itemtype="http://schema.org/JobPosting"]['+str(i)+']/div[3]/div[2]/text()').extract_first()
            if date is not None:
                # print("URL got {}".format(url))
                # print("Date got {}".format(date))
                posted_date_list = date.split()
                # Third token of the date cell, e.g. "Posted on 02-Jan-2017".
                posted_date = datetime.strptime(posted_date_list[2], '%d-%b-%Y').date()
                # print("posted_date {}".format(posted_date))
                today = datetime.now().date()
                # print("Today {}".format(today))
                # Only follow postings from the last day.
                if (today - posted_date) <= timedelta(1):
                    # print("Scrapping......")
                    url = 'http://shine.com'+url
                    # print(url)
                    req = scrapy.Request(url, callback=self.parse_shine_details)
                    req.meta['position'] = position
                    req.meta['url'] = url
                    yield req
        # Pagination: bump the first number found in the URL; the bare except
        # below handles seed URLs that contain no page number yet.
        nextUrl = ""
        flag = False
        try:
            # if int(response.meta['total_items_iterated']) <= int(response.meta['total_count']):
            url = response.url
            page_count = re.findall(r'\d+', url)[0]
            page_count = int(page_count)
            print("current page count {}".format(page_count))
            next_page = page_count + 1
            if next_page > 100:
                return # adding this, because the total-items-iterated logic fails miserably
            nextUrl = re.sub(r'\d+', str(next_page), url)
            print("Next url {}".format(nextUrl))
            flag = True
            # total_items_iterated = int(response.meta['total_items_iterated']) + count_per_page
        except:
            #first page
            flag = True
            nextUrl = response.url + str(2) + "/"
            # total_items_iterated = count_per_page
        finally:
            # NOTE(review): `flag` is True on every path above, and because
            # `yield` sits in a finally block the page-100 `return` still
            # triggers this yield once before the generator stops — confirm
            # this is the intended stopping behaviour.
            if not flag:
                return
            else:
                paginate_req = scrapy.Request(nextUrl, callback=self.parse_shine)
                paginate_req.meta['position'] = position
                # paginate_req.meta['total_count'] = total_count
                # paginate_req.meta['total_items_iterated'] = total_items_iterated
                yield paginate_req
    def parse_shine_details(self, response):
        """Build a SectorItem from a shine.com job-detail page."""
        # url = response.meta['url'].split('?')[0]
        if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is None:
            return
        job = SectorItem()
        job_posting = response.xpath('//div[@itemtype="http://schema.org/JobPosting"]')[0]
        job['title'] = job_posting.xpath('//h1[@itemprop="title"]/text()').extract_first()
        job['date'] = job_posting.xpath('//span[@itemprop="datePosted"]/text()').extract_first()
        # old location xpath: '//a[@class="normaljdsnippet jd_location curText cls_jd_primary_location"]//text()'
        job['location'] = self._join(job_posting.xpath(
            '//span[@itemtype="http://schema.org/Place"]/*//text()'
        ).extract(), delimeter=',')
        # NOTE(review): description stays a raw list of text nodes (not joined),
        # unlike location/role — confirm downstream consumers expect that.
        job['description'] = job_posting.xpath('//span[@itemprop="description"]//text()').extract()
        job['company_name'] = job_posting.xpath('//span[@itemprop="name"]/h2/text()').extract_first()
        job['contact_person_name'] = ''
        recruiter_details = job_posting.xpath('//div[@class="ropen cls_rect_detail_div"]/ul//text()').extract()
        # Heuristic scan of the recruiter panel: any text node that parses as
        # an int is taken as the phone number; the node after an "Email" label
        # is taken as the address.
        for i in recruiter_details:
            try:
                phone = int(i.encode('utf-8').strip())
                job['number'] = phone
            except ValueError:
                pass
            try:
                if re.match('Email\s*(.*)', i):
                    job['email'] = recruiter_details[recruiter_details.index(i)+1]
            except:
                pass
        job['url'] = response.url
        job['role'] = self._join(job_posting.xpath('//span[@itemprop="skills"]/a/text()').extract(), delimeter=',')
        job['position'] = response.meta.get('position', '')
        job['portal'] = 'shine'
        yield job
    def parse_olx(self, response):
        """Walk an olx.in listing page: promoted ads first, then normal ads
        (only those dated 'yesterday'), then schedule the next page."""
        print('in parse')
        if response.url in BACK_OFFICE:
            position = 'back_office'
        elif response.url in ACCOUNTS:
            position = 'accounts'
        elif response.url in OFFICE_BOY:
            position = 'office_boy'
        else:
            position = response.meta.get('position', '')
        print(response.url)
        print("POSITION {}".format(position))
        # promoted
        for i in range(0, 50):
            tbody = response.xpath(".//*[@id='promotedAd']/tbody")
            href = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[1]/td[2]/h3/a/@href").extract()
            # NOTE(review): this uses response.xpath with a tbody-relative
            # path, so it always matches nothing (should be tbody.xpath);
            # harmless only because `date` is unused in this loop.
            date = response.xpath("tr["+str(i)+"]/td/table/tbody/tr[2]/td[1]/p/text()").extract()
            if len(href) > 0:
                print(href)
                href = self._rstrip(href)[0]
                # date = self._rstrip(date)[0]
                req = scrapy.Request(href, callback=self.parse_olx_details)
                req.meta['url'] = href
                req.meta['premium'] = True
                req.meta['position'] = position
                yield req
        # normal
        for i in range(0, 100):
            tbody = response.xpath(".//*[@id='offers_table']/tbody")
            href = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[1]/td[2]/h3/a/@href").extract()
            date = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[2]/td[1]/p/text()").extract()
            if len(href) > 0 and len(date) > 0:
                href = self._rstrip(href)[0]
                date = self._rstrip(date)[0]
                if date.lower() == 'yesterday':
                    req = scrapy.Request(href, callback=self.parse_olx_details)
                    req.meta['url'] = href
                    req.meta['position'] = position
                    yield req
        # Pagination via the ?page= query parameter.
        base_url = response.url.split('?')[0]
        try:
            query_params = response.url.split('?')[1]
            current_page = query_params.split('page=')[1]
            next = int(current_page) + 1
            # NOTE(review): because the yield below sits in a finally block,
            # this `return` does not actually suppress the pagination request —
            # the finally still yields once before the generator stops.
            if str(current_page) == str(response.meta.get('previous_page_number', '')):
                return
        except IndexError:
            # first page
            current_page = 1
            next = 2
        finally:
            next_page = base_url + "?page=" + str(next)
            req = scrapy.Request(next_page, callback=self.parse_olx)
            req.meta['previous_page_number'] = current_page
            req.meta['position'] = position
            yield req
    def parse_olx_details(self, response):
        """Build a SectorItem from an olx.in ad-detail page."""
        job_title = response.xpath(".//*[@id='offer_active']/div[4]/div[1]/div[1]/div[1]/h1/text()").extract_first()
        # NOTE(review): _rstrip returns a list, so job_title is stored as a
        # one-element list (other parsers store a plain string) — confirm.
        job_title = self._rstrip([job_title])
        # NOTE(review): salary is computed but never written into the item.
        salary = response.xpath(".//*[@id='offeractions']/div/div[1]/div[1]/strong/span/text()").extract()
        salary = self._join(salary).encode('utf-8')
        name = response.xpath(".//*[@id='offeractions']/div/div[1]/div[2]/div/p/span[1]/text()").extract_first()
        if name is not None:
            name = name.encode('utf-8')
        phone_no = response.xpath(".//*[@id='contact_methods']/li[3]/div[2]/strong[1]/text()").extract_first()
        if phone_no is not None:
            phone_no = phone_no.encode('utf-8')
        jd = response.xpath(".//*[@id='textContent']/p/text()").extract()
        job_desc = self._join(jd)
        location = response.xpath(".//*[@id='offer_active']/div[4]/div[1]/div[1]/div[1]/p[1]/span/span[2]/strong/text()").extract()
        location = self._join(location)
        job = SectorItem()
        job['url'] = response.url
        # Only yesterday's ads are followed from the listing page (see
        # parse_olx), so the date is hard-coded.
        job['date'] = 'yesterday'
        job['title'] = job_title
        job['location'] = location
        job['description'] = job_desc
        job['company_name'] = ''
        job['contact_person_name'] = name
        job['number'] = phone_no
        job['portal'] = 'olx'
        job['position'] = response.meta.get('position', '')
        yield job
    def parse_naukri(self, response):
        """Walk a naukri.com listing page, follow postings from '1 day ago',
        and paginate until the page number stops increasing."""
        if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]/a/@href').extract_first() is not None:
            # Sector bucket comes from the seed list or from request meta.
            if response.url in BACK_OFFICE:
                position = 'back_office'
            elif response.url in ACCOUNTS:
                position = 'accounts'
            elif response.url in OFFICE_BOY:
                position = 'office_boy'
            else:
                position = response.meta.get('position', '')
            for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
                href = each.xpath('a/@href').extract_first()
                posted_date_string = each.xpath('div[@class="other_details"]'
                                                '/div[@class="rec_details"]/span[@class="date"]/text()').extract_first()
                url = response.urljoin(href)
                # Only follow yesterday's postings.
                if posted_date_string == "1 day ago":
                    req = scrapy.Request(url, callback=self.parse_naukri_details)
                    req.meta['url'] = url
                    req.meta['position'] = position
                    yield req
            if response.xpath('//div[@class="pagination"]/a/@href').extract_first() is not None:
                # With two pagination links the second is "next"; with one,
                # it is the only direction available.
                next = response.xpath('//div[@class="pagination"]/a/@href').extract()
                if len(next) > 1:
                    next = response.urljoin(next[1])
                else:
                    next = response.urljoin(next[0])
                paginate_req = scrapy.Request(next, callback=self.parse_naukri)
                # The page number is the numeric suffix after the last '-'.
                next_paginate_number = int(str(next[str(next).rfind('-')+1:]))
                try:
                    current_paginate_number = int(response.meta['current_paginate_number'])
                except KeyError:
                    # current page is first page of pagination
                    current_paginate_number = 1
                finally:
                    # Stop once the "next" page number goes backwards (the
                    # last page's next-link points back to an earlier page).
                    if current_paginate_number > next_paginate_number:
                        return
                    paginate_req.meta['current_paginate_number'] = next_paginate_number
                    paginate_req.meta['position'] = position
                    yield paginate_req
def parse_naukri_details(self, response):
    """Parse one naukri.com job-detail page into a SectorItem.

    Also fetches recruiter contact details from naukri's separate
    contactDetails endpoint, keyed by the job id embedded in the URL.
    Yields a single SectorItem, or nothing if the page has no JobPosting
    markup.
    """
    # The job id is the trailing "-<id>" segment of the canonical URL.
    url = response.meta['url'].split('?')[0]
    index = url.rfind('-')
    jid = url[index+1:]
    if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is None:
        return
    job = SectorItem()
    job_posting = response.xpath('//div[@itemtype="http://schema.org/JobPosting"]')[0]
    job['url'] = response.url
    job['title'] = self._join(job_posting.xpath('//h1[@itemprop="title"]/text()').extract())
    job['company_name'] = self._join(job_posting.xpath('//a[@itemprop="hiringOrganization"]/text()').extract())
    location = job_posting.xpath('//em[@itemprop="jobLocation"]')
    # Bug fix: a SelectorList is never None, so the old "is not None" test
    # always passed and location[0] could raise IndexError on an empty
    # list; test for emptiness instead.
    if location:
        job['location'] = self._join(location[0].xpath('//div[@itemprop="name"]/a/text()').extract(), delimeter=', ')
    job['description'] = self._join(job_posting.xpath('//ul[@itemprop="description"]//text()').extract())
    job['role'] = ''
    for role in job_posting.xpath('//p/span[@itemprop="occupationalCategory"]'):
        if role.xpath('a/text()').extract_first() is not None:
            job['role'] += self._join(role.xpath('a/text()').extract(), delimeter=', ')
        else:
            job['role'] += ', ' + self._join(role.xpath('text()').extract(), delimeter=', ')
    # parsing for contact details
    contact_url = 'http://www.naukri.com/jd/contactDetails?file=' + str(jid)
    r = requests.get(contact_url)
    if r.status_code == 200:
        contact = r.json()['fields']
        contact = {k.lower(): v for k, v in contact.items()}  # convert all keys to lowercase
        try:
            # Drop the obfuscated image 'src' before reading the address.
            if 'email address' in contact.keys() and 'src' in contact['email address'].keys():
                contact['email address'].pop('src', 'no src found')
            job['email'] = self._fetch(contact, 'email address', 'title')
        except Exception:
            # Best effort only -- the contact payload shape varies.
            # (Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
        job['number'] = self._fetch(contact, 'telephone')
        job['contact_person_name'] = self._fetch(contact, 'recruiter name')
    job['date'] = job_posting.xpath('//div[@class="sumFoot"]//text()').re(r'Posted\s*(.*)')
    job['portal'] = 'naukri'
    job['position'] = response.meta.get('position', '')
    yield job
def _rstrip(self, l):
return [x.strip().replace("\r\n,", "") for x in l]
def _join(self, l, delimeter=' '):
    """Clean every string via _rstrip, then join with *delimeter*."""
    cleaned = self._rstrip(l)  # removes stray CR/LF artifacts first
    return delimeter.join(cleaned)
def _fetch(self, data, key, subkey=None):
if key in data.keys():
if subkey is not None and subkey in data[key].keys():
return data[key][subkey]
else:
return data[key]
else:
return 'NA'
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,269
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/ShineSpider.py
|
import scrapy
import requests
import json
import pprint
import re
from datetime import datetime
from datetime import timedelta
from crawler.items import JobItem
class ShineScrapy(scrapy.Spider):
    """Spider for shine.com job listings.

    All start_urls are currently commented out and parse() is a no-op, so
    the spider is effectively disabled; the old listing/pagination logic
    is preserved in comments for reference.  parse_job_details() is still
    live and turns one job-detail page into a JobItem.
    """
    name = "shine"
    allowed_domains = ["shine.com"]
    start_urls = [
        # 'http://www.shine.com/job-search/simple/data-entry/thane/',
        # 'http://www.shine.com/job-search/simple/office-boy/thane/',
        # 'http://www.shine.com/job-search/simple/office-administrator/thane/',
        # 'http://www.shine.com/job-search/simple/computer-operator/thane/',
        # 'http://www.shine.com/job-search/simple/data-operator/thane/',
        # 'http://www.shine.com/job-search/simple/field-executive/thane/',
        # 'http://www.shine.com/job-search/simple/data-collection/thane/',
        # 'http://www.shine.com/job-search/simple/marketing-executive/thane/',
        # 'http://www.shine.com/job-search/simple/delivery/thane/',
        # 'http://www.shine.com/job-search/simple/courier/thane/',
        # 'http://www.shine.com/job-search/simple/delivery-executive/thane/',
        # 'http://www.shine.com/job-search/simple/delivery-boy/thane/',
        #
        # 'http://www.shine.com/job-search/simple/data-entry/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/office-boy/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/office-administrator/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/computer-operator/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/data-operator/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/field-executive/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/data-collection/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/marketing-executive/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/delivery/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/courier/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/delivery-executive/navi-mumbai/',
        # 'http://www.shine.com/job-search/simple/delivery-boy/navi-mumbai/',
        #
        # 'http://www.shine.com/job-search/simple/data-entry/mumbai/',
        # 'http://www.shine.com/job-search/simple/office-boy/mumbai/',
        # 'http://www.shine.com/job-search/simple/office-administrator/mumbai/',
        # 'http://www.shine.com/job-search/simple/computer-operator/mumbai/',
        # 'http://www.shine.com/job-search/simple/data-operator/mumbai/',
        # 'http://www.shine.com/job-search/simple/field-executive/mumbai/',
        # 'http://www.shine.com/job-search/simple/data-collection/mumbai/',
        # 'http://www.shine.com/job-search/simple/marketing-executive/mumbai/',
        # 'http://www.shine.com/job-search/simple/delivery/mumbai/',
        # 'http://www.shine.com/job-search/simple/courier/mumbai/',
        # 'http://www.shine.com/job-search/simple/delivery-executive/mumbai/',
        # 'http://www.shine.com/job-search/simple/delivery-boy/mumbai/',
    ]

    def parse(self, response):
        """Disabled listing parser (previous logic kept below as comments)."""
        pass
        # if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is not None:
        #     count_string = re.findall(r'\d+',response.xpath('//div[@class="num_key"]/em/text()').extract_first())
        #     total_count = int(count_string[0])
        #     count_per_page = 0
        #
        #     for each in response.xpath('//div[@class="search_listing"]'):
        #         href = each.xpath('div[@class="search_listingleft"]/a/@href')
        #         count_per_page = count_per_page + 1
        #         posted_date_string = each.xpath('div[@class="apply"]/div[@class="share_links"]/text()').extract_first()
        #         url = response.urljoin(href.extract_first())
        #
        #         posted_date_list = posted_date_string.split()
        #         posted_date = datetime.strptime(posted_date_list[2], '%d-%b-%Y').date()
        #         today = datetime.now().date()
        #         if (today - posted_date) <= timedelta(1):
        #             req = scrapy.Request(url, callback=self.parse_job_details)
        #             req.meta['url'] = url
        #             yield req
        #
        #     nextUrl = ""
        #     flag = False
        #     try:
        #         if int(response.meta['total_items_iterated']) <= int(response.meta['total_count']):
        #             url = response.url
        #             page_count = re.findall(r'\d+',url)[0]
        #             page_count = int(page_count)
        #             next_page = str(page_count + 1)
        #             nextUrl = re.sub(r'\d+', next_page, url)
        #             flag = True
        #             total_items_iterated = int(response.meta['total_items_iterated']) + count_per_page
        #     except:
        #         #first page
        #         flag = True
        #         nextUrl = response.url + str(2) + "/"
        #         total_items_iterated = count_per_page
        #     finally:
        #         if not flag:
        #             return
        #         else:
        #             paginate_req = scrapy.Request(nextUrl, callback=self.parse)
        #             paginate_req.meta['total_count'] = total_count
        #             paginate_req.meta['total_items_iterated'] = total_items_iterated
        #             yield paginate_req

    def parse_job_details(self, response):
        """Scrape one shine.com job-detail page into a JobItem.

        Expects response.meta['url'] to carry the listing URL.  Yields a
        single JobItem, or nothing when the page has no JobPosting markup.
        """
        url = response.meta['url'].split('?')[0]
        if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is None:
            # todo: add logic to handle error pages
            # could not parse this page. add to error item
            # error = ErrorItem()
            # error['url'] = response.meta['url']
            # yield error
            return
        job = JobItem()
        job_posting = response.xpath('//div[@itemtype="http://schema.org/JobPosting"]')[0]
        job['url'] = response.meta['url']
        job['title'] = job_posting.xpath('//h1[@itemprop="title"]/text()').extract_first()
        job['company_name'] = job_posting.xpath('//span[@itemprop="name"]/h2/text()').extract_first()
        job['posted_date'] = job_posting.xpath('//span[@itemprop="datePosted"]/text()').extract_first()
        # Shine packs experience (and sometimes a second salary entry)
        # into one element.
        experience_requirements = job_posting.xpath('//span[@itemprop="experienceRequirements"]/text()').extract()
        job['experience_requirements'] = experience_requirements[0]
        try:
            job['salary'] = experience_requirements[1]
        except Exception:
            job['salary'] = 'NA'
        job['location'] = self._join(job_posting.xpath('//a[@class="normaljdsnippet jd_location curText cls_jd_primary_location"]/text()').extract(), delimeter=',')  # check this out later
        job['role'] = self._join(job_posting.xpath('//span[@itemprop="skills"]/a/text()').extract(), delimeter=',')
        job['description'] = job_posting.xpath('//span[@itemprop="description"]//text()').extract()
        # Bug fix: default 'website' up front -- it was previously never
        # set at all when no description line looked like a URL.
        job['website'] = 'NA'
        for i in job['description']:
            if re.match(r'(http\:\/\/|https\:\/\/)?([a-z0-9][a-z0-9\-]*\.)+[a-z0-9][a-z0-9\-]*$', i):
                job['website'] = i
        job['industry'] = self._join(job_posting.xpath('//span[@itemprop="industry"]/a/text()').extract(), delimeter=',')
        recruiter_details = job_posting.xpath('//div[@class="ropen cls_rect_detail_div"]/ul//text()').extract()
        # Bug fix: the old loop re-assigned 'NA' on every non-matching
        # line, clobbering an email/telephone found on an earlier line.
        # Set the defaults once, then only overwrite on a match.
        job['email_id'] = 'NA'
        job['telephone'] = 'NA'
        for i in recruiter_details:
            if re.match(r'Email\s*(.*)', i):
                # The value follows its label in the flattened text list.
                job['email_id'] = recruiter_details[recruiter_details.index(i) + 1]
            if re.match(r'Telephone\s*(.*)', i):
                job['telephone'] = recruiter_details[recruiter_details.index(i) + 1]
        job['contact_dump'] = 'NA'
        job['recruiter_name'] = 'NA'
        job['reference_id'] = 'NA'
        job['address'] = 'NA'
        yield job

    def _rstrip(self, l):
        """Strip whitespace and embedded CR/LF-comma artifacts from each string."""
        return [x.strip().replace("\r\n,", "") for x in l]

    def _join(self, l, delimeter=' '):
        """Clean the strings via _rstrip and join with *delimeter*."""
        return delimeter.join(self._rstrip(l))  # to remove \r\n characters

    def _fetch(self, data, key, subkey=None):
        """Return data[key][subkey], falling back to data[key], then 'NA'."""
        if key in data.keys():
            if subkey is not None and subkey in data[key].keys():
                return data[key][subkey]
            else:
                return data[key]
        else:
            return 'NA'
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,270
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/NaukriSpider.py
|
import scrapy
import requests
import json
from crawler.items import JobItem
class IndeedScrapy(scrapy.Spider):
    """Spider for naukri.com job listings.

    NOTE: the class name says "Indeed" but the spider (name="naukri")
    actually crawls naukri.com -- kept as-is to preserve the interface.
    start_urls are all commented out and parse() is a no-op, so the
    spider is currently disabled; parse_job_details() is still live.
    """
    name = "naukri"
    allowed_domains = ["naukri.com"]
    start_urls = [
        # "http://www.naukri.com/office-boy-jobs-in-thane",
        # "http://www.naukri.com/office-administrator-jobs-in-thane",
        # "http://www.naukri.com/receptionist-jobs-in-thane",
        # "http://www.naukri.com/data-entry-jobs-in-thane",
        # "http://www.naukri.com/computer-operator-jobs-in-thane",
        # "http://www.naukri.com/data-operator-jobs-in-thane",
        # "http://www.naukri.com/field-executive-jobs-in-thane",
        # "http://www.naukri.com/data-collection-jobs-in-thane",
        # "http://www.naukri.com/marketing-executive-jobs-in-thane",
        # "http://www.naukri.com/delivery-jobs-in-thane",
        # "http://www.naukri.com/courier-jobs-in-thane",
        # "http://www.naukri.com/delivery-executive-jobs-in-thane",
        # "http://www.naukri.com/delivery-boy-jobs-in-thane",
        #
        # "http://www.naukri.com/office-boy-jobs-in-navi-mumbai",
        # "http://www.naukri.com/office-administrator-jobs-in-navi-mumbai",
        # "http://www.naukri.com/receptionist-jobs-in-navi-mumbai",
        # "http://www.naukri.com/data-entry-jobs-in-navi-mumbai",
        # "http://www.naukri.com/computer-operator-jobs-in-navi-mumbai",
        # "http://www.naukri.com/data-operator-jobs-in-navi-mumbai",
        # "http://www.naukri.com/field-executive-jobs-in-navi-mumbai",
        # "http://www.naukri.com/data-collection-jobs-in-navi-mumbai",
        # "http://www.naukri.com/marketing-executive-jobs-in-navi-mumbai",
        # "http://www.naukri.com/delivery-jobs-in-navi-mumbai",
        # "http://www.naukri.com/courier-jobs-in-navi-mumbai",
        # "http://www.naukri.com/delivery-executive-jobs-in-navi-mumbai",
        # "http://www.naukri.com/delivery-boy-jobs-in-navi-mumbai",
        #
        # "http://www.naukri.com/office-boy-jobs-in-mumbai",
        # "http://www.naukri.com/office-administrator-jobs-in-mumbai",
        # "http://www.naukri.com/receptionist-jobs-in-mumbai",
        # "http://www.naukri.com/data-entry-jobs-in-mumbai",
        # "http://www.naukri.com/computer-operator-jobs-in-mumbai",
        # "http://www.naukri.com/data-operator-jobs-in-mumbai",
        # "http://www.naukri.com/field-executive-jobs-in-mumbai",
        # "http://www.naukri.com/data-collection-jobs-in-mumbai",
        # "http://www.naukri.com/marketing-executive-jobs-in-mumbai",
        # "http://www.naukri.com/delivery-jobs-in-mumbai",
        # "http://www.naukri.com/courier-jobs-in-mumbai",
        # "http://www.naukri.com/delivery-executive-jobs-in-mumbai",
        # "http://www.naukri.com/delivery-boy-jobs-in-mumbai",
        #
        # "http://www.naukri.com/office-boy-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/office-administrator-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/receptionist-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/data-entry-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/computer-operator-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/data-operator-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/field-executive-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/data-collection-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/marketing-executive-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/delivery-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/courier-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/delivery-executive-jobs-in-mumbai-suburbs",
        # "http://www.naukri.com/delivery-boy-jobs-in-mumbai-suburbs",
    ]

    def parse(self, response):
        """Disabled listing parser (previous logic kept below as comments)."""
        pass
        # if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]/a/@href').extract_first() is not None:
        #
        #     for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
        #         href = each.xpath('a/@href').extract_first()
        #         posted_date_string = each.xpath('div[@class="other_details"]/div[@class="rec_details"]/span[@class="date"]/text()').extract_first()
        #         url = response.urljoin(href)
        #
        #         if posted_date_string == "1 day ago":
        #             req = scrapy.Request(url, callback=self.parse_job_details)
        #             req.meta['url'] = url
        #             yield req
        #
        #     if response.xpath('//div[@class="pagination"]/a/@href').extract_first() is not None:
        #         next = response.xpath('//div[@class="pagination"]/a/@href').extract()
        #         if len(next) > 1 :
        #             next = response.urljoin(next[1])
        #         else:
        #             next = response.urljoin(next[0])
        #         paginate_req = scrapy.Request(next, callback=self.parse)
        #         next_paginate_number = int(str(next[str(next).rfind('-')+1:]))
        #         try:
        #             current_paginate_number = int(response.meta['current_paginate_number'])
        #         except KeyError:
        #             # current page is first page of pagination
        #             current_paginate_number = 1
        #         finally:
        #             if current_paginate_number > next_paginate_number:
        #                 return
        #         paginate_req.meta['current_paginate_number'] = next_paginate_number
        #         yield paginate_req

    def parse_job_details(self, response):
        """Parse one naukri.com job-detail page into a JobItem.

        Also fetches recruiter contact details from naukri's separate
        contactDetails endpoint keyed by the job id embedded in the URL.
        """
        # The job id is the trailing "-<id>" segment of the canonical URL.
        url = response.meta['url'].split('?')[0]
        index = url.rfind('-')
        jid = url[index+1:]
        if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is None:
            # todo: add logic to handle error pages
            # could not parse this page. add to error item
            # error = ErrorItem()
            # error['url'] = response.meta['url']
            # yield error
            return
        job = JobItem()
        job_posting = response.xpath('//div[@itemtype="http://schema.org/JobPosting"]')[0]
        job['url'] = response.meta['url']
        job['title'] = self._join(job_posting.xpath('//h1[@itemprop="title"]/text()').extract())
        job['company_name'] = self._join(job_posting.xpath('//a[@itemprop="hiringOrganization"]/text()').extract())
        job['experience_requirements'] = self._join(job_posting.xpath('//span[@itemprop="experienceRequirements"]/text()').extract())
        location = job_posting.xpath('//em[@itemprop="jobLocation"]')
        # Bug fix: indexing [0] on an empty SelectorList raised
        # IndexError; guard and fall back to 'NA'.
        if location:
            job['location'] = self._join(location[0].xpath('//div[@itemprop="name"]/a/text()').extract(), delimeter=', ')
        else:
            job['location'] = 'NA'
        job['description'] = self._join(job_posting.xpath('//ul[@itemprop="description"]//text()').extract())
        job['salary'] = self._join(job_posting.xpath('//span[@itemprop="baseSalary"]/text()').extract())
        job['industry'] = self._join(job_posting.xpath('//span[@itemprop="industry"]/a/text()').extract(), delimeter=', ')
        job['role'] = ''
        for role in job_posting.xpath('//p/span[@itemprop="occupationalCategory"]'):
            if role.xpath('a/text()').extract_first() is not None:
                job['role'] += self._join(role.xpath('a/text()').extract(), delimeter=', ')
            else:
                job['role'] += ', ' + self._join(role.xpath('text()').extract(), delimeter=', ')
        # parsing for contact details
        contact_url = 'http://www.naukri.com/jd/contactDetails?file=' + str(jid)
        r = requests.get(contact_url)
        if r.status_code == 200:
            contact = r.json()['fields']
            contact = {k.lower(): v for k, v in contact.items()}  # convert all keys to lowercase
            # Drop the obfuscated image 'src' before dumping the payload.
            if 'email address' in contact.keys() and 'src' in contact['email address'].keys():
                contact['email address'].pop('src', 'no src found')
            job['contact_dump'] = json.dumps(contact)
            job['telephone'] = self._fetch(contact, 'telephone')
            job['email_id'] = self._fetch(contact, 'email address', 'title')
            job['recruiter_name'] = self._fetch(contact, 'recruiter name')
            job['reference_id'] = self._fetch(contact, 'reference id')
            job['website'] = self._fetch(contact, 'website')
            job['address'] = self._fetch(contact, 'address')
        else:
            # Bug fix: these fields were previously left unset entirely on
            # a non-200 response (the defaults existed only as
            # commented-out code).
            job['contact_dump'] = 'NA'
            job['telephone'] = 'NA'
            job['email_id'] = 'NA'
            job['recruiter_name'] = 'NA'
            job['reference_id'] = 'NA'
            job['website'] = 'NA'
            job['address'] = 'NA'
        job['posted_date'] = job_posting.xpath('//div[@class="sumFoot"]//text()').re(r'Posted\s*(.*)')
        yield job

    def _rstrip(self, l):
        """Strip whitespace and embedded CR/LF-comma artifacts from each string."""
        return [x.strip().replace("\r\n,", "") for x in l]

    def _join(self, l, delimeter=' '):
        """Clean the strings via _rstrip and join with *delimeter*."""
        return delimeter.join(self._rstrip(l))  # to remove \r\n characters

    def _fetch(self, data, key, subkey=None):
        """Return data[key][subkey], falling back to data[key], then 'NA'."""
        if key in data.keys():
            if subkey is not None and subkey in data[key].keys():
                return data[key][subkey]
            else:
                return data[key]
        else:
            return 'NA'
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,271
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/BabaJob_Thane_Teacher.py
|
import time
import scrapy
import logging
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
from scrapy.spiders import Rule
from crawler.items import CandidatescraperItem
class BabaJobSpider(InitSpider):
    """Logs in to babajob.com and scrapes teacher candidates in Thane.

    Uses InitSpider so the login handshake completes before the
    start_urls crawl begins.
    """
    name = "babajob_thane_teacher"
    allowed_domains = ["babajob.com"]
    login_page = 'http://www.babajob.com/login'
    start_urls = [
        "http://www.babajob.com/Hire-Teacher-in-Thane-sort-dateDesc-in_last_days-1-include_mobile_users",
    ]

    def init_request(self):
        """This function is called before crawling starts."""
        print("Init")
        self.download_delay = 15  # be polite: 15s between requests
        return Request(url=self.login_page, callback=self.login)

    def login(self, response):
        """Generate a login request."""
        print("Login")
        # SECURITY NOTE: credentials are hard-coded; move them to scrapy
        # settings or environment variables and rotate this password.
        return FormRequest.from_response(response,
                                         formdata={'LoginText': '7303038426', 'Password': 'Nishit123'},
                                         clickdata={'id': 'updateprofile-submit'},
                                         callback=self.check_login_response)

    def check_login_response(self, response):
        """Confirm login worked by looking for the account name in the page."""
        print("CheckLogin")
        # Bug fix: response.body is bytes on Python 3, so testing a str
        # needle raised TypeError; use a bytes literal (also valid on 2).
        if b"Nishit" in response.body:
            # Now the crawling can begin..
            print("Logged in")
            return self.initialized()
        else:
            print("Login Failed")
            # Something went wrong, we couldn't log in, so nothing happens.

    def parse(self, response):
        """Walk the paginated candidate grid, queueing profile detail pages."""
        print("Parse")
        next = response.xpath('//a[@id="cp1_pagerSeekersGrid_btnNext"]/@href')
        for each in response.xpath('//div[@class="s-card-inner"]'):
            href = each.xpath('div/div[@class="col-sm-7 s-col-data"]/h2/div/a/@href')
            item = CandidatescraperItem()
            item['sector'] = response.xpath('//div[@id="dropdownMenu1"]/text()').extract_first()
            item['location'] = response.xpath('//div[@id="dropdownMenu2"]/text()').extract_first()
            item['source'] = {"BabaJob"}
            item['date'] = time.strftime("%d/%m/%Y")  # crawl date, dd/mm/yyyy
            time.sleep(15)  # throttle between detail requests
            url = response.urljoin(href.extract_first())
            req = scrapy.Request(url, callback=self.parse_job_details, meta={'item': item})
            yield req
        # Bug fix: an xpath SelectorList is never None, so the old
        # "is not None" test always passed and urljoin(None) could be
        # attempted on the last page; test for emptiness instead.
        if next:
            url = response.urljoin(next.extract_first())
            paginate_req = scrapy.Request(url, callback=self.parse)
            yield paginate_req

    def parse_job_details(self, response):
        """Extract the candidate's name and mobile number from a profile page.

        The item is yielded even when the number cannot be parsed: the
        'finally' clause runs on the early return, preserving the
        original best-effort behaviour.
        """
        item = response.meta['item']
        item['name'] = response.xpath('//div[@id="cp1_nonDeletedUserContent"]/div/div/div/div/div/div/div/div[@class="col-sm-7"]/div/h1/text()').extract_first()
        try:
            # The number is embedded in the shortlist button's onclick JS
            # between the '+' of '+91...' and the following comma.
            onclick = response.xpath('//div[@id="cp1_shortlistButtonContainer"]/div/a/@onclick').extract_first()
            number = onclick[onclick.find('+'):onclick.find(',')]
            item['mobile'] = number.replace("+91", "")
        except Exception:
            # Missing/unexpected markup (e.g. onclick is None) -- fall
            # through; the finally clause still yields the partial item.
            return
        finally:
            yield item
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,272
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/phoneandemailspider.py
|
"""
This spider does a full text search for phone nos and email address in all the pages crawled.
Phone regex: \b[789]\d{2}[-.]?\d{3}[-.]?\d{4}\b
Email regex: [\w\.]+@[\w\.]+\.\w+
"""
import re
import traceback
from boto3.session import Session
import scrapy
class LeadsItem(scrapy.Item):
    # Contact lead scraped from a job page.  All values are assigned as
    # comma-joined strings by Sector1Spider.parse_details.
    number = scrapy.Field()        # phone number(s)
    email = scrapy.Field()         # email address(es)
    company_name = scrapy.Field()  # posting company's name (or 'NA')
class Sector1Spider(scrapy.Spider):
    """Full-text lead scraper for timesjobs.com and needjobsoon.com.

    Scans every job-detail page for Indian mobile numbers and email
    addresses, uploads them to DynamoDB and returns them as a dict.
    """
    name = "phoneandemail"
    allowed_domains = ["timesjobs.com", "needjobsoon.com"]
    start_urls = [
        # ==================== TimesJobs ====================
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Back%20Office&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Back%20Office%20Executive&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Back%20Office%20Assistant&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Back%20Entry&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Computer%20Operator&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Office%20Admin&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Receptionist&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Poen&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Office%20Boy&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Office%20Assistant&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Helper&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Accounts&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Accountant&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Account&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Accounts%20Assistant&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Book%20Keeping&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Accounting&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        "http://www.timesjobs.com/candidate/job-search.html?from=submit&actualTxtKeywords=Finance%20and%20Accounts&searchBy=0&rdoOperator=OR&searchType=personalizedSearch&txtLocation=Mumbai&luceneResultSize=25&postWeek=60&txtKeywords=0DQT0back%20office0DQT0&pDate=I&sequence=1&startPage=1",
        # ==================== NeedJobSoon ====================
        "http://www.needjobsoon.com/jobs/Back%20Office/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Back%20Office%20Executive/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Back%20Office%20Assistant/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Back%20Entry/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Computer%20Operator/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Office%20Admin/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Receptionist/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Poen/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Office%20Boy/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Office%20Assistant/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Helper/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Accounts/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Accountant/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Account/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Accounts%20Assistant/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Book%20Keeping/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Accounting/mumbai?page=0",
        "http://www.needjobsoon.com/jobs/Finance%20and%20Accounts/mumbai?page=0",
    ]

    def parse(self, response):
        """Route a listing page: queue job-detail pages, follow pagination."""
        if "timesjob" in response.url:
            all_links = response.xpath('*//a/@href').extract()
            for link in all_links:
                if 'JobDetailView.html'.lower() in link.lower():
                    url = response.urljoin(link)
                    yield scrapy.Request(url, callback=self.parse_details)
            # timesjobs encodes the page number in the query string;
            # walk pages 1..50.
            current = int(response.url.split('&startPage=')[1])
            if current < 50:
                next_params = "&sequence={}&startPage={}".format(current + 1, current + 1)
                next = response.url.split('&sequence=')[0] + next_params
                yield scrapy.Request(next, callback=self.parse)
        elif "needjobsoon" in response.url:
            all_links = response.xpath('*//a/@href').extract()
            for link in all_links:
                if 'needjobsoon.com/job/'.lower() in link.lower():
                    url = response.urljoin(link)
                    yield scrapy.Request(url, callback=self.parse_details)
            next = response.xpath('//li[@class="pager-next"]/a/@href').extract_first()
            # Bug fix: on the last page extract_first() returns None and
            # urljoin(None) would raise; only follow a real link.
            if next:
                next = response.urljoin(next)
                print(next)
                yield scrapy.Request(next, callback=self.parse)
        else:
            return

    def parse_details(self, response):
        """Scan one job page for phone numbers / emails and push leads to
        DynamoDB.

        Returns a dict with the extracted company name plus comma-joined
        phone number(s) and email(s).
        """
        # Bug fix: response.body is bytes on Python 3 and the str regex
        # patterns below cannot be applied to it; use the decoded text.
        body = response.text
        if "timesjob" in response.url:
            company_name = response.xpath("//*[@class=\"jd-company-name\"]//text()").extract_first()
        elif "needjobsoon" in response.url:
            # Bug fix: the old pattern "(.)+ " captured only the last
            # single character of each match; "(.+)" captures the name.
            company_name = re.findall(r"\bPosted By\b : (.+)", body)
            company_name = ' '.join(company_name)
        else:
            company_name = 'NA'
        number = ','.join(list(set(re.findall(r"\b[789]\d{2}[-.]?\d{3}[-.]?\d{4}\b", body))))
        email = ','.join(list(set(re.findall(r"[\w\.]+@[\w\.]+\.\w+", body))))
        if company_name is not None:
            # Bug fix: str.strip() returns a new string; the old call
            # discarded the result, leaving the name unstripped.
            company_name = company_name.strip()
        # SECURITY NOTE: AWS credentials are hard-coded in source; move
        # them to environment variables / IAM roles and rotate these keys.
        dynamodb_session = Session(aws_access_key_id='AKIAJT6AN3A5WZEZ74WA',
                                   aws_secret_access_key='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',
                                   region_name="us-east-1")
        dynamodb = dynamodb_session.resource('dynamodb', region_name='us-east-1')
        try:
            try:
                phone = number
                if not (phone is None or len(phone) < 8):
                    print("Uploading phone no {}".format(number))
                    company_name = 'NA' if company_name is None else company_name
                    table = dynamodb.Table('phone_no_leads')
                    table.put_item(Item={'phone': phone, 'company': company_name})
            except KeyError:
                print("Suppressed error {}".format(traceback.format_exc()))
            try:
                if not (email is None or len(email) < 4):
                    print("Uploading email {}".format(email))
                    company_name = 'NA' if company_name is None else company_name
                    table = dynamodb.Table('email_leads')
                    table.put_item(Item={'email': email, 'company': company_name})
            except KeyError:
                print("Suppressed error {}".format(traceback.format_exc()))
        except Exception:
            # Deliberate broad catch (narrowed from a bare except): a
            # DynamoDB failure must not kill the crawl; log and fall
            # through to returning the parsed data.
            print("Suppressed error {}".format(traceback.format_exc()))
        return {
            'company_name': company_name,
            'phone_no': number,
            'email': email
        }
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,273
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/Sector1.py
|
import json
import re
import scrapy
from crawler.items import SectorItem
# Seed page-1 search-result URLs for back-office style roles in Mumbai,
# one URL per query variant, grouped by job portal.  Pagination beyond
# page 1 is generated at crawl time by the spider callbacks.
BACK_OFFICE = [
    # ============== Careesma ==============
    "http://www.careesma.in/jobs?q=back+office&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=back+office+executive&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=back+office+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=data+entry&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=computer+operator&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=office+admin&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=receptionist&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=backofficejobs&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=backofficeexecutive&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=backofficeassistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=dataentry&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=computeroperator&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=officeadmin&lc=Mumbai&pg=1",
    # ============== BabaJobs ==============
    "http://www.babajob.com/Jobs-Receptionist-in-Mumbai-page-1",
    # ============== ClickIndia ==============
    "http://mumbai.clickindia.com/qu.php?xd=back+office&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=back+office+executive&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=back+office+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=data+entry&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=computer+operator&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=office+admin&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=receptionist&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=backoffice&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=backofficeexecutive&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=backofficeassistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=dataentry&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=computeroperator&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=officeadmin&page=1",
    # NOTE(review): "receptionist" appears twice in the ClickIndia group
    # (duplicate of the entry above) -- harmless, but one request is wasted.
    "http://mumbai.clickindia.com/qu.php?xd=receptionist&page=1",
    # ============== Indeed ==============
    "http://www.indeed.co.in/jobs?q=back+office&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=back+office+executive&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=back+office+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=data+entry&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=computer+operator&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=office+admin&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=receptionist&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=backofficejobs&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=backofficeexecutive&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=backofficeassistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=dataentry&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=computeroperator&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=officeadmin&l=Mumbai,+Maharashtra&start=0",
]
# Seed page-1 search-result URLs for accounting / finance roles in Mumbai,
# one URL per query variant, grouped by job portal.  Pagination beyond
# page 1 is generated at crawl time by the spider callbacks.
ACCOUNTS = [
    # ============== Careesma ==============
    "http://www.careesma.in/jobs?q=accounts&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=account&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accountant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accounts+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=account+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=book+keeping&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accounting&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=finance&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=finance+and+accounts&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accountsassistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accountassistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=bookkeeping&lc=Mumbai&pg=1",
    # NOTE(review): the next two queries embed the location in the query
    # string instead of the lc= parameter -- presumably intentional free-text
    # searches; the pagination code tolerates the missing pg= only because
    # these never reach the "pg=" split on page 1... verify at crawl time.
    "http://www.careesma.in/jobs?q=accountingjobs+in+mumbai",
    "http://www.careesma.in/jobs?q=financejobs+in+mumbai",
    "http://www.careesma.in/jobs?q=financeandaccounts&lc=Mumbai&pg=1",
    # ============== Baba jobs ==============
    "http://www.babajob.com/Jobs-Accountant-in-Mumbai-page-1",
    # ============== ClickIndia ==============
    "http://mumbai.clickindia.com/qu.php?xd=accounts&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accountant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=account&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accounts+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=account+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=book+keeping&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accounting&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=finance&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=finance+and+accounts&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accountsassistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accountassistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=bookkeeping&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=financeandaccounts&page=1",
    # ============== Indeed ==============
    "http://www.indeed.co.in/jobs?q=accounts&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=account&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accounts+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=account+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=book+keeping&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accounting&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=finance&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=finance+and+accounts&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountsassistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountassistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=bookkeeping&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountingjobs+in+mumbai",
    "http://www.indeed.co.in/jobs?q=financejobs+in+mumbai",
    "http://www.indeed.co.in/jobs?q=financeandaccounts&l=Mumbai,+Maharashtra&start=0",
]
# Seed page-1 search-result URLs for peon / office-boy / helper roles in
# Mumbai, grouped by job portal.
# Fix: two entries (the Careesma and Indeed "helper" queries) were missing a
# trailing comma, so Python's implicit string concatenation fused each with
# the following URL into one malformed entry; the commas are restored.
OFFICE_BOY = [
    # ============== Careesma ==============
    "http://www.careesma.in/jobs?q=poen&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=office+boy&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=office+boy+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=helper&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=officeboy&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=officeboyassistant&lc=Mumbai&pg=1",
    # ============== Babajob ==============
    "http://www.babajob.com/Jobs-Helper-in-Mumbai-page-1",
    # ============== Click India ==============
    "http://mumbai.clickindia.com/qu.php?xd=poen&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=office+boy&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=office+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=helper&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=officeboy&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=officeassistant&page=1",
    # ============== Indeed ==============
    "http://www.indeed.co.in/jobs?q=poen&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=office+boy&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=office+boy+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=helper&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=officeboy&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=officeboyassistant&l=Mumbai,+Maharashtra&start=0",
]
class Sector1Spider(scrapy.Spider):
    """Crawl yesterday's job listings for selected sectors in Mumbai from
    Careesma, BabaJob, ClickIndia and Indeed.

    Spider argument ``sector`` is a comma-separated subset of
    ``back_office``, ``accounts`` and ``office_boy``; when omitted, all
    three seed lists are crawled.
    """
    name = "sector1"
    allowed_domains = ["careesma.in", "babajob.com", "indeed.co.in", "clickindia.com", "click.in"]
    start_urls = []

    def __init__(self, sector=None, **kwargs):
        # Collect the seeds in a fresh local list: the previous
        # ``self.start_urls += ...`` mutated the shared class-level list in
        # place, leaking URLs across spider instances.
        if sector is not None:
            sectors = sector.split(',')
            seeds = []
            # Independent ``if`` checks: the old ``elif`` chain silently
            # ignored every sector after the first match of a
            # comma-separated argument such as "back_office,accounts".
            if "back_office" in sectors:
                seeds += BACK_OFFICE
            if "accounts" in sectors:
                seeds += ACCOUNTS
            if "office_boy" in sectors:
                seeds += OFFICE_BOY
            self.start_urls = seeds
        else:
            self.start_urls = BACK_OFFICE + ACCOUNTS + OFFICE_BOY

    def parse(self, response):
        """Dispatch the listing page to the portal-specific parser."""
        if "careesma.in" in response.url:
            return self.parse_careesma(response)
        elif "babajob.com" in response.url:
            return self.parse_baba(response)
        elif "clickindia.com" in response.url:
            return self.parse_clickindia(response)
        elif "indeed.co.in" in response.url:
            return self.parse_indeed(response)
        else:
            return

    def _position_for(self, response):
        """Map a seed URL back to its sector tag; for paginated or detail
        requests fall back to the tag propagated via request meta.

        (Shared helper for the identical detection block that was repeated
        in all four portal parsers.)
        """
        if response.url in BACK_OFFICE:
            return 'back_office'
        elif response.url in ACCOUNTS:
            return 'accounts'
        elif response.url in OFFICE_BOY:
            return 'office_boy'
        return response.meta.get('position', '')

    def parse_careesma(self, response):
        """Yield detail-page requests for Careesma ads posted yesterday,
        then follow the next results page (up to page 99)."""
        position = self._position_for(response)
        rx = response.xpath
        # NOTE: XPath positions are 1-based, so li[0] never matches; the
        # first iteration is a harmless no-op (kept for identical coverage).
        for i in range(0, 50):
            url = rx(".//*[@id='results-list']/li["+str(i)+"]/div[1]/div[1]/div/h2/a/@href").extract_first()
            if url is not None:
                url = "http://careesma.in"+url
                date = rx(".//*[@id='results-list']/li["+str(i)+"]/div[1]/div[2]/span//text()").extract_first()
                if date is not None:
                    date = date.encode('utf-8')
                    # Only ads posted "yesterday" are followed.
                    if date.strip().lower().find('yesterday') > -1:
                        req = scrapy.Request(url, callback=self.parse_careesma_details)
                        req.meta['position'] = position
                        yield req
        # Pagination: bump the pg= query parameter, capped at page 99.
        current_page = response.url.split('pg=')[1]
        next_page = int(current_page) + 1
        if next_page < 100:
            next_url = response.url.split('pg=')[0] + "pg=" + str(next_page)
            req = scrapy.Request(next_url, callback=self.parse_careesma)
            req.meta['position'] = position
            yield req

    def parse_careesma_details(self, response):
        """Build a SectorItem from a Careesma job-detail page."""
        rx = response.xpath
        job = SectorItem()
        job['title'] = rx(".//*[@id='job-head']/div[1]/h1//text()").extract()
        job['date'] = 'yesterday'
        job['description'] = self._join(rx(".//*[@id='page-content']/div/div[2]/div[1]/div[2]/p/text()").extract())
        job['location'] = rx(".//*[@id='job-head']/div[1]/h2//text()").extract_first()
        job['company_name'] = rx(".//*[@id='job-head']/div[1]/h3/a/span/text()").extract_first()
        job['position'] = response.meta.get('position', '')
        job['portal'] = 'careesma'
        yield job

    def parse_baba(self, response):
        """Yield detail requests for BabaJob ads posted within the last
        hours, then follow the next results page (unbounded)."""
        position = self._position_for(response)
        i = 0
        if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is not None:
            for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
                href = each.xpath('div/div/h2[@class="s-card-title"]/a/@href')
                url = response.urljoin(href.extract_first())
                posted_date_string = each.xpath('div[1]/div[2]/ul/li[2]/p[2]/text()').extract_first()
                i += 1
                if posted_date_string is not None and posted_date_string.rfind('hours') > -1:
                    _l_path = ".//*[@id='searchResults']/div[2]/div["+str(i)+"]/div/div[1]/div[1]/div/ul/li[1]/text()"
                    print(_l_path)
                    location = response.xpath(_l_path).extract_first()
                    if location is None:
                        location = ''
                    req = scrapy.Request(url, callback=self.parse_baba_details)
                    req.meta['url'] = url
                    req.meta['location'] = json.dumps(location)
                    # Fix: propagate the sector tag -- it was previously
                    # dropped here, so every BabaJob item carried an empty
                    # 'position' (unlike the other three portals).
                    req.meta['position'] = position
                    yield req
        # Pagination: bump the trailing "page-N" path segment.
        url = response.url
        page_count = int(url.split('page-')[1])
        next_url = url.split('page-')[0] + "page-" + str(page_count + 1)
        paginate_req = scrapy.Request(next_url, callback=self.parse_baba)
        paginate_req.meta['position'] = position
        yield paginate_req

    def parse_baba_details(self, response):
        """Build a SectorItem from a BabaJob job-detail page."""
        job = SectorItem()
        rx = response.xpath
        job['date'] = 'yesterday'
        job['title'] = rx('//div[@class="row"]/div[@class="col-sm-12"]/h1/text()').extract_first()
        # Location was JSON-encoded into the meta by parse_baba.
        job['location'] = response.meta.get('location', '')
        job['description'] = rx('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Description"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
        job['company_name'] = rx('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Employer picture"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
        job['portal'] = 'babajobs'
        job['position'] = response.meta.get('position', '')
        yield job

    def parse_clickindia(self, response):
        """Scrape listing fields from ClickIndia search results, yield a
        detail request per ad, and follow pagination up to page 9."""
        position = self._position_for(response)
        rx = response.xpath
        for i in range(1, 100):
            url = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/@href").extract_first()
            if url is not None:
                title = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/span[2]//text()").extract()
                description = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/div[3]/text()").extract()
                phone = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/span[1]/text()").extract_first()
                date = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/div[4]/span[1]/text()").extract_first()
                if date is not None:
                    date = date.split('-')[1].strip()
                req = scrapy.Request(url, callback=self.parse_clickindia_details)
                # Listing-page fields are JSON-encoded into request meta and
                # merged with detail-page fields in the callback.
                req.meta['position'] = position
                req.meta['title'] = json.dumps(title)
                req.meta['description'] = json.dumps(description)
                req.meta['phone'] = json.dumps(phone)
                req.meta['date'] = json.dumps(date)
                yield req
        # Pagination: bump the page= query parameter, capped at page 9.
        url = response.url
        page_no = url.split('page=')[1]
        next_page = int(page_no)+1
        if next_page < 10:
            next_url = url.split('page=')[0] + 'page=' + str(next_page)
            req = scrapy.Request(next_url, callback=self.parse_clickindia)
            req.meta['position'] = position
            yield req

    def parse_clickindia_details(self, response):
        """Build a SectorItem by merging listing-page meta with fields
        scraped from the ClickIndia detail page."""
        job = SectorItem()
        rx = response.xpath
        company_name = rx("//html/body/div[2]/div[2]/div[1]/div[1]/div[6]/span[2]//text()").extract()
        desc = rx("//html/body/div[2]/div[2]/div[1]/div[2]//text()").extract()
        location = rx("//html/body/div[2]/div[1]/h1/span/text()").extract()
        contact_person_name = rx(".//*[@id='detail_reply_box']/div/div[1]/div[1]//text()").extract()
        job['date'] = response.meta.get('date', '')
        job['title'] = response.meta.get('title', '')
        job['location'] = location
        job['description'] = response.meta.get('description', '') + " " + json.dumps(desc)
        job['company_name'] = company_name
        job['contact_person_name'] = contact_person_name
        job['number'] = response.meta.get('phone', '')
        job['position'] = response.meta.get('position', '')
        job['portal'] = 'clickindia'
        yield job

    def parse_clickin(self, response):
        """Unfinished scaffolding for click.in listings -- extracts nothing
        yet (the selectors below are placeholders, never consumed)."""
        rx = response.xpath
        url = rx(".//*[@class='clickin-listingpagePostsNoImage']/div[1]/div[1]/a")
        date = rx(".//*[@class='clickin-listingpagePostsNoImage']/div[1]/div[4]/span//text()")
        pass

    def parse_clickin_details(self, response):
        """Unfinished scaffolding for click.in detail pages -- extracts
        nothing yet (the selectors below are placeholders, never consumed)."""
        rx = response.xpath
        title = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/header/h1//text()").extract()
        role = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[4]/div/ul/li[1]/b//text()").extract()
        location = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[4]/div/ul/li[4]/b//text()").extract()
        desc = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/p//text()").extract()
        company = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[4]/div/ul/li[7]/b//text()").extract()
        number = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[5]/div[1]/div[1]//text()").extract()
        pass

    def parse_indeed(self, response):
        """Yield detail requests for Indeed ads posted "1 day ago", then
        follow the "next" pagination link when present."""
        position = self._position_for(response)
        rx = response.xpath
        if rx("//div[@itemtype='http://schema.org/JobPosting']").extract_first() is not None:
            i = 0
            for each in rx("//div[@itemtype='http://schema.org/JobPosting']"):
                i += 1
                title = self._join(each.xpath("h2//text()").extract())
                url = self._join(each.xpath("h2/a/@href").extract())
                company_name = self._join(each.xpath("span[1]/span/text()").extract())
                location = self._join(each.xpath("span[2]/span/span/text()").extract())
                date = self._join(each.xpath("table/*/*/*/*/span[@class='date']//text()").extract())
                if date.rfind('1 day ago') > -1:
                    print("YAY!")
                    url = 'http://www.indeed.co.in' + url
                    req = scrapy.Request(url, callback=self.parse_indeed_details)
                    req.meta['title'] = title
                    req.meta['company_name'] = company_name
                    req.meta['location'] = location
                    req.meta['date'] = date
                    req.meta['position'] = position
                    yield req
        # Pagination: follow the last link in the pagination bar when its
        # label contains "next".  (Renamed local: ``next`` shadowed the
        # builtin.)
        if 'next' in response.xpath('.//div[@class="pagination"]/a//text()').extract()[-1].lower():
            next_href = response.xpath('.//div[@class="pagination"]/a/@href').extract()[-1]
            if 'indeed.co.in' not in next_href:
                next_href = 'http://indeed.co.in' + next_href
            print("FOUND NEXT!! {}".format(next_href))
            paginate_req = scrapy.Request(next_href, callback=self.parse_indeed)
            paginate_req.meta['position'] = position
            yield paginate_req

    def parse_indeed_details(self, response):
        """Build a SectorItem from an Indeed job-detail page; listing
        fields arrive via request meta, phone numbers are regex-mined from
        the job summary."""
        job = SectorItem()
        job['date'] = response.meta.get('date', '')
        job['title'] = response.meta.get('title', '')
        job['location'] = response.meta.get('location', '')
        job['description'] = self._join(response.xpath(".//*[@id='job_summary']//text()").extract())
        job['company_name'] = response.meta.get('company_name', '')
        job['number'] = ', '.join(re.findall(r'[0-9]{10}|[0-9]{8}', job['description']))
        job['position'] = response.meta.get('position', '')
        job['portal'] = 'indeed'
        yield job

    def _rstrip(self, l):
        """Strip each string in *l* and drop embedded "\\r\\n," artifacts."""
        return [x.strip().replace("\r\n,", "") for x in l]

    def _join(self, l, delimeter=' '):
        """Clean the strings in *l* (see _rstrip) and join them with
        *delimeter* (sic -- name kept for backward compatibility)."""
        return delimeter.join(self._rstrip(l))  # to remove \r\n characters

    def _fetch(self, data, key, subkey=None):
        """Safe nested lookup: data[key][subkey] (or data[key] when no
        subkey applies); returns 'NA' when the key is absent."""
        if key in data.keys():
            if subkey is not None and subkey in data[key].keys():
                return data[key][subkey]
            else:
                return data[key]
        else:
            return 'NA'
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,274
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/CompaniesInMumbai.py
|
"""
This spider does a full text search for phone nos and email address in all the pages crawled.
Phone regex: \b[789]\d{2}[-.]?\d{3}[-.]?\d{4}\b
Email regex: [\w\.]+@[\w\.]+\.\w+
"""
import re
import requests
import traceback
from boto3.session import Session
from urlparse import urlparse, parse_qs, urljoin
import scrapy
# Category-listing AJAX endpoints of companiesinmumbai.com; __init__ fetches
# each one and harvests company-detail hrefs from the returned HTML.
# Fix: the Petrochemicals entry previously ended with a stray pasted
# "http://" after ".php", producing a malformed scrpt_name value.
AJAX_URLS = [
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Accounting/Consulting/%20Taxation&scrpt_name=accounting_taxation_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20e-commerce/Internet&scrpt_name=e-commerce_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Airlines/Aviation&scrpt_name=airlines-aviation_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20Hardware&scrpt_name=hardware_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Automobiles&scrpt_name=automobiles_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20Systems/EDP/MIS&scrpt_name=edp_mis_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Call%20Centres&scrpt_name=call_centres_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Law%20/%20Legal%20Consultants&scrpt_name=law_legalconsultants_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Management%20/Engineering%20/Environ.%20Consultants&scrpt_name=managementconsultants_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Real%20Estate%20Agents&scrpt_name=realestateagents_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Mutual%20Fund/%20Stock%20Broking&scrpt_name=brokingfirms_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Engineering&scrpt_name=engineering_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Paints&scrpt_name=paints_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=FMCG&scrpt_name=fmcg_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Garment%20/%20Textiles/%20Accessories&scrpt_name=garment_textiles_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Power/Energy&scrpt_name=power_energy_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Hospitals/Healthcare&scrpt_name=hospitals_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Security&scrpt_name=security_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Institutes%20-%20Management&scrpt_name=management_institutes_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Sugar&scrpt_name=sugar_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Insurance&scrpt_name=insurance_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Iron%20and%20Steel&scrpt_name=iron-steel_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Agriculture/Dairy/Fertlizer&scrpt_name=agriculture_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20ERP/CRM&scrpt_name=erp_crm_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Auto%20Ancillaries/%20Auto%20components&scrpt_name=auto-ancillaries_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20QA/Testing&scrpt_name=IT_qa_testing_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=BPO%20/%20KPO&scrpt_name=bpo-kpo_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-Software%20Services&scrpt_name=software_services_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Cement/Marble/%20Ceramics/Stones&scrpt_name=cementmarble-ceramicsstones.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Construction%20/%20Real%20%20Estate&scrpt_name=realestate_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Media%20/%20Entertainment&scrpt_name=media_entertainment_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Electrical/Electronics&scrpt_name=electrical_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Office%20Automation&scrpt_name=officeautomation_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Financial%20Services&scrpt_name=financialservices_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Petrochemicals/%20Oil/%20Gas/%20Refineries&scrpt_name=petrochemicals_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Placement%20/%20HR%20/%20Training%20Consultants&scrpt_name=placement_consultants_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Govt/Defence/Embassies&scrpt_name=defence_embassies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Rubber/Plastic/%20Glass/%20Wood&scrpt_name=rubber_plastic_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Institutes%20-%20Engineering&scrpt_name=engineering_institute_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Sports/Toys&scrpt_name=sports_toys_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Institutes%20-%20Schools/Colleges&scrpt_name=school-colleges_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Metals/Mining&scrpt_name=metals_mining_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Advertising/Event%20Mgmt/%20PR/MR&scrpt_name=advertising_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20Embedded/EDA/VLSI&scrpt_name=IT-embedded_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Architectural%20Services/%20Interior%20Designing&scrpt_name=architect-interior_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20Network%20Admin./Security&scrpt_name=networkadmin_security_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Banks&scrpt_name=banking_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=IT-%20Telecom%20/Mobile&scrpt_name=telecom_mobile_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Capital%20Goods%20/Machine%20Manufacturing&scrpt_name=capital_goods_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Leather/%20shoes/%20Accessories&scrpt_name=leathershoes_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Chemical&scrpt_name=chemicals_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Travel%20/%20Tourism&scrpt_name=travel_tourism_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Courier/%20Logistics/%20Packaging/%20Transportation&scrpt_name=logistics_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=NGOs/World%20Bodies/%20Associations&scrpt_name=ngos_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Export%20Houses&scrpt_name=export-import_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Paper/Publishing/%20Printing/%20Stationary&scrpt_name=paper_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Food%20Processing/%20Beverages&scrpt_name=foodprocessing_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Pharmaceuticals/%20BioTech/%20Research&scrpt_name=pharmaceutical_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Gems%20/Jewelleries/Watches&scrpt_name=gems_jewelleries_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Retail&scrpt_name=retail_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Hotels%20/%20Resorts&scrpt_name=hotels_resorts_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Shipping%20/%20Marine&scrpt_name=shipping_marrine_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Institutes%20-%20Others/%20Universities&scrpt_name=institutes-universities_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Telecommunication/%20Mobile&scrpt_name=telecommunication_companies_mumbai.php",
    "http://www.companiesinmumbai.com/ajax.php?gofor=show_listing&tot_page=80000&page=1&category=Consumer%20Goods%20-%20Durables/Home%20Appliances&scrpt_name=consumergoods_mumbai.php",
]
class LeadsItem(scrapy.Item):
    # Scraped lead record: comma-joined phone numbers, comma-joined e-mail
    # addresses, and the company name taken from the listing URL.
    # NOTE(review): declared but not instantiated anywhere in this file --
    # the spider's parse() builds a plain dict instead; verify against the
    # item pipeline before removing.
    number = scrapy.Field()
    email = scrapy.Field()
    company_name = scrapy.Field()
class Sector1Spider(scrapy.Spider):
    """Crawl company pages discovered via companiesinmumbai.com category
    AJAX endpoints, full-text scan each page for phone numbers and e-mail
    addresses, upload the leads to DynamoDB, and return the lead as the
    scraped item."""
    name = "companiesinmumbai"
    allowed_domains = ["companiesinmumbai.com"]
    start_urls = []

    def __init__(self, **kwargs):
        # NOTE(review): blocking HTTP requests in __init__ run before the
        # crawl starts and get no scrapy retry/throttle handling; consider
        # moving discovery into start_requests().
        href_regex = r"href=\"(.*?)\""
        base_url = "http://www.companiesinmumbai.com/"
        seeds = []
        for listing_url in AJAX_URLS:
            r = requests.get(listing_url)
            # Deduplicate hrefs, then resolve each against the site root.
            for href in set(re.findall(href_regex, r.text)):
                seeds.append(urljoin(base_url, href))
        # Assign a fresh instance list instead of appending to the shared
        # class-level ``start_urls`` (the old side-effect list comprehension
        # mutated the class attribute).
        self.start_urls = seeds

    def parse(self, response):
        """Extract the company name (``id`` query parameter), phone numbers
        and e-mail addresses from the page body, best-effort upload them to
        the DynamoDB lead tables, and return the lead dict."""
        company_name = parse_qs(urlparse(response.url).query).get('id', None)
        number = ','.join(list(set(re.findall(r"\b[789]\d{2}[-.]?\d{3}[-.]?\d{4}\b", str(response.body)))))
        email = ','.join(list(set(re.findall(r"[\w\.]+@[\w\.]+\.\w+", str(response.body)))))
        if company_name is not None:
            company_name = company_name[0].strip()  # parse_qs values are lists
        # SECURITY: hard-coded AWS credentials committed to source control --
        # rotate these keys and load them from the environment / IAM role.
        dynamodb_session = Session(aws_access_key_id='AKIAJT6AN3A5WZEZ74WA',
                                   aws_secret_access_key='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',
                                   region_name="us-east-1")
        dynamodb = dynamodb_session.resource('dynamodb', region_name='us-east-1')
        try:
            try:
                phone = number
                # Skip obviously-empty/too-short matches (joined string).
                if not (phone is None or len(phone) < 8):
                    print("Uploading phone no {}".format(number))
                    company_name = 'NA' if company_name is None else company_name
                    table = dynamodb.Table('phone_no_leads')
                    table.put_item(Item={'phone': phone, 'company': company_name, 'source': 'companiesinmumbai'})
            except KeyError:
                print("Suppressed error {}".format(traceback.format_exc()))
            try:
                if not (email is None or len(email) < 4):
                    print("Uploading email {}".format(email))
                    company_name = 'NA' if company_name is None else company_name
                    table = dynamodb.Table('email_leads')
                    table.put_item(Item={'email': email, 'company': company_name, 'source': 'companiesinmumbai'})
            except KeyError:
                print("Suppressed error {}".format(traceback.format_exc()))
        except Exception:
            # Was a bare ``except:`` (also swallowed SystemExit and
            # KeyboardInterrupt); the deliberate best-effort upload
            # behaviour is kept, just narrowed.
            print("Suppressed error {}".format(traceback.format_exc()))
        data = {
            'company_name': company_name,
            'number': number,
            'email': email
        }
        # Fix: the lead dict was built but never handed back to scrapy, so
        # no item was ever emitted (the sibling CompaniesInMumbai-style
        # spider returns its dict here).
        return data
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,275
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/ExhaustiveSpider.py
|
import json
import re
from datetime import datetime, timedelta
import scrapy
import requests
from crawler.items import SectorItem
# Seed search URLs for the "back office" sector, grouped by job portal.
# These are consumed as spider start_urls; each portal's list-page parser
# recognises its own seeds by membership in this list.
BACK_OFFICE = [
    # ============== Naukri ==============
    "http://www.naukri.com/back-office-jobs-in-mumbai",
    "http://www.naukri.com/back-office-executive-jobs-in-mumbai",
    "http://www.naukri.com/back-office-assistant-jobs-in-mumbai",
    "http://www.naukri.com/data-entry-jobs-in-mumbai",
    "http://www.naukri.com/computer-operator-jobs-in-mumbai",
    "http://www.naukri.com/office-admin-jobs-in-mumbai",
    # BUG FIX: the next two entries were fused into a single string
    # ("...receptionist-jobs-in-mumbaibackofficejobs-in-mumbai"), producing
    # one invalid URL instead of two valid ones.
    "http://www.naukri.com/receptionist-jobs-in-mumbai",
    "http://www.naukri.com/backofficejobs-in-mumbai",
    "http://www.naukri.com/backofficeexecutive-jobs-in-mumbai",
    "http://www.naukri.com/backofficeassistant-jobs-in-mumbai",
    "http://www.naukri.com/dataentry-jobs-in-mumbai",
    "http://www.naukri.com/computeroperator-jobs-in-mumbai",
    "http://www.naukri.com/officeadmin-jobs-in-mumbai",
    # ============== Olx ==============
    "https://www.olx.in/mumbai/jobs/q-back-office/",
    "https://www.olx.in/mumbai/jobs/q-back-office-executive/",
    "https://www.olx.in/mumbai/jobs/q-back-office-assistant/",
    "https://www.olx.in/mumbai/jobs/q-data-entry/",
    "https://www.olx.in/mumbai/jobs/q-computer-operator/",
    "https://www.olx.in/mumbai/jobs/q-office-admin/",
    "https://www.olx.in/mumbai/jobs/q-receptionist/",
    "https://www.olx.in/mumbai/jobs/q-backoffice/",
    "https://www.olx.in/mumbai/jobs/q-backofficeexecutive/",
    "https://www.olx.in/mumbai/jobs/q-backofficeassistant/",
    "https://www.olx.in/mumbai/jobs/q-dataentry/",
    "https://www.olx.in/mumbai/jobs/q-computeroperator/",
    "https://www.olx.in/mumbai/jobs/q-officeadmin/",
    # ============== Shine ==============
    "http://www.shine.com/job-search/simple/back-office/mumbai/",
    "http://www.shine.com/job-search/simple/back-office-executive/mumbai/",
    "http://www.shine.com/job-search/simple/back-office-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/data-entry/mumbai/",
    "http://www.shine.com/job-search/simple/computer-operator/mumbai/",
    "http://www.shine.com/job-search/simple/office-admin/mumbai/",
    "http://www.shine.com/job-search/simple/receptionist/mumbai/",
    # ============== Careesma ==============
    "http://www.careesma.in/jobs?q=back+office&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=back+office+executive&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=back+office+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=data+entry&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=computer+operator&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=office+admin&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=receptionist&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=backofficejobs&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=backofficeexecutive&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=backofficeassistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=dataentry&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=computeroperator&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=officeadmin&lc=Mumbai&pg=1",
    # ============== BabaJobs ==============
    "http://www.babajob.com/Jobs-Receptionist-in-Mumbai-page-1",
    # ============== ClickIndia ==============
    "http://mumbai.clickindia.com/qu.php?xd=back+office&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=back+office+executive&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=back+office+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=data+entry&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=computer+operator&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=office+admin&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=receptionist&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=backoffice&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=backofficeexecutive&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=backofficeassistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=dataentry&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=computeroperator&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=officeadmin&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=receptionist&page=1",
    # ============== Indeed ==============
    "http://www.indeed.co.in/jobs?q=back+office&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=back+office+executive&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=back+office+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=data+entry&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=computer+operator&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=office+admin&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=receptionist&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=backofficejobs&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=backofficeexecutive&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=backofficeassistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=dataentry&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=computeroperator&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=officeadmin&l=Mumbai,+Maharashtra&start=0",
]
# Seed search URLs for the "accounts" sector, grouped by job portal.
ACCOUNTS = [
    # ============== Naukri ==============
    "http://www.naukri.com/accounts-jobs-in-mumbai",
    "http://www.naukri.com/account-jobs-in-mumbai",
    "http://www.naukri.com/accountant-jobs-in-mumbai",
    "http://www.naukri.com/accounts-assistant-jobs-in-mumbai",
    "http://www.naukri.com/account-assistant-jobs-in-mumbai",
    "http://www.naukri.com/book-keeping-jobs-in-mumbai",
    "http://www.naukri.com/accounting-jobs-in-mumbai",
    "http://www.naukri.com/finance-jobs-in-mumbai",
    "http://www.naukri.com/finance-and-accounts-jobs-in-mumbai",
    "http://www.naukri.com/accountsassistant-jobs-in-mumbai",
    "http://www.naukri.com/accountassistant-jobs-in-mumbai",
    "http://www.naukri.com/bookkeeping-jobs-in-mumbai",
    "http://www.naukri.com/accountingjobs-in-mumbai",
    "http://www.naukri.com/financejobs-in-mumbai",
    "http://www.naukri.com/financeandaccounts-jobs-in-mumbai",
    # ============== Olx ==============
    "https://www.olx.in/mumbai/jobs/q-accounts/",
    "https://www.olx.in/mumbai/jobs/q-accountant/",
    "https://www.olx.in/mumbai/jobs/q-account/",
    "https://www.olx.in/mumbai/jobs/q-accounts-assistant/",
    "https://www.olx.in/mumbai/jobs/q-account-assistant/",
    "https://www.olx.in/mumbai/jobs/q-book-keeping/",
    "https://www.olx.in/mumbai/jobs/q-accounting/",
    "https://www.olx.in/mumbai/jobs/q-finance/",
    "https://www.olx.in/mumbai/jobs/q-finance-and-accounts/",
    "https://www.olx.in/mumbai/jobs/q-accountsassistant/",
    "https://www.olx.in/mumbai/jobs/q-accountassistant/",
    "https://www.olx.in/mumbai/jobs/q-bookkeeping/",
    "https://www.olx.in/mumbai/jobs/q-financeandaccounts/",
    # ============== Shine ==============
    "http://www.shine.com/job-search/simple/accounts/mumbai/",
    "http://www.shine.com/job-search/simple/accountant/mumbai/",
    "http://www.shine.com/job-search/simple/account/mumbai/",
    "http://www.shine.com/job-search/simple/accounts-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/account-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/book-keeping/mumbai/",
    "http://www.shine.com/job-search/simple/accounting/mumbai/",
    "http://www.shine.com/job-search/simple/finance/mumbai/",
    "http://www.shine.com/job-search/simple/finance-and-accounts/mumbai/",
    "http://www.shine.com/job-search/simple/accountsassistant/mumbai/",
    "http://www.shine.com/job-search/simple/accountassistant/mumbai/",
    "http://www.shine.com/job-search/simple/bookkeeping/mumbai/",
    "http://www.shine.com/job-search/simple/financeandaccounts/mumbai/",
    # ============== Careesma ==============
    "http://www.careesma.in/jobs?q=accounts&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=account&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accountant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accounts+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=account+assistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=book+keeping&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accounting&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=finance&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=finance+and+accounts&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accountsassistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=accountassistant&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=bookkeeping&lc=Mumbai&pg=1",
    # BUG FIX: the next two URLs had no "&lc=...&pg=1" suffix;
    # parse_careesma paginates with response.url.split('pg=')[1], which
    # raises IndexError for any careesma URL lacking a pg= parameter.
    "http://www.careesma.in/jobs?q=accountingjobs+in+mumbai&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=financejobs+in+mumbai&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=financeandaccounts&lc=Mumbai&pg=1",
    # ============== Baba jobs ==============
    "http://www.babajob.com/Jobs-Accountant-in-Mumbai-page-1",
    # ============== ClickIndia ==============
    "http://mumbai.clickindia.com/qu.php?xd=accounts&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accountant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=account&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accounts+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=account+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=book+keeping&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accounting&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=finance&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=finance+and+accounts&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accountsassistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=accountassistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=bookkeeping&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=financeandaccounts&page=1",
    # ============== Indeed ==============
    "http://www.indeed.co.in/jobs?q=accounts&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=account&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accounts+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=account+assistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=book+keeping&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accounting&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=finance&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=finance+and+accounts&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountsassistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=accountassistant&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=bookkeeping&l=Mumbai,+Maharashtra&start=0",
    # Consistency fix: these two lacked the location/start parameters every
    # other indeed seed carries.
    "http://www.indeed.co.in/jobs?q=accountingjobs+in+mumbai&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=financejobs+in+mumbai&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=financeandaccounts&l=Mumbai,+Maharashtra&start=0",
]
# Seed search URLs for the "office boy" sector, grouped by job portal.
# NOTE(review): "poen" looks like a typo for "peon", but the URLs are kept
# as-is because changing them would change which pages get crawled.
OFFICE_BOY = [
    # ============== Naukri ==============
    "http://www.naukri.com/poen-jobs-in-mumbai",
    "http://www.naukri.com/office-boy-jobs-in-mumbai",
    "http://www.naukri.com/office-boy-assistant-jobs-in-mumbai",
    # BUG FIX: a missing comma after the "helper" URL fused it with the
    # "officeboy" URL via implicit string concatenation, yielding one
    # invalid URL and silently dropping both searches.
    "http://www.naukri.com/helper-jobs-in-mumbai",
    "http://www.naukri.com/officeboy-jobs-in-mumbai",
    "http://www.naukri.com/officeboyassistant-jobs-in-mumbai",
    # ============== Olx ==============
    "https://www.olx.in/mumbai/jobs/q-poen/",
    "https://www.olx.in/mumbai/jobs/q-office-boy/",
    "https://www.olx.in/mumbai/jobs/q-office-assistant/",
    "https://www.olx.in/mumbai/jobs/q-helper/",
    "https://www.olx.in/mumbai/jobs/q-officeboy/",
    "https://www.olx.in/mumbai/jobs/q-officeassistant/",
    # ============== Shine ==============
    "http://www.shine.com/job-search/simple/poen/mumbai/",
    "http://www.shine.com/job-search/simple/office-boy/mumbai/",
    "http://www.shine.com/job-search/simple/office-assistant/mumbai/",
    "http://www.shine.com/job-search/simple/helper/mumbai/",
    "http://www.shine.com/job-search/simple/officeboy/mumbai/",
    "http://www.shine.com/job-search/simple/officeassistant/mumbai/",
    # ============== Careesma ==============
    "http://www.careesma.in/jobs?q=poen&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=office+boy&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=office+boy+assistant&lc=Mumbai&pg=1",
    # BUG FIX: missing comma (same fusion as the Naukri section).
    "http://www.careesma.in/jobs?q=helper&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=officeboy&lc=Mumbai&pg=1",
    "http://www.careesma.in/jobs?q=officeboyassistant&lc=Mumbai&pg=1",
    # ============== Babajob ==============
    "http://www.babajob.com/Jobs-Helper-in-Mumbai-page-1",
    # ============== Click India ==============
    "http://mumbai.clickindia.com/qu.php?xd=poen&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=office+boy&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=office+assistant&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=helper&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=officeboy&page=1",
    "http://mumbai.clickindia.com/qu.php?xd=officeassistant&page=1",
    # ============== Indeed ==============
    "http://www.indeed.co.in/jobs?q=poen&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=office+boy&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=office+boy+assistant&l=Mumbai,+Maharashtra&start=0",
    # BUG FIX: missing comma (same fusion as the Naukri section).
    "http://www.indeed.co.in/jobs?q=helper&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=officeboy&l=Mumbai,+Maharashtra&start=0",
    "http://www.indeed.co.in/jobs?q=officeboyassistant&l=Mumbai,+Maharashtra&start=0",
]
class ExhaustiveSpider(scrapy.Spider):
name = "exhaustive"
allowed_domains = ["naukri.com", "shine.com", "olx.in", "careesma.in", "babajob.com", "indeed.co.in", "clickindia.com", "click.in"]
start_urls = []
def __init__(self, sector=None, **kwargs):
if sector is not None:
sectors = sector.split(',')
if "back_office" in sectors:
self.start_urls += BACK_OFFICE
elif "accounts" in sectors:
self.start_urls += ACCOUNTS
elif "office_boy" in sectors:
self.start_urls += OFFICE_BOY
else:
self.start_urls = BACK_OFFICE + ACCOUNTS + OFFICE_BOY
def parse(self, response):
if "naukri.com" in response.url:
return self.parse_naukri(response)
elif "shine.com" in response.url:
return self.parse_shine(response)
elif "olx.in" in response.url:
return self.parse_olx(response)
elif "careesma.in" in response.url:
return self.parse_careesma(response)
elif "babajob.com" in response.url:
return self.parse_baba(response)
elif "clickindia.com" in response.url:
return self.parse_clickindia(response)
elif "indeed.co.in" in response.url:
return self.parse_indeed(response)
else:
return
def parse_shine(self, response):
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
rx = response.xpath
for i in range(0, 50):
# print(i)
url = rx('.//*[@itemtype="http://schema.org/JobPosting"]['+str(i)+']/div[2]/a/@href').extract_first()
url = 'http://shine.com'+url
req = scrapy.Request(url, callback=self.parse_shine_details)
req.meta['position'] = position
req.meta['url'] = url
yield req
nextUrl = ""
flag = False
try:
# if int(response.meta['total_items_iterated']) <= int(response.meta['total_count']):
url = response.url
page_count = re.findall(r'\d+', url)[0]
page_count = int(page_count)
print("current page count {}".format(page_count))
next_page = page_count + 1
if next_page > 100:
return # adding this, because the total-items-iterated logic fails miserably
nextUrl = re.sub(r'\d+', str(next_page), url)
print("Next url {}".format(nextUrl))
flag = True
# total_items_iterated = int(response.meta['total_items_iterated']) + count_per_page
except:
#first page
flag = True
nextUrl = response.url + str(2) + "/"
# total_items_iterated = count_per_page
finally:
if not flag:
return
else:
paginate_req = scrapy.Request(nextUrl, callback=self.parse_shine)
paginate_req.meta['position'] = position
# paginate_req.meta['total_count'] = total_count
# paginate_req.meta['total_items_iterated'] = total_items_iterated
yield paginate_req
def parse_shine_details(self, response):
# url = response.meta['url'].split('?')[0]
if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is None:
return
job = SectorItem()
job_posting = response.xpath('//div[@itemtype="http://schema.org/JobPosting"]')[0]
job['title'] = job_posting.xpath('//h1[@itemprop="title"]/text()').extract_first()
job['date'] = job_posting.xpath('//span[@itemprop="datePosted"]/text()').extract_first()
# old location xpath: '//a[@class="normaljdsnippet jd_location curText cls_jd_primary_location"]//text()'
job['location'] = self._join(job_posting.xpath(
'//span[@itemtype="http://schema.org/Place"]/*//text()'
).extract(), delimeter=',')
job['description'] = job_posting.xpath('//span[@itemprop="description"]//text()').extract()
job['company_name'] = job_posting.xpath('//span[@itemprop="name"]/h2/text()').extract_first()
job['contact_person_name'] = ''
recruiter_details = job_posting.xpath('//div[@class="ropen cls_rect_detail_div"]/ul//text()').extract()
for i in recruiter_details:
try:
phone = int(i.encode('utf-8').strip())
job['number'] = phone
except ValueError:
pass
try:
if re.match('Email\s*(.*)', i):
job['email'] = recruiter_details[recruiter_details.index(i)+1]
except:
pass
job['url'] = response.url
job['role'] = self._join(job_posting.xpath('//span[@itemprop="skills"]/a/text()').extract(), delimeter=',')
job['position'] = response.meta.get('position', '')
job['portal'] = 'shine'
yield job
def parse_olx(self, response):
print('in parse')
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
print(response.url)
print("POSITION {}".format(position))
# promoted
for i in range(0, 50):
tbody = response.xpath(".//*[@id='promotedAd']/tbody")
href = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[1]/td[2]/h3/a/@href").extract()
if len(href) > 0:
print(href)
href = self._rstrip(href)[0]
req = scrapy.Request(href, callback=self.parse_olx_details)
req.meta['url'] = href
req.meta['premium'] = True
req.meta['position'] = position
yield req
# normal
for i in range(0, 100):
tbody = response.xpath(".//*[@id='offers_table']/tbody")
href = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[1]/td[2]/h3/a/@href").extract()
if len(href) > 0:
href = self._rstrip(href)[0]
req = scrapy.Request(href, callback=self.parse_olx_details)
req.meta['url'] = href
req.meta['position'] = position
yield req
base_url = response.url.split('?')[0]
try:
query_params = response.url.split('?')[1]
current_page = query_params.split('page=')[1]
next = int(current_page) + 1
if str(current_page) == str(response.meta.get('previous_page_number', '')):
return
except IndexError:
# first page
current_page = 1
next = 2
finally:
next_page = base_url + "?page=" + str(next)
req = scrapy.Request(next_page, callback=self.parse_olx)
req.meta['previous_page_number'] = current_page
req.meta['position'] = position
yield req
def parse_olx_details(self, response):
job_title = response.xpath(".//*[@id='offer_active']/div[4]/div[1]/div[1]/div[1]/h1/text()").extract_first()
job_title = self._rstrip([job_title])
salary = response.xpath(".//*[@id='offeractions']/div/div[1]/div[1]/strong/span/text()").extract()
salary = self._join(salary).encode('utf-8')
name = response.xpath(".//*[@id='offeractions']/div/div[1]/div[2]/div/p/span[1]/text()").extract_first()
if name is not None:
name = name.encode('utf-8')
phone_no = response.xpath(".//*[@id='contact_methods']/li[3]/div[2]/strong[1]/text()").extract_first()
if phone_no is not None:
phone_no = phone_no.encode('utf-8')
jd = response.xpath(".//*[@id='textContent']/p/text()").extract()
job_desc = self._join(jd)
location = response.xpath(".//*[@id='offer_active']/div[4]/div[1]/div[1]/div[1]/p[1]/span/span[2]/strong/text()").extract()
location = self._join(location)
job = SectorItem()
job['url'] = response.url
job['date'] = 'yesterday'
job['title'] = job_title
job['location'] = location
job['description'] = job_desc
job['company_name'] = ''
job['contact_person_name'] = name
job['number'] = phone_no
job['portal'] = 'olx'
job['position'] = response.meta.get('position', '')
yield job
def parse_naukri(self, response):
if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]/a/@href').extract_first() is not None:
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
href = each.xpath('a/@href').extract_first()
# posted_date_string = each.xpath('div[@class="other_details"]'
# '/div[@class="rec_details"]/span[@class="date"]/text()').extract_first()
url = response.urljoin(href)
req = scrapy.Request(url, callback=self.parse_naukri_details)
req.meta['url'] = url
req.meta['position'] = position
yield req
if response.xpath('//div[@class="pagination"]/a/@href').extract_first() is not None:
next = response.xpath('//div[@class="pagination"]/a/@href').extract()
if len(next) > 1:
next = response.urljoin(next[1])
else:
next = response.urljoin(next[0])
paginate_req = scrapy.Request(next, callback=self.parse_naukri)
next_paginate_number = int(str(next[str(next).rfind('-')+1:]))
try:
current_paginate_number = int(response.meta['current_paginate_number'])
except KeyError:
# current page is first page of pagination
current_paginate_number = 1
finally:
if current_paginate_number > next_paginate_number:
return
paginate_req.meta['current_paginate_number'] = next_paginate_number
paginate_req.meta['position'] = position
yield paginate_req
def parse_naukri_details(self, response):
# extract job-id
url = response.meta['url'].split('?')[0]
index = url.rfind('-')
jid = url[index+1:]
if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is None:
return
job = SectorItem()
job_posting = response.xpath('//div[@itemtype="http://schema.org/JobPosting"]')[0]
job['url'] = response.url
job['title'] = self._join(job_posting.xpath('//h1[@itemprop="title"]/text()').extract())
job['company_name'] = self._join(job_posting.xpath('//a[@itemprop="hiringOrganization"]/text()').extract())
location = job_posting.xpath('//em[@itemprop="jobLocation"]')
if location is not None:
job['location'] = self._join(location[0].xpath('//div[@itemprop="name"]/a/text()').extract(), delimeter=', ')
job['description'] = self._join(job_posting.xpath('//ul[@itemprop="description"]//text()').extract())
job['role'] = ''
for role in job_posting.xpath('//p/span[@itemprop="occupationalCategory"]'):
if role.xpath('a/text()').extract_first() is not None:
job['role'] += self._join(role.xpath('a/text()').extract(), delimeter=', ')
else:
job['role'] += ', ' + self._join(role.xpath('text()').extract(), delimeter=', ')
# parsing for contact details
contact_url = 'http://www.naukri.com/jd/contactDetails?file=' + str(jid)
r = requests.get(contact_url)
if r.status_code == 200:
contact = r.json()['fields']
contact = {k.lower(): v for k, v in contact.items()} # convert all keys to lowercase
try:
if 'email address' in contact.keys() and 'src' in contact['email address'].keys():
contact['email address'].pop('src', 'no src found')
job['email'] = self._fetch(contact, 'email address', 'title')
except:
pass
job['number'] = self._fetch(contact, 'telephone')
# job['email'] = self._fetch(contact, 'email address', 'title')
job['contact_person_name'] = self._fetch(contact, 'recruiter name')
job['date'] = job_posting.xpath('//div[@class="sumFoot"]//text()').re('Posted\s*(.*)')
job['portal'] = 'naukri'
job['position'] = response.meta.get('position', '')
yield job
def parse_careesma(self, response):
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
rx = response.xpath
for i in range(0, 50):
url = rx(".//*[@id='results-list']/li["+str(i)+"]/div[1]/div[1]/div/h2/a/@href").extract_first()
if url is not None:
url = "http://careesma.in"+url
# date = rx(".//*[@id='results-list']/li["+str(i)+"]/div[1]/div[2]/span//text()").extract_first()
# if date is not None:
# date = date.encode('utf-8')
# if date.strip().lower().find('yesterday') > -1:
req = scrapy.Request(url, callback=self.parse_careesma_details)
req.meta['position'] = position
yield req
# pagination
curret_page = response.url.split('pg=')[1]
next_page = int(curret_page) + 1
if next_page < 100:
next_url = response.url.split('pg=')[0] + "pg=" + str(next_page)
req = scrapy.Request(next_url, callback=self.parse_careesma)
req.meta['position'] = position
yield req
def parse_careesma_details(self, response):
rx = response.xpath
job = SectorItem()
job['title'] = rx(".//*[@id='job-head']/div[1]/h1//text()").extract()
job['date'] = 'yesterday'
job['description'] = self._join(rx(".//*[@id='page-content']/div/div[2]/div[1]/div[2]/p/text()").extract())
# index = job['description'].rfind('Location')
# job['location'] = job['description'][index:]
job['location'] = rx(".//*[@id='job-head']/div[1]/h2//text()").extract_first()
job['company_name'] = rx(".//*[@id='job-head']/div[1]/h3/a/span/text()").extract_first()
# contact_person_name = ''
# job['role'] = rx(".//*[@id='page-content']/div/div[2]/div[1]/div[5]/dl[2]/dd[4]/text()").extract_first()
job['position'] = response.meta.get('position', '')
job['portal'] = 'careesma'
yield job
def parse_baba(self, response):
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
i = 0
if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is not None:
for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
href = each.xpath('div/div/h2[@class="s-card-title"]/a/@href')
url = response.urljoin(href.extract_first())
# posted_date_string = each.xpath('div[1]/div[2]/ul/li[2]/p[2]/text()').extract_first()
i += 1
# if posted_date_string is not None and posted_date_string.rfind('hours') > -1:
_l_path = ".//*[@id='searchResults']/div[2]/div["+str(i)+"]/div/div[1]/div[1]/div/ul/li[1]/text()"
print(_l_path)
location = response.xpath(_l_path).extract_first()
if location is None:
location = ''
req = scrapy.Request(url, callback=self.parse_baba_details)
req.meta['url'] = url
req.meta['location'] = json.dumps(location)
yield req
url = response.url
page_count = url.split('page-')[1]
page_count = int(page_count)
next_page = str(page_count + 1)
nextUrl = url.split('page-')[0] + "page-" + next_page
paginate_req = scrapy.Request(nextUrl, callback=self.parse_baba)
paginate_req.meta['position'] = position
yield paginate_req
def parse_baba_details(self, response):
job = SectorItem()
rx = response.xpath
job['date'] = 'yesterday'
job['title'] = rx('//div[@class="row"]/div[@class="col-sm-12"]/h1/text()').extract_first()
job['location'] = response.meta.get('location', '')
job['description'] = rx('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Description"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
job['company_name'] = rx('//div[div[@class="col-sm-2 job-label-text"]/img[@alt="Employer picture"]]/div[@class="col-sm-10 job-info-text"]/text()').extract_first()
# job['contact_person_name'] =
# job['number'] = phone_no
job['portal'] = 'babajobs'
job['position'] = response.meta.get('position', '')
yield job
def parse_clickindia(self, response):
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
rx = response.xpath
for i in range(1, 100):
url = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/@href").extract_first()
if url is not None:
title = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/span[2]//text()").extract()
description = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/div[3]/text()").extract()
phone = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/span[1]/text()").extract_first()
# date = rx(".//*[@id='updates']/div["+str(i)+"]/a[1]/div[2]/div[4]/span[1]/text()").extract_first()
# if date is not None:
# date = date.split('-')[1].strip()
req = scrapy.Request(url, callback=self.parse_clickindia_details)
req.meta['position'] = position
req.meta['title'] = json.dumps(title)
req.meta['description'] = json.dumps(description)
req.meta['phone'] = json.dumps(phone)
# req.meta['date'] = json.dumps(date)
yield req
url = response.url
page_no = url.split('page=')[1]
next_page = int(page_no)+1
if next_page < 10:
next_url = url.split('page=')[0] + 'page=' + str(next_page)
req = scrapy.Request(next_url, callback=self.parse_clickindia)
req.meta['position'] = position
yield req
def parse_clickindia_details(self, response):
job = SectorItem()
rx = response.xpath
company_name = rx("//html/body/div[2]/div[2]/div[1]/div[1]/div[6]/span[2]//text()").extract()
desc = rx("//html/body/div[2]/div[2]/div[1]/div[2]//text()").extract()
location = rx("//html/body/div[2]/div[1]/h1/span/text()").extract()
contact_person_name = rx(".//*[@id='detail_reply_box']/div/div[1]/div[1]//text()").extract()
# job['date'] = response.meta.get('date', '')
job['title'] = response.meta.get('title', '')
job['location'] = location
job['description'] = response.meta.get('description', '') + " " + json.dumps(desc)
job['company_name'] = company_name
job['contact_person_name'] = contact_person_name
job['number'] = response.meta.get('phone', '')
job['position'] = response.meta.get('position', '')
job['portal'] = 'clickindia'
yield job
def parse_clickin(self, response):
rx = response.xpath
url = rx(".//*[@class='clickin-listingpagePostsNoImage']/div[1]/div[1]/a")
date = rx(".//*[@class='clickin-listingpagePostsNoImage']/div[1]/div[4]/span//text()")
pass
def parse_clickin_details(self, response):
rx = response.xpath
title = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/header/h1//text()").extract()
role = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[4]/div/ul/li[1]/b//text()").extract()
location = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[4]/div/ul/li[4]/b//text()").extract()
desc = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/p//text()").extract()
company = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[4]/div/ul/li[7]/b//text()").extract()
number = rx(".//*[@id='clickin-leftPanelInner-top']/div[3]/div[1]/div[1]/div[5]/div[1]/div[1]//text()").extract()
pass
def parse_indeed(self, response):
if response.url in BACK_OFFICE:
position = 'back_office'
elif response.url in ACCOUNTS:
position = 'accounts'
elif response.url in OFFICE_BOY:
position = 'office_boy'
else:
position = response.meta.get('position', '')
rx = response.xpath
if rx("//div[@itemtype='http://schema.org/JobPosting']").extract_first() is not None:
i = 0
for each in rx("//div[@itemtype='http://schema.org/JobPosting']"):
i += 1
title = self._join(each.xpath("h2//text()").extract())
url = self._join(each.xpath("h2/a/@href").extract())
company_name = self._join(each.xpath("span[1]/span/text()").extract())
location = self._join(each.xpath("span[2]/span/span/text()").extract())
date = self._join(each.xpath("table/*/*/*/*/span[@class='date']//text()").extract())
# if date.rfind('1 day ago') > -1:
url = 'http://www.indeed.co.in' + url
req = scrapy.Request(url, callback=self.parse_indeed_details)
req.meta['title'] = title
req.meta['company_name'] = company_name
req.meta['location'] = location
# req.meta['date'] = date
req.meta['position'] = position
yield req
if 'next' in response.xpath('.//div[@class="pagination"]/a//text()').extract()[-1].lower():
next = response.xpath('.//div[@class="pagination"]/a/@href').extract()[-1]
if 'indeed.co.in' not in next:
next = 'http://indeed.co.in' + next
print("FOUND NEXT!! {}".format(next))
paginate_req = scrapy.Request(next, callback=self.parse_indeed)
paginate_req.meta['position'] = position
yield paginate_req
def parse_indeed_details(self, response):
    """Build and yield a SectorItem for one indeed.co.in job page."""
    meta = response.meta
    item = SectorItem()
    item['title'] = meta.get('title', '')
    item['date'] = meta.get('date', '')
    item['location'] = meta.get('location', '')
    item['company_name'] = meta.get('company_name', '')
    item['position'] = meta.get('position', '')
    item['portal'] = 'indeed'
    item['description'] = self._join(response.xpath(".//*[@id='job_summary']//text()").extract())
    # Pull 10- or 8-digit phone numbers out of the free-text description.
    item['number'] = ', '.join(re.findall(r'[0-9]{10}|[0-9]{8}', item['description']))
    yield item
def _rstrip(self, l):
return [x.strip().replace("\r\n,", "") for x in l]
def _join(self, l, delimeter=' '):
    """Clean each fragment via ``_rstrip`` (dropping stray CR/LF
    artefacts), then glue the pieces together with *delimeter*."""
    parts = self._rstrip(l)
    return delimeter.join(parts)
def _fetch(self, data, key, subkey=None):
if key in data.keys():
if subkey is not None and subkey in data[key].keys():
return data[key][subkey]
else:
return data[key]
else:
return 'NA'
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,276
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/OlxComplete.py
|
import scrapy
from crawler.items import JobItem
class OlxComplete(scrapy.Spider):
    """Spider for olx.in job-category listings.

    The listing-page logic in ``parse`` is currently disabled (body is
    ``pass`` with the old implementation kept as comments); category
    URLs in ``start_urls`` are toggled on/off by commenting as needed.
    """
    name = "olx_complete"
    allowed_domains = ["olx.in"]
    # Category listing seeds — uncomment the ones to crawl.
    start_urls = [
        # 'https://www.olx.in/mumbai/customer-service/?page=1',
        # # 'https://www.olx.in/mumbai/online-part-time/?page=1',
        # 'https://www.olx.in/mumbai/marketing/?page=1',
        # 'https://www.olx.in/mumbai/advertising-pr/?page=1',
        # 'https://www.olx.in/mumbai/hotels-tourism/?page=1',
        # 'https://www.olx.in/mumbai/human-resources/?page=1',
        # 'https://www.olx.in/mumbai/clerical-administration/?page=1',
        # 'https://www.olx.in/mumbai/sales/?page=1',
        # 'https://www.olx.in/mumbai/manufacturing/?page=1',
        # 'https://www.olx.in/mumbai/part-time/?page=1',
        # 'https://www.olx.in/mumbai/other-jobs/?page=1',
        # 'https://www.olx.in/mumbai/it/?page=1',
        # 'https://www.olx.in/mumbai/education/?page=1',
        # 'https://www.olx.in/mumbai/accounting-finance/?page=1',
    ]

    def parse(self, response):
        """Listing-page callback (currently disabled; original promoted/
        normal-listing and pagination logic preserved below as comments)."""
        pass
        # # promoted
        # for i in range(0, 50):
        #     tbody = response.xpath(".//*[@id='promotedAd']/tbody")
        #     href = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[1]/td[2]/h3/a/@href").extract()
        #     if len(href) > 0:
        #         print(href)
        #         href = self._rstrip(href)[0]
        #         req = scrapy.Request(href, callback=self.parse_job_details)
        #         req.meta['url'] = href
        #         req.meta['premium'] = True
        #         yield req
        #
        # # normal
        # for i in range(0, 100):
        #     tbody = response.xpath(".//*[@id='offers_table']/tbody")
        #     href = tbody.xpath("tr["+str(i)+"]/td/table/tbody/tr[1]/td[2]/h3/a/@href").extract()
        #     if len(href) > 0:
        #         href = self._rstrip(href)[0]
        #         req = scrapy.Request(href, callback=self.parse_job_details)
        #         req.meta['url'] = href
        #         yield req
        #
        # base_url = response.url.split('?')[0]
        # try:
        #     query_params = response.url.split('?')[1]
        #     current_page = query_params.split('page=')[1]
        #     next = int(current_page) + 1
        #     if str(current_page) == str(response.meta.get('previous_page_number', '')):
        #         return
        # except IndexError:
        #     # first page
        #     current_page = 1
        #     next = 2
        # finally:
        #     next_page = base_url + "?page=" + str(next)
        #     req = scrapy.Request(next_page, callback=self.parse)
        #     req.meta['previous_page_number'] = current_page
        #     yield req

    def parse_job_details(self, response):
        """Scrape one olx.in job-detail page into a JobItem.

        NOTE(review): the ``extract_first()`` calls below may return
        None, making the chained ``.encode('utf-8')`` / ``_rstrip``
        raise AttributeError — confirm the selectors always match on
        live pages.  The ``.encode`` calls also suggest Python-2-era
        code; on Python 3 they produce ``bytes``.
        """
        job_title = response.xpath(".//*[@id='offer_active']/div[4]/div[1]/div[1]/div[1]/h1/text()").extract_first()
        # NOTE(review): _rstrip returns a list, so job['title'] below is
        # stored as a one-element list — verify downstream expectations.
        job_title = self._rstrip([job_title])
        salary = response.xpath(".//*[@id='offeractions']/div/div[1]/div[1]/strong/span/text()").extract()
        salary = self._join(salary).encode('utf-8')
        name = response.xpath(".//*[@id='offeractions']/div/div[1]/div[2]/div/p/span[1]/text()").extract_first().encode('utf-8')
        phone_no = response.xpath(".//*[@id='contact_methods']/li[3]/div[2]/strong[1]/text()").extract_first().encode('utf-8')
        jd = response.xpath(".//*[@id='textContent']/p/text()").extract()
        job_desc = self._join(jd)
        location = response.xpath(".//*[@id='offer_active']/div[4]/div[1]/div[1]/div[1]/p[1]/span/span[2]/strong/text()").extract()
        location = self._join(location)
        job = JobItem()
        job['url'] = response.meta['url']
        job['title'] = job_title
        job['location'] = location
        job['description'] = job_desc
        job['address'] = location
        job['telephone'] = phone_no
        job['recruiter_name'] = name
        # 'premium' is only set by the (disabled) promoted-listing branch.
        job['premium'] = response.meta.get('premium', '')
        # Debug dump of the scraped fields.
        print("#"*15)
        print("Job title: {}".format(job_title))
        print("Name: {}".format(name))
        print("Salary: {}".format(salary))
        print("Phone No: {}".format(phone_no))
        print("Location: {}".format(location))
        print("JD: {}...".format(job_desc[:15]))
        print("URL: {}".format(job["url"]))
        print("#"*15)
        return job

    def _rstrip(self, l):
        """Strip each fragment and drop literal CR/LF-comma artefacts."""
        return [x.strip().replace("\r\n,", "") for x in l]

    def _join(self, l, delimeter=' '):
        """Clean fragments via _rstrip and join with *delimeter*."""
        return delimeter.join(self._rstrip(l))  # to remove \r\n characters
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,277
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/IndeedSpider.py
|
import scrapy
import requests
import json
import pprint
import re
from datetime import datetime
from datetime import timedelta
from crawler.items import JobItem
class IndeedScrapy(scrapy.Spider):
    """Spider for indeed.co.in job searches.

    The listing-page logic in ``parse`` is currently disabled (kept as
    comments); search-URL seeds in ``start_urls`` are toggled on by
    uncommenting.
    """
    name = "indeed"
    allowed_domains = ["indeed.co.in"]
    # Search seeds per query/city — uncomment the ones to crawl.
    start_urls = [
        # 'http://www.indeed.co.in/jobs?q=office+boy&l=Thane&start=0',
        # 'http://www.indeed.co.in/jobs?q=office+administrative&l=Thane&start=0',
        # 'http://www.indeed.co.in/jobs?q=back+office&l=Thane&start=0',
        # 'http://www.indeed.co.in/jobs?q=data+entry&l=Thane&start=0',
        # 'http://www.indeed.co.in/jobs?q=computer+operator&l=Thane&start=0',
        # 'http://www.indeed.co.in/jobs?q=data+operator&l=Thane&start=0',
        # 'http://www.indeed.co.in/jobs?q=field+work&l=Thane&start=0',
        #
        # 'http://www.indeed.co.in/jobs?q=office+boy&l=Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=office+administrative&l=Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=back+office&l=Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=data+entry&l=Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=computer+operator&l=Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=data+operator&l=Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=field+work&l=Mumbai&start=0',
        #
        # 'http://www.indeed.co.in/jobs?q=office+boy&l=Navi+Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=office+administrative&l=Navi+Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=back+office&l=Navi+Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=data+entry&l=Navi+Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=computer+operator&l=Navi+Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=data+operator&l=Navi+Mumbai&start=0',
        # 'http://www.indeed.co.in/jobs?q=field+work&l=Navi+Mumbai&start=0',
    ]

    def parse(self, response):
        """Listing-page callback (currently disabled; original posting
        and pagination logic preserved below as comments)."""
        pass
        # if response.xpath('//div[@itemtype="http://schema.org/JobPosting"]').extract_first() is not None:
        #
        #     for each in response.xpath('//div[@itemtype="http://schema.org/JobPosting"]'):
        #         if each.xpath('table/tr/td/div[@class="result-link-bar-container"]/div[@class="result-link-bar"]/span[@class="date"]/text()').extract_first() == '1 day ago':
        #             if each.xpath('table/tr/td/div[@class="result-link-bar-container"]/div[@class="result-link-bar"]/span[@class="result-link-source"]/text()').extract_first() is None:
        #                 href = "http://www.indeed.co.in" + each.xpath('h2/a/@href').extract_first()
        #                 url = response.urljoin(href)
        #                 req = scrapy.Request(url, callback=self.parse_job_details)
        #                 req.meta['url'] = url
        #                 yield req
        #
        #
        #     for x in response.xpath('//div[@class="pagination"]/a'):
        #         try:
        #             if "Next" in x.xpath('span/span/text()').extract_first():
        #                 nextUrl = "http://www.indeed.co.in" + x.xpath('@href').extract_first()
        #                 paginate_req = scrapy.Request(nextUrl, callback=self.parse)
        #                 yield paginate_req
        #         except:
        #             continue

    def parse_job_details(self, response):
        """Scrape one indeed.co.in job page into a JobItem.

        NOTE(review): ``job['description']`` is stored as the raw
        ``extract()`` *list* of text fragments, not a joined string —
        verify that downstream consumers expect a list.
        """
        job = JobItem()
        job['url'] = response.meta['url']
        job['title'] = response.xpath('//div[@id="job_header"]/b/font/text()').extract_first()
        job['company_name'] = response.xpath('//div[@id="job_header"]/span[@class="company"]/text()').extract_first()
        job['location'] = response.xpath('//div[@id="job_header"]/span[@class="location"]/text()').extract_first()
        job['salary'] = response.xpath('//div[@id="job_header"]/span[@style="white-space: nowrap"]/text()').extract_first()
        job['description'] = response.xpath('//span[@id="job_summary"]//text()').extract()
        # A description fragment that looks like a bare domain/URL is
        # treated as the employer website.
        for i in job['description']:
            if re.match('(http\:\/\/|https\:\/\/)?([a-z0-9][a-z0-9\-]*\.)+[a-z0-9][a-z0-9\-]*$', i):
                job['website'] = i
        job['posted_date'] = response.xpath('//div[@class="result-link-bar"]/span[@class="date"]/text()').extract_first()
        # Fields not available on this page are filled with 'NA'.
        job['experience_requirements'] = 'NA'
        job['industry'] = 'NA'
        job['role'] = 'NA'
        job['address'] = 'NA'
        job['telephone'] = 'NA'
        job['email_id'] = 'NA'
        job['recruiter_name'] = 'NA'
        job['reference_id'] = 'NA'
        job['contact_dump'] = 'NA'
        yield job
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,278
|
sahilgandhi94/jobs-crawler
|
refs/heads/master
|
/crawler/spiders/Zauba.py
|
import scrapy
from crawler.items import ZaubaItem
class Zauba(scrapy.Spider):
    """Scrape basic company-registration details from zaubacorp.com.

    CINs are supplied at crawl time, e.g.
    ``scrapy crawl zaubacorp -a cin=CIN1,CIN2``.
    """
    name = "zaubacorp"
    allowed_domains = ["zaubacorp.com"]
    start_urls = []

    def __init__(self, cin=None, **kwargs):
        """Build one start URL per comma-separated CIN in *cin*."""
        super(Zauba, self).__init__(**kwargs)
        company_url = "https://www.zaubacorp.com/company/companyname/__CIN__"
        # Fix: the original appended to the *class-level* start_urls list
        # (shared across instances); bind a fresh per-instance list.
        self.start_urls = [company_url.replace('__CIN__', str(c).strip())
                           for c in cin.split(',')]

    def parse(self, response):
        """Extract the company-profile fields into a ZaubaItem."""
        data = ZaubaItem()
        rx = response.xpath
        cin = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[1]/div[1]/table/thead/tr/td[2]/p/a/text()").extract_first()
        company_name = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[1]/div[1]/table/tbody/tr[1]/td[2]/p/text()").extract_first()
        roc = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[1]/div[1]/table/tbody/tr[3]/td[2]/p/text()").extract_first()
        registration_number = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[1]/div[1]/table/tbody/tr[4]/td[2]/p/text()").extract_first()
        date_of_incorporation = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[1]/div[1]/table/tbody/tr[8]/td[2]/p/text()").extract_first()
        email = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[6]/div/div[1]/p[1]/text()").extract_first()
        website = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[6]/div/div[1]/p[2]/span/text()").extract_first()
        address = rx(".//*[@id='block-system-main']/div[2]/div[1]/div[6]/div/div[1]/p[4]/text()").extract_first()
        directors = []
        # Director rows are exposed as #package0 .. #package9.
        # NOTE(review): .encode('utf-8') yields bytes on Python 3, which
        # _rstrip's str.replace cannot handle — this module looks written
        # for Python 2; confirm the target runtime.
        for i in range(0, 10):
            try:
                d = rx(".//*[@id='package{}']/td[2]/p/a/text()".format(i)).extract_first().encode('utf-8')
            except AttributeError:
                # extract_first() returned None — no such row.
                continue
            if d is not None:
                directors.append(d.strip())
        # Fix: list.remove() mutates in place and returns None; the
        # original rebound ``directors = directors.remove(...)``, turning
        # the list into None whenever 'Click here' was present.
        try:
            directors.remove('Click here')
        except ValueError:
            pass
        directors = self._join(directors, ', ')
        # Fix: the original assignments ended with stray trailing commas,
        # silently storing 1-tuples instead of strings in every field
        # except Directors.
        data['CIN'] = self._encode(cin)
        data['CompanyName'] = self._encode(company_name)
        data['RoC'] = self._encode(roc)
        data['RegistrationNumber'] = self._encode(registration_number)
        data['DateofIncorporation'] = self._encode(date_of_incorporation)
        data['Email'] = self._encode(email)
        # Fix: guard the strip() — website may be None, which _encode
        # tolerates but .strip() does not.
        data['Website'] = self._encode(website.strip() if website is not None else None)
        data['Address'] = self._encode(address)
        data['Directors'] = self._encode(directors)
        print(data)
        return data

    def _encode(self, ob=None):
        """UTF-8-encode *ob*, mapping None to ''."""
        return ob.encode('utf-8') if ob is not None else ''

    def _rstrip(self, l):
        """Strip each fragment and drop literal CR/LF-comma artefacts."""
        return [x.strip().replace("\r\n,", "") for x in l]

    def _join(self, l, delimeter=' '):
        """Clean fragments via _rstrip and join with *delimeter*."""
        return delimeter.join(self._rstrip(l))  # to remove \r\n characters
|
{"/crawler/spiders/BabaJob_Mumbai_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/BabaJobsSpider.py": ["/crawler/items.py"], "/crawler/spiders/Sector.py": ["/crawler/items.py"], "/crawler/spiders/ShineSpider.py": ["/crawler/items.py"], "/crawler/spiders/NaukriSpider.py": ["/crawler/items.py"], "/crawler/spiders/BabaJob_Thane_Teacher.py": ["/crawler/items.py"], "/crawler/spiders/Sector1.py": ["/crawler/items.py"], "/crawler/spiders/ExhaustiveSpider.py": ["/crawler/items.py"], "/crawler/spiders/OlxComplete.py": ["/crawler/items.py"], "/crawler/spiders/IndeedSpider.py": ["/crawler/items.py"], "/crawler/spiders/Zauba.py": ["/crawler/items.py"]}
|
41,279
|
oyjs1989/printbarcode
|
refs/heads/master
|
/ui/body.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'body.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Login(object):
    """pyuic5-generated layout class for the login dialog.

    ``setupUi`` builds the widget tree on a QDialog; the dialog exposes
    ``net``, ``name`` and ``password`` line edits plus an OK/Cancel
    button box wired to accept/reject.
    """

    def setupUi(self, Login):
        """Create and lay out all child widgets of the *Login* dialog."""
        Login.setObjectName("Login")
        Login.resize(388, 205)
        Login.setStyleSheet("")
        self.buttonBox = QtWidgets.QDialogButtonBox(Login)
        self.buttonBox.setGeometry(QtCore.QRect(220, 170, 156, 23))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayoutWidget = QtWidgets.QWidget(Login)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(40, 30, 311, 74))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
        # Password field: masked input.
        self.password = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.password.setEchoMode(QtWidgets.QLineEdit.Password)
        self.password.setObjectName("password")
        self.gridLayout.addWidget(self.password, 2, 1, 1, 1)
        self.name = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.name.setObjectName("name")
        self.gridLayout.addWidget(self.name, 1, 1, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 0, 0, 1, 1)
        self.net = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.net.setObjectName("net")
        self.gridLayout.addWidget(self.net, 0, 1, 1, 1)
        self.retranslateUi(Login)
        self.buttonBox.accepted.connect(Login.accept)
        self.buttonBox.rejected.connect(Login.reject)
        QtCore.QMetaObject.connectSlotsByName(Login)
        # Tab order: address -> user name -> password.
        Login.setTabOrder(self.net, self.name)
        Login.setTabOrder(self.name, self.password)

    def retranslateUi(self, Login):
        """Apply the translated (Chinese) captions to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Login.setWindowTitle(_translate("Login", "登录"))
        self.label.setText(_translate("Login", "用户名"))
        self.label_2.setText(_translate("Login", "密码"))
        self.label_3.setText(_translate("Login", "网址"))
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,280
|
oyjs1989/printbarcode
|
refs/heads/master
|
/myean13/ean13generate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""EAN-13 barcode encoder
All needed by the user is done via the EAN13Encoder class:
Implemented by Helen Taylor for HUDORA GmbH.
Updated and ported to Python 3 by Michael Mulqueen for Method B Ltd.
Detailed documentation on the format here:
http://www.barcodeisland.com/ean13.phtml
You may use this under a BSD License.
"""
try:
from . import encoding
from .renderer import EAN13Renderer
except ImportError:
import encoding
from renderer import EAN13Renderer
# handling movement of reduce to functools python >= 2.6
try:
from functools import reduce
from PIL import ImageFont
except ImportError:
pass
# Left, centre and right guard bar patterns of an EAN-13 symbol.
GUARDS = ("101", "01010", "101")
class EAN13Generate:
    """Top-level class which handles the overall process of encoding an
    input number and outputting the resulting EAN-13 barcode."""

    def __init__(self, code, img, font, **options):
        """Set up the encoder with the concatenated input values.

        ``code`` must be a 12-digit string in the format nnmmmmmppppp,
        where n is the number system, m the manufacturing code and p the
        product code.  A 13-digit code is accepted; its final (check)
        digit is discarded and recomputed.

        Raises:
            ValueError: if ``code`` is not 12 (or 13) numeric digits.
        """
        if len(code) == 13:
            # Cut off the supplied check digit — it is recalculated below.
            code = code[:-1]
        if not (code.isdigit() and len(code) == 12):
            # Fix: raise ValueError instead of a bare Exception; it is a
            # subclass of Exception, so existing handlers keep working.
            raise ValueError("code must be 12 digits long")
        self.img = img
        self.font = font
        self.code = code
        self.check_digit = self.calculate_check_digit()
        self.full_code = self.code + str(self.check_digit)
        self.left_bars = ""
        self.right_bars = ""
        self.height = 0
        self.width = 0
        self.options = options
        self.encode()

    def encode(self):
        """Encode the barcode number; fill and return the left and right
        bar-pattern strings."""
        parity_values = self.get_parity()
        self.left_bars = ""
        self.right_bars = ""
        # The first (number-system) digit is not drawn directly: it is
        # encoded implicitly through the parity pattern of the left half.
        for parity, digit in zip(parity_values, self.full_code[1:7]):
            self.left_bars += encoding.get_left_encoded(int(digit), parity)
        for digit in self.full_code[7:]:
            self.right_bars += encoding.get_right_encoded(int(digit))
        return self.left_bars, self.right_bars

    def get_parity(self):
        """Return the left-half parity mapping selected by the first digit."""
        return encoding.parity_table[int(self.code[0])]

    def calculate_check_digit(self):
        """Modulo-10 check digit: three times the sum of the odd-position
        digits plus the sum of the even-position digits, complemented to
        the next multiple of 10."""
        # Idiom: plain sum() generator expressions replace the original
        # reduce() + helper function.
        oddsum = sum(int(digit) for digit in self.code[1::2])
        evensum = sum(int(digit) for digit in self.code[:12:2])
        total = oddsum * 3 + evensum
        # The check digit is whatever brings the total to a multiple of 10.
        return (10 - (total % 10)) % 10

    def write_image(self):
        """Write the barcode out to a PNG bytestream."""
        barcode = EAN13Renderer(self.full_code, self.left_bars, self.right_bars, GUARDS, self.font, self.img,
                                self.options)
        barcode.write_image()

    def get_pilimage(self, barcode_width, barcode_height, bar_width=4):
        """Render and return the barcode as a PIL image of the given size."""
        barcode = EAN13Renderer(self.full_code, self.left_bars, self.right_bars, GUARDS, self.font, self.img,
                                self.options)
        im = barcode.get_pilimage(barcode_width, barcode_height, bar_width)
        return im
if __name__ == '__main__':
    # NOTE(review): this demo looks broken — EAN13Generate's signature is
    # (code, img, font, **options), so ``font`` lands in the ``img``
    # parameter and the dict in ``font``; and no ``save`` method exists
    # on the class (only write_image/get_pilimage).  Confirm intended
    # usage before relying on this block.
    font = ImageFont.truetype("方正兰亭黑简.TTF", 30)
    encoder = EAN13Generate("6934177714108", font, {'height': 1})
    encoder.save('pyStrich.png', 5)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,281
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app/logger/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: __init__.py
# Author : lumi
# date: 2019/10/31
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
import logging
import logging.handlers
# Application logger: DEBUG and above goes to ``test.log``;
# WARNING and above also goes to the console.
logger = logging.getLogger("logger")
handler1 = logging.StreamHandler()                    # console sink
handler2 = logging.FileHandler(filename="test.log")   # file sink
logger.setLevel(logging.DEBUG)
handler1.setLevel(logging.WARNING)
handler2.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger.addHandler(handler1)
logger.addHandler(handler2)
# Levels are 10, 30 and 30 respectively.
# print(handler1.level)
# print(handler2.level)
# print(logger.level)
# Same instance as above — getLogger returns the cached logger by name.
logger = logging.getLogger("logger")
# Demo records exercising every severity level on import.
logger.debug('This is a customer debug message')
logger.info('This is an customer info message')
logger.warning('This is a customer warning message')
logger.error('This is an customer error message')
logger.critical('This is a customer critical message')
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,282
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/image_demo1.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: image_demo1
# Author : lumi
# date: 2019/10/23
# Description :
'''
# - Custom package
# - Third party module of python
print(__name__)
# - import odoo package
from PIL import Image, ImageDraw, ImageFont
import textwrap

# Scratch experiments: measure multi-line text extents with PIL fonts.
width = 255
height = 255
# 'L' = 8-bit greyscale canvas, filled white (255).
im = Image.new('L', (round(width), round(height)), 255)
draw = ImageDraw.Draw(im)
x, y = 100, 100
align = 'right'
align = 'right'  # NOTE(review): duplicate assignment — harmless
words = '床前明月光,\n疑是地上霜'
font_style = '方正兰亭黑.TTF'
# The second assignment wins; the one above is kept for quick toggling.
font_style = 'c:/windows/fonts/Arial.ttf'
# lines = textwrap.wrap(words, width=40)
# print(lines)
# font = ImageFont.truetype(font_style, 10)
# draw.text((round(x), y), words, font=font, fill=0, align=align)
# draw.rectangle([10, 20, 40, 30], width=2)
# im.save('text.png')
font = ImageFont.truetype(font_style, 10)
# Recorded measurements from previous runs:
# print(font.getsize_multiline('222222\nasdsadsssssssssssssssssssadsa')) #(152, 24)
# print(font.getsize_multiline('222222\nasdsadsssssssssssssssssssadsa\n')) #(152, 38)
print(font.getsize_multiline('222222\nasdsadsssssssssssssssssssadsan')) #(158, 24)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,283
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app_v1/common.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: common
# Author : lumi
# date: 2020/1/13
# Description :
'''
# Relative path of the directory holding jobs queued for printing.
WAITING_FOR_PRINT_QUEUE_DIR = './print_queue'
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,284
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo6.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: demo6
# Author : lumi
# date: 2019/10/15
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
from PIL import Image, ImageDraw
from myqrcode import QrcodeGenerate
# Scratch demo: render a QR code and paste it onto a larger blank canvas.
width = 2530
height = 2580
barcode_width = 1280
barcode_height = 1280
x, y = 450, 20
# 'L' = 8-bit greyscale canvas, filled white (255).
im = Image.new('L', (round(width), round(height)), 255)
qr = QrcodeGenerate('G$M:65766$S:326S00005678$D:100982743%Z$Al0123456789ABCDEF$I:023047432043AF3456FEB234524234234567',
                    'Q')
image = qr.get_pilimage(10)
new_image = image.resize((round(barcode_width), round(barcode_height)), Image.ANTIALIAS)
# Paste region: (left, upper, right, lower).
box = (x, y, x + barcode_width, y + barcode_height)
im.paste(new_image, box)
# NOTE(review): the Draw object is discarded — this call has no effect.
ImageDraw.Draw(im)
im.save('haha.jpg')
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,285
|
oyjs1989/printbarcode
|
refs/heads/master
|
/update.py
|
# -*- coding: utf-8 -*-
import datetime
import os
# Bump the release number and refresh the date-based version fields in
# PyInstaller's file_version_info.txt, rewriting the file in place.
# NOTE(review): the gbk->utf-8 (and later utf-8->gbk) round-trips assume
# a specific on-disk encoding; confirm against the actual file encoding.
new_verison_context = ''
today = datetime.datetime.today()
# Fix: use a context manager so the source file handle is closed.
with open('file_version_info.txt', 'r') as src:
    for line in src:
        new_line = line.encode('gbk').decode('utf-8')[:-1]
        if 'VersionCode' in new_line:
            # Fix: strip(':') only trims colons at both ENDS, so [-1]
            # grabbed the last *character* of the line (wrong for
            # multi-digit codes); split(':')[-1] takes the value after
            # the colon.
            versioncode = new_line.split(':')[-1]
            new_verison_context += "# VersionCode:%s\n" % versioncode
            continue
        if 'ReleaseCode' in new_line:
            # Same strip->split fix; the release number increments on
            # every run.
            releasecode = '%s' % (int(new_line.split(':')[-1]) + 1)
            new_verison_context += "# ReleaseCode:%s\n" % releasecode
            continue
        if 'filevers=' in new_line:
            new_verison_context += ' filevers=(%s, %s, %s, 0),\n' % (
                today.year, today.month, today.day)
            continue
        if 'prodvers=' in new_line:
            new_verison_context += ' prodvers = (%s, %s, %s, 0),\n' % (
                today.year, today.month, today.day)
            continue
        if 'FileVersion' in new_line:
            new_verison_context += " StringStruct(u'FileVersion', u'2019.10.30.v%s.%s'),\n" % (
                versioncode, releasecode)
            continue
        if 'ProductVersion' in new_line:
            new_verison_context += " StringStruct(u'ProductVersion', u'2019.10.30.v%s.%s')])\n" % (
                versioncode, releasecode)
            continue
        new_verison_context += '%s\n' % new_line
with open('tmp.txt', 'w') as f:
    f.write(new_verison_context.encode('utf-8').decode('gbk'))
# Fix: os.replace overwrites atomically; the original remove+rename pair
# could lose the file if interrupted between the two calls.
os.replace('tmp.txt', 'file_version_info.txt')
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,286
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/get_printer_info.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: get_printer_info
# Author : lumi
# date: 2019/10/23
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
import win32print
import win32ui
from decimal import *
printer_name = win32print.GetDefaultPrinter()
# Inch -> centimetre conversion factor, used at the bottom.
times = Decimal('2.54')
# GetDeviceCaps index constants (from the Windows GDI API):
# HORZRES / VERTRES = printable area
HORZRES = 8
VERTRES = 10
#
# LOGPIXELS = dots per inch
#
LOGPIXELSX = 88
LOGPIXELSY = 90
#
# PHYSICALWIDTH/HEIGHT = total area
#
PHYSICALWIDTH = 110
PHYSICALHEIGHT = 111
#
# PHYSICALOFFSETX/Y = left / top margin
#
PHYSICALOFFSETX = 112
PHYSICALOFFSETY = 113
# NOTE(review): duplicate of the assignment at the top of the script.
printer_name = win32print.GetDefaultPrinter()
#
# You can only write a Device-independent bitmap
# directly to a Windows device context; therefore
# we need (for ease) to use the Python Imaging
# Library to manipulate the image.
#
# Create a device context from a named printer
# and assess the printable size of the paper.
#
hDC = win32ui.CreateDC()
hDC.CreatePrinterDC(printer_name)
printable_area = hDC.GetDeviceCaps(HORZRES), hDC.GetDeviceCaps(VERTRES)  # printable physical width/height
printer_size = hDC.GetDeviceCaps(PHYSICALWIDTH), hDC.GetDeviceCaps(PHYSICALHEIGHT)  # total physical size = printable area + offsets
printer_margins = hDC.GetDeviceCaps(PHYSICALOFFSETX), hDC.GetDeviceCaps(PHYSICALOFFSETY)  # physical offsets (left/top margins)
print(printable_area, printer_size, printer_margins)
print(hDC.GetDeviceCaps(LOGPIXELSX), hDC.GetDeviceCaps(LOGPIXELSY))
# Printable size in centimetres: pixels / DPI * 2.54.
print(Decimal(hDC.GetDeviceCaps(HORZRES)) / Decimal(hDC.GetDeviceCaps(LOGPIXELSX)) * times,
      Decimal(hDC.GetDeviceCaps(VERTRES)) / Decimal(hDC.GetDeviceCaps(LOGPIXELSY)) * times)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,287
|
oyjs1989/printbarcode
|
refs/heads/master
|
/code128/code128generate.py
|
"""Code-128 barcode encoder
All needed by the user is done via the Code128Encoder class:
>>> encoder = Code128Encoder("HuDoRa")
>>> encoder.save("test.png")
Implemented by Helen Taylor for HUDORA GmbH.
Updated and ported to Python 3 by Michael Mulqueen for Method B Ltd.
Detailed documentation on the format here:
http://www.barcodeisland.com/code128.phtml
http://www.adams1.com/pub/russadam/128code.html
You may use this under a BSD License.
"""
from .textencoder import TextEncoder
import logging
from io import BytesIO
from PIL import Image, ImageFont, ImageDraw
log = logging.getLogger("code128")

# maps bar width (pixels) against label font size
FONT_SIZES = {
    1: 8,
    2: 14,
    3: 18,
    4: 24
}
class Code128Generate:
    """Drives the Code-128 pipeline: encode the input text to code
    values, compute the mod-103 checksum, and derive the bar pattern."""

    def __init__(self, text, img, options=None):
        """ The options hash currently supports three options:
            * ttf_font: absolute path to a truetype font file used to render the label
            * ttf_fontsize: the size the label is drawn in
            * label_border: number of pixels space between the barcode and the label
            * bottom_border: number of pixels space between the label and the bottom border
            * height: height of the image in pixels """
        self.options = options
        self.text = text
        self.img = img
        encoder = TextEncoder()
        self.encoded_text = encoder.encode(self.text)
        log.debug("Encoded text is %s", self.encoded_text)
        self.checksum = self.calculate_check_sum()
        log.debug("Checksum is %d", self.checksum)
        self.bars = encoder.get_bars(self.encoded_text, self.checksum)
        log.debug("Bars: %s", self.bars)

    def calculate_check_sum(self):
        """Mod-103 checksum over the encoded values: the start code plus
        each following value weighted by its 1-based position."""
        values = self.encoded_text
        total = values[0]
        for position, value in enumerate(values[1:], start=1):
            total += position * value
        return total % 103

    def write_image(self):
        """Render the bar pattern into the target image via the renderer."""
        renderer = Code128Renderer(self.bars, self.text, self.img, self.options)
        return renderer.write_image()
class Code128Renderer:
    """Rendering class for code128 - given the bars and the original
    text, it renders the barcode into the supplied image."""

    def __init__(self, bars, text, img, options=None):
        """ The options hash currently supports:
            * margin_left/right/top/bottom: pixel margins inside the image
            * width / height: explicit barcode size in pixels (clamped to
              the area the margins leave free) """
        self.options = options or {}
        self.bars = bars
        self.text = text
        # NOTE(review): set_args() indexes img as (height, width), while
        # write_image() calls img.putpixel() — the two uses expect
        # different types; confirm the intended argument shape.
        self.img = img
        self.set_args()

    def set_args(self):
        """Resolve margins and the effective barcode geometry from options."""
        self.margin_left = self.options.get('margin_left', 0)
        self.margin_right = self.options.get('margin_right', 0)
        if self.margin_left + self.margin_right > self.img[1]:
            raise OverflowError('margin left and margin right over width in total')
        self.margin_top = self.options.get('margin_top', 0)
        self.margin_bottom = self.options.get('margin_bottom', 0)
        if self.margin_top + self.margin_bottom > self.img[0]:
            raise OverflowError('margin top and margin bottom over height in total')
        # Clamp the requested width to the space the margins leave free.
        self.image_width = self.options.get('width', self.img[1] - self.margin_left - self.margin_right)
        if self.img[1] < self.margin_left + self.margin_right + self.image_width:
            self.image_width = self.img[1] - self.margin_left - self.margin_right
        self.bar_width = int(self.image_width / len(self.bars))
        self.image_height = self.options.get('height', self.img[0] - self.margin_top - self.margin_bottom)
        if self.img[0] < self.margin_top + self.margin_bottom + self.image_height:
            self.image_height = self.img[0] - self.margin_top - self.margin_bottom

    def write_image(self):
        """Draw the bar pattern into the image."""
        bar_width = self.bar_width
        img = self.img
        bar_height = self.image_height

        class BarWriter:
            """Class which moves across the image, writing out bars"""

            def __init__(self, bar_height):
                self.current_x = 0
                self.bar_height = bar_height

            def write_bar(self, value):
                """Draw a bar at the current position if the value is 1,
                otherwise move on silently."""
                if value == 1:
                    for ypos in range(self.bar_height):
                        for xpos in range(bar_width):
                            # Fix: offset by current_x — the original
                            # ignored it, drawing every bar in column 0.
                            img.putpixel((self.current_x + xpos, ypos), 0)
                self.current_x += bar_width

            def write_bars(self, bars):
                """write all bars to the image"""
                for bar in bars:
                    self.write_bar(int(bar))

        # Fix: BarWriter.__init__ takes only bar_height; the original
        # passed (img, bar_height) and raised TypeError at runtime.
        writer = BarWriter(bar_height)
        writer.write_bars(self.bars)
if __name__ == '__main__':
    # Render a demo barcode at 50 virtual pixels per millimetre
    # (label area 33.8mm x 40.0mm).
    MULTIPLE = 50
    width = MULTIPLE * 33.8
    height = MULTIPLE * 40.0
    image = Image.new('L', (round(width), round(height)), 255)
    # BUGFIX: the original assigned a plain tuple to `cg` and then called
    # cg.write_image(), which can only raise AttributeError.  The generator
    # class defined earlier in this module is presumably what was meant --
    # TODO confirm the constructor name and signature.
    cg = Code128Generate('693417771408', image, {'margin_left': 150, 'margin_right': 150})
    cg.write_image()
    draw = ImageDraw.Draw(image)  # NOTE(review): unused; kept as in original
    image.save('code.jpg', 'jpeg')
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,288
|
oyjs1989/printbarcode
|
refs/heads/master
|
/ui/sn.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'sn.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SN(object):
    """Auto-generated (pyuic5) UI definition for the "SN" main window.

    Do not edit by hand -- regenerate from sn.ui instead.  Statement order
    matters: widgets must be created and parented before they are added to
    layouts, menus, or the window.
    """

    def setupUi(self, SN):
        # Build the widget tree for the given QMainWindow `SN`.
        SN.setObjectName("SN")
        SN.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(SN)
        self.centralwidget.setObjectName("centralwidget")
        # Large centered title label.
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(320, 140, 160, 40))
        font = QtGui.QFont()
        font.setPointSize(30)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        # Horizontal input row container.
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(230, 220, 356, 44))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Prompt label inside the input row.
        self.label = QtWidgets.QLabel(self.horizontalLayoutWidget)
        font = QtGui.QFont()  # font variable is deliberately reused
        font.setPointSize(20)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        SN.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(SN)
        self.statusbar.setObjectName("statusbar")
        SN.setStatusBar(self.statusbar)
        # Menu bar with a login menu and a print-method menu.
        self.menuBar = QtWidgets.QMenuBar(SN)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 800, 23))
        self.menuBar.setObjectName("menuBar")
        self.menuLogin = QtWidgets.QMenu(self.menuBar)
        self.menuLogin.setObjectName("menuLogin")
        self.menu_print_method = QtWidgets.QMenu(self.menuBar)
        self.menu_print_method.setObjectName("menu_print_method")
        SN.setMenuBar(self.menuBar)
        self.actionLogin = QtWidgets.QAction(SN)
        self.actionLogin.setObjectName("actionLogin")
        self.menuLogin.addAction(self.actionLogin)
        self.menuBar.addAction(self.menuLogin.menuAction())
        self.menuBar.addAction(self.menu_print_method.menuAction())
        self.retranslateUi(SN)
        QtCore.QMetaObject.connectSlotsByName(SN)

    def retranslateUi(self, SN):
        # Install the (translatable) user-visible strings.
        _translate = QtCore.QCoreApplication.translate
        SN.setWindowTitle(_translate("SN", "Odoo"))
        self.label_2.setText(_translate("SN", "打印"))
        self.label.setText(_translate("SN", "输入:"))
        self.menuLogin.setTitle(_translate("SN", "用户"))
        self.menu_print_method.setTitle(_translate("SN", "选择打印方法"))
        self.actionLogin.setText(_translate("SN", "登录"))
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,289
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo4.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: demo4
# Author : lumi
# date: 2019/10/9
# Description : scratch demo -- load a C# printing DLL via pythonnet (clr)
'''
# - Custom package
# - Third party module of python
# - import odoo package
import clr
import time
# clr.FindAssembly(r"D:\git_repertory\pythonDemo\qt_demo\barcode\PrintDll.dll")  # load the C# DLL file
s = clr.FindAssembly(r'D:\git_repertory\pythonDemo\qt_demo\barcode\Seagull.BarTender.Print.dll')  # load the C# DLL file
print(s)
# from PrintDll import *  # import the DLL namespace
from LabelPainter_SDK import *  # import the DLL namespace
# instance = Print()
# instance.PrintBarcode('123456789123', r'C:\Program Files (x86)\Seagull\BarTender Suite', r'C:\Users\lumi\Desktop', 'demo.btw')  # Print is the class inside the DLL
# instance.PrintBarcode('123456789123', r'C:\Program Files (x86)\Seagull\BarTender Suite', r'C:\Users\lumi\Desktop', 'demo.btw')  # Print is the class inside the DLL
# instance.PrintBarcode('123456789123', r'C:\Program Files (x86)\Seagull\BarTender Suite', r'C:\Users\lumi\Desktop', 'demo.btw')  # Print is the class inside the DLL
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,290
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo7_login.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: demo7_login
# Author : lumi
# date: 2019/10/21
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
from odoorpc import ODOO
import xmlrpc
import requests
import json
import odoorpc
protocol = 'jsonrpc+ssl'
port = 443
# BUGFIX: odoorpc.ODOO expects a bare hostname -- passing a scheme-prefixed
# value like 'http://erp.aqara.com' produces an invalid connection URL
# (the scheme is already determined by the `protocol` argument).
odoo = odoorpc.ODOO(host='erp.aqara.com', protocol=protocol, port=port)
print(odoo.db.list())
# client = xmlrpc.client()
# api = '/web/session/authenticate'
# api = '/web/login'
# api = '/web'
# api = '/web/webclient/version_info'
# login = 'http://127.0.0.1:8069/web/session/authenticate'
# get_session_info = 'http://127.0.0.1:8069/web/session/get_session_info'
# http = 'http://127.0.0.1:8069/web?db'
# http = 'https://erp.aqara.com/web?db'
# request_data = {
# 'params': {
# }
# }
# http = 'https://erp.aqara.com/web/database/list'
# http = 'http://127.0.0.1:8069/web/database/list'
# headers = {'Content-Type': 'application/json'}
# response = requests.post(http, data=json.dumps(request_data), headers=headers)
# print(response.cookies.get_dict())
# print(response.text)
# cookies = response.cookies.get_dict()
#
# check = 'http://127.0.0.1:8069/web/session/check'
# request_data = {
# 'params': {
# # 'db': 'erp',
# # 'login': 'admin',
# # 'login_sucuess': 'admin',
# # 'password': '123456',
# }
# }
# headers = {'Content-Type': 'application/json'}
# response = requests.get(check, data=json.dumps(request_data), headers=headers, cookies=cookies)
# print(response.cookies.get_dict())
# print(response.text)
# request_data = {
# 'params': {
# 'db': 'erp',
# 'login': 'admin',
# # 'login_sucuess': 'admin',
# 'password': '123456',
# # 'login_sucuess': False,
# }
# }
# response = requests.post(login, data=json.dumps(request_data), headers=headers)
# print(response.cookies.get_dict())
# print(response.text)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,291
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo8_print_pdf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: demo8_print_pdf
# Author : lumi
# date: 2020/1/13
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
import tempfile
import win32api
import win32print
def printer_loading(filename):
    """Send *filename* to the default Windows printer via the shell "printto" verb."""
    win32api.ShellExecute(0, "printto", filename, '"%s"' % win32print.GetDefaultPrinter(), ".", 0)
import os

path = r'D:\git_repertory\pythonDemo\qt_demo\barcode\print_demo'
# List the files under the demo directory; printing every file is left
# commented out and a single PDF is printed instead.
for a, b, c in os.walk(path):
    print(c)
    # for i in c:
    #     f = os.path.join(path, i)
    #     printer_loading(f)
f = os.path.join(path, '【需求】为产品类别添加默认损耗率v1.2.pdf')
printer_loading(f)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,292
|
oyjs1989/printbarcode
|
refs/heads/master
|
/main.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt5 import QtWidgets
from app.application import MainWindow
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the event loop until the window closes.
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,293
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app/printwidgets/print_model.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: print_model
# Author : lumi
# date: 2019/10/15
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
import os
import json
from mycode128 import Code128Generate
from myean13 import EAN13Generate
from myqrcode import QrcodeGenerate
from PyQt5.QtGui import QPainter, QPixmap
from PyQt5.QtPrintSupport import QPrinter, QPrinterInfo
from PyQt5.QtCore import QRect
from decimal import *
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
import requests
import logging
from crccheck.crc import Crc
import re
import qrcode
import time
logger = logging.getLogger('logger')
# NOTE(review): "LOCAK" looks like a typo for "LOCAL", but the name is used
# throughout this module, so it is kept as-is.
LOCAK_TMP_IMAGE = 'print_image'
# 1 pt = 25.4/72 mm -- conversion factor from font points to millimetres.
PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")
def get_font_path(file_path):
    """Return *file_path* if it is an existing file, or a font name that PIL
    can resolve; raise Exception otherwise."""
    if os.path.isfile(file_path):
        return file_path
    try:
        # Not a plain file on disk -- let PIL try to resolve it as a font name.
        ImageFont.truetype(file_path, 10)
    except Exception:
        raise Exception('%s not found' % file_path)
    return file_path
def calc_crc(data):
    """Compute a CRC-16 (poly 0x1021, init 0xFFFF, reflected in/out,
    xor-out 0xFFFF) over a hex-encoded payload.

    :param str data: the hex data to checksum
    Usage:
        >> calc_crc("AD8D69F1163A6ECDD546506D2E1F2FBB")
        6ee0
    """
    engine = Crc(16, 0x1021, initvalue=0xFFFF, xor_output=0xFFFF,
                 reflect_input=True, reflect_output=True)
    engine.process(bytearray.fromhex(data))
    return engine.finalhex()
def generate_new_zigbee(zigbee):
zigbee = zigbee.replace('$s:','$S:')
new_zigbee, DID, end = re.search('(.*\$D:)(\w*?)(%Z\$A:.*)', zigbee).groups()
newdid = '%016x' % int(DID) # 000000000F343FEE
new_zigbee += newdid.upper()
head, install_code = re.search('(.*\$I:)(\w*)', end).groups()
new_zigbee += head
new_zigbee += install_code[0:-4]
CRC = calc_crc(install_code[0:-4]).upper()
new_zigbee += CRC[-2:]
new_zigbee += CRC[-4:-2]
return new_zigbee
class Printer(object):
    '''
    Printer base class.  Workflow:
      0. get print configuration -- read config to decide the label layout
      1. build the print model   -- derive a layout model from the config
      2. fetch print data        -- pull data from the server / local source
      3. render the image        -- fill in the data and draw the label
      4. drive the printer       -- send the rendered image to the device
    '''

    def __init__(self):
        '''Prepare the font, scale factors and a blank greyscale canvas.'''
        self.font_style = get_font_path('./Fonts/Arial.ttf')
        self.virtual_multiple = Decimal("100")  # virtual (hi-res) image scale factor
        self.virtual_width = self.virtual_multiple * Decimal("25.3")
        self.virtual_height = self.virtual_multiple * Decimal("25.8")
        self.reality_multiple = Decimal("3.78")  # printer device scale factor
        self.reality_heigh = round(Decimal('25.8') * self.reality_multiple)
        self.reality_width = round(Decimal('25.4') * self.reality_multiple)
        self.image = Image.new('L', (round(self.virtual_width), round(self.virtual_height)), 255)
        self.draw = ImageDraw.Draw(self.image)

    def get_print_info(self, input_context):
        '''Hook: parse *input_context* into print parameters (subclass override).'''
        pass

    def generate_image(self):
        '''Hook: render the label into self.image (subclass override).'''
        pass

    def print_pdf(self):
        '''Hook: print via QPdfWriter (not implemented).'''
        pass

    def print_word(self):
        pass

    def print_execl(self):
        # NOTE(review): "execl" is presumably a typo for "excel"; the name is
        # kept for backward compatibility with existing callers.
        pass

    def print_image(self):
        '''Send self.image to the default system printer.'''
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        logger.info(self.p)
        logger.info(self.print_device)
        tmp = BytesIO()
        self.image.save(tmp, format='BMP')
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        image = QPixmap()
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint directly onto the printer device
        painter.drawPixmap(QRect(0, 0, self.reality_width, self.reality_heigh), image)  # drawing triggers the print job
        painter.end()  # finish the print job

    def print(self):
        pass

    def run(self, input_context=None):
        '''Execute the full pipeline: fetch info, render, print, log timing.'''
        start_time = time.time()
        # BUGFIX: these are bound methods -- the original passed `self`
        # explicitly (self.get_print_info(self, ...)), which raises TypeError.
        self.get_print_info(input_context)
        self.generate_image()
        self.print_image()
        # BUGFIX: the original %-formatted two placeholders with a single
        # argument (TypeError); use logging's lazy argument formatting.
        logger.info('%s: finished cost %s', self.__class__, time.time() - start_time)
class NetPrinter(Printer):
    '''Printer whose label data is fetched over the network.'''
    pass
class LocalPrinter(Printer):
    '''
    Local printing -- no network information required.
    '''
    pass
class CloudPrinter(Printer):
    '''
    Cloud printing -- prints PDF / Word / Excel documents.
    '''
    def loopping(self):
        # NOTE(review): "loopping" is presumably a typo for "looping"; the
        # name is kept because external callers may rely on it.
        self.run()
class ZigbeeQrcodeOnlyBig(LocalPrinter):
MULTIPLE = Decimal("100")
width = MULTIPLE * Decimal("16.0")
height = MULTIPLE * Decimal("22.3")
PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")
ZIGBEE_WIDTH = MULTIPLE * Decimal("14")
ZIGBEE_HEIGHT = MULTIPLE * Decimal("14")
def __init__(self):
self.p = QPrinterInfo.defaultPrinter()
self.print_device = QPrinter(self.p)
try:
self.FONT_STYLE = get_font_path('./Fonts/方正兰亭黑.ttf')
except Exception as e:
raise e
def print_(self, odoo, input_raw=None):
self.image = Image.new('L', (round(self.width), round(self.height)), 255)
self.draw = ImageDraw.Draw(self.image)
if not input_raw:
return
request_data = {
'params': {'db': odoo.env.db,
'login': odoo._login,
'password': odoo._password,
'sn': input_raw}
}
headers = {
'Content-Type': 'application/json',
}
host = odoo.host
protocol = odoo.protocol
port = odoo.port
if protocol == 'jsonrpc':
scheme = 'http'
else:
scheme = 'https'
url = '%s://%s:%s/api/post/iface/get/zigbee' % (scheme, host, port)
response = requests.post(url, data=json.dumps(request_data), headers=headers)
if not response:
return
response_json = json.loads(response.text)
if response_json.get('error'):
raise Exception(response_json.get('error').get('data').get('message'))
result = json.loads(response_json.get('result'))
# response = odoo.env['lumi.zigbee.information.wiazrd'].scan_sn_for_zigbee(input_raw)
if result.get('state', -1) != 0:
raise Exception(result.get('msg'))
data = result.get('printcontent')
zigbee_info = data.get('zigbee_info')
# new_zigbee = generate_new_zigbee(zigbee_info)
self.zigbee_draw(zigbee_info)
TIMES = Decimal("3.78")
heigh = round(Decimal('22.6') * TIMES)
width = round(Decimal('16') * TIMES)
x1 = 0
y1 = 0
x2 = x1 + width
y2 = y1 + heigh
image = QPixmap()
tmp = BytesIO()
self.image.save(LOCAK_TMP_IMAGE, format='BMP')
self.image.save(tmp, format='BMP')
image.loadFromData(tmp.getvalue()) # 使用QImage构造图片
painter = QPainter(self.print_device) # 使用打印机作为绘制设备
painter.drawPixmap(QRect(x1, y1, x2, y2), image) # 进行绘制(即调起打印服务)
painter.end() # 打印结束
logger.info('%s:%s' % (input_raw, result.get('printcontent')))
def write_word(self, words, font, top=0, margin_left=0, margin_right=0):
y = top * self.MULTIPLE
text_width, text_height = font.getsize(words)
if margin_left:
x = margin_left * self.MULTIPLE
elif margin_right:
x = self.width - margin_right * self.MULTIPLE - text_width
else:
x = 0
self.draw.text((round(x), y), words, font=font, fill=0)
return Decimal(text_height) / self.MULTIPLE
def zigbee_draw(self, zigbee):
x = Decimal("1") * self.MULTIPLE
y = Decimal("4.2") * self.MULTIPLE
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=0)
qr.add_data(zigbee)
qr.make(fit=True)
image = qr.make_image(fill_color="black", back_color="white")
im = image.resize((round(self.ZIGBEE_WIDTH), round(self.ZIGBEE_HEIGHT)), Image.ANTIALIAS)
box = (x, y, x + self.ZIGBEE_WIDTH, y + self.ZIGBEE_HEIGHT)
self.image.paste(im, box)
class SNPrintRectangle(LocalPrinter):
    '''
    Local SN printing, rectangular label (34.5 x 9.5 mm).
    '''
    multiple = Decimal("100")  # virtual pixels per millimetre
    width = multiple * Decimal("34.5")
    height = multiple * Decimal("9.5")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")  # font pt -> mm
    # NOTE(review): class-level canvas is shared by all instances; print_()
    # replaces it with per-call instance attributes.
    image = Image.new('L', (round(width), round(height)), 255)
    draw = ImageDraw.Draw(image)

    def __init__(self):
        # Bind the default system printer and resolve the label font.
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            self.FONT_STYLE = get_font_path('./Fonts/Arial.ttf')
        except Exception as e:
            raise e

    def print_(self, odoo, input_raw=None):
        """Render SN *input_raw* as a Code128 barcode plus caption and send
        the label to the printer.  The *odoo* argument is unused here."""
        if not input_raw:
            return
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        self.sn_draw(input_raw)
        # Physical output size in device pixels (3.78 px/mm).
        TIMES = Decimal("3.78")
        heigh = round(Decimal('9.5') * TIMES)
        width = round(Decimal('34.5') * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(tmp, format='BMP')
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # drawing triggers the print job
        painter.end()  # finish the print job

    def write_word(self, words, font, top=0, margin_left=0, margin_right=0, center=False):
        """Draw *words* at vertical offset *top* (mm); x position chosen from
        margin_left / margin_right (mm) or horizontally centered."""
        y = top * self.multiple
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.multiple
        elif margin_right:
            x = self.width - margin_right * self.multiple - text_width
        elif center:
            x = (self.width - text_width) / 2
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)

    def sn_draw(self, sn):
        """Paste a Code128 barcode for *sn* plus an 'SN:' caption."""
        x = Decimal("2.2") * self.multiple
        y = Decimal("1.25") * self.multiple
        width = self.multiple * Decimal("30.1")
        height = self.multiple * Decimal("5")
        cg = Code128Generate(sn, self.image, MULTIPLE=self.multiple)
        im = cg.get_pilimage(width, height)
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10.
        im = im.resize((round(width), round(height)), Image.ANTIALIAS)
        # NOTE(review): box coordinates are Decimals; PIL paste expects ints.
        box = (x, y, x + width, y + height)
        self.image.paste(im, box)
        font_style = self.FONT_STYLE
        font_szie = self.PT_TO_MM_DECIMAL * Decimal("5.5")  # 5.5 pt caption
        font = ImageFont.truetype(font_style, round(font_szie * self.multiple))
        self.write_word('SN:%s' % sn, font, top=Decimal('6.75'), center=True)
class SNPrintOval(LocalPrinter):
    '''
    Local SN printing, 36 x 10 mm label.
    '''
    multiple = Decimal("100")  # virtual pixels per millimetre
    width = multiple * Decimal("36")
    height = multiple * Decimal("10")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")  # font pt -> mm
    # NOTE(review): class-level canvas is shared by all instances; print_()
    # replaces it with per-call instance attributes.
    image = Image.new('L', (round(width), round(height)), 255)
    draw = ImageDraw.Draw(image)

    def __init__(self):
        # Bind the default system printer and resolve the label font.
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            self.FONT_STYLE = get_font_path('./Fonts/Arial.ttf')
        except Exception as e:
            raise e

    def print_(self, odoo, input_raw=None):
        """Render SN *input_raw* as a Code128 barcode plus caption and send
        the label to the printer.  The *odoo* argument is unused here."""
        if not input_raw:
            return
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        self.sn_draw(input_raw)
        # Physical output size in device pixels (3.78 px/mm).
        TIMES = Decimal("3.78")
        heigh = round(Decimal('10') * TIMES)
        width = round(Decimal('36') * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(tmp, format='BMP')
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # drawing triggers the print job
        painter.end()  # finish the print job

    def write_word(self, words, font, top=0, margin_left=0, margin_right=0, center=False):
        """Draw *words* at vertical offset *top* (mm); x position chosen from
        margin_left / margin_right (mm) or horizontally centered."""
        y = top * self.multiple
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.multiple
        elif margin_right:
            x = self.width - margin_right * self.multiple - text_width
        elif center:
            x = (self.width - text_width) / 2
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)

    def sn_draw(self, sn):
        """Paste a Code128 barcode for *sn* plus a plain SN caption."""
        x = Decimal("4.1") * self.multiple
        y = Decimal("1") * self.multiple
        width = self.multiple * Decimal("27.8")
        height = self.multiple * Decimal("5.8")
        cg = Code128Generate(sn, self.image, MULTIPLE=self.multiple)
        im = cg.get_pilimage(width, height)
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10.
        im = im.resize((round(width), round(height)), Image.ANTIALIAS)
        # NOTE(review): box coordinates are Decimals; PIL paste expects ints.
        box = (x, y, x + width, y + height)
        self.image.paste(im, box)
        font_style = self.FONT_STYLE
        font_szie = self.PT_TO_MM_DECIMAL * Decimal("5.5")  # 5.5 pt caption
        font = ImageFont.truetype(font_style, round(font_szie * self.multiple))
        self.write_word(sn, font, top=Decimal('7.2'), center=True)
class ZigbeeQrcode(object):
    """Printer for a combined zigbee label (25.3 x 25.8 mm): QR code on top,
    Code128 SN barcode and SN text below."""
    MULTIPLE = Decimal("100")  # virtual pixels per millimetre
    width = MULTIPLE * Decimal("25.3")
    height = MULTIPLE * Decimal("25.8")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")  # font pt -> mm
    ZIGBEE_WIDTH = MULTIPLE * Decimal("12.5")   # QR code width in virtual px
    ZIGBEE_HEIGHT = MULTIPLE * Decimal("12.5")  # QR code height in virtual px
    FONT_SZIE = PT_TO_MM_DECIMAL * Decimal("3.88")  # caption size in mm

    def __init__(self):
        # Bind the default system printer and resolve the label font.
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            self.FONT_STYLE = get_font_path('./Fonts/Arial.ttf')
        except Exception as e:
            raise e

    def print_(self, odoo, input_raw=None):
        """Fetch the zigbee payload for SN *input_raw* from the odoo server,
        draw the QR code + SN barcode and send the label to the printer.

        Raises Exception when the server reports an error or a non-zero state.
        """
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        if not input_raw:
            return
        # Authenticate with the credentials held by the odoorpc session.
        request_data = {
            'params': {'db': odoo.env.db,
                       'login': odoo._login,
                       'password': odoo._password,
                       'sn': input_raw}
        }
        headers = {
            'Content-Type': 'application/json',
        }
        host = odoo.host
        protocol = odoo.protocol
        port = odoo.port
        if protocol == 'jsonrpc':
            scheme = 'http'
        else:
            scheme = 'https'
        url = '%s://%s:%s/api/post/iface/get/zigbee' % (scheme, host, port)
        response = requests.post(url, data=json.dumps(request_data), headers=headers)
        if not response:
            return
        response_json = json.loads(response.text)
        if response_json.get('error'):
            raise Exception(response_json.get('error').get('data').get('message'))
        # The 'result' field is itself a JSON-encoded string.
        result = json.loads(response_json.get('result'))
        if result.get('state', -1) != 0:
            raise Exception(result.get('msg'))
        data = result.get('printcontent')
        zigbee_info = data.get('zigbee_info')
        # new_zigbee = generate_new_zigbee(zigbee_info)
        self.zigbee_draw(zigbee_info)
        self.sn_draw(input_raw)
        # Physical output size in device pixels (3.78 px/mm).
        TIMES = Decimal("3.78")
        heigh = round(Decimal('25.8') * TIMES)
        width = round(Decimal('25.4') * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(tmp, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # drawing triggers the print job
        painter.end()  # finish the print job
        logger.info('%s:%s' % (input_raw, result.get('printcontent')))

    def zigbee_draw(self, zigbee):
        """Render *zigbee* as a QR code and paste it near the top of the label."""
        x = Decimal("6.4") * self.MULTIPLE
        y = Decimal("0.5") * self.MULTIPLE
        # qr = QrcodeGenerate(zigbee, 'l')
        # image = qr.get_pilimage(10, width=0)
        qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=0)
        qr.add_data(zigbee)
        qr.make(fit=True)
        image = qr.make_image(fill_color="black", back_color="white")
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10.
        im = image.resize((round(self.ZIGBEE_WIDTH), round(self.ZIGBEE_HEIGHT)), Image.ANTIALIAS)
        # NOTE(review): box coordinates are Decimals; PIL paste expects ints.
        box = (x, y, x + self.ZIGBEE_WIDTH, y + self.ZIGBEE_HEIGHT)
        self.image.paste(im, box)

    def sn_draw(self, sn):
        """Paste a centered Code128 barcode for *sn* plus an SN caption."""
        cd = Code128Generate(sn, self.image, MULTIPLE=self.MULTIPLE)
        barcode_width = Decimal("20") * self.MULTIPLE
        barcode_height = Decimal("4.8") * self.MULTIPLE
        x = (self.width - barcode_width) / 2
        y = Decimal("13.5") * self.MULTIPLE
        box = (x, y, x + barcode_width, y + barcode_height)
        im = cd.get_pilimage(barcode_width, barcode_height)
        self.image.paste(im, box)
        font_style = self.FONT_STYLE
        font_szie = self.FONT_SZIE
        font = ImageFont.truetype(font_style, round(font_szie * self.MULTIPLE))
        self.write_word(sn, font, top=Decimal("18.5"), center=True)

    def write_word(self, words, font, top=0, margin_left=0, margin_right=0, center=False):
        """Draw *words* at vertical offset *top* (mm); x position chosen from
        margin_left / margin_right (mm) or horizontally centered."""
        y = top * self.MULTIPLE
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.MULTIPLE
        elif margin_right:
            x = self.width - margin_right * self.MULTIPLE - text_width
        elif center:
            x = (self.width - text_width) / 2
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)
class ZigbeeQrcodeOnly(object):
    """Printer for a QR-only zigbee label (16.0 x 22.6 mm) with an
    'Install Code' caption under the QR code."""
    MULTIPLE = Decimal("100")  # virtual pixels per millimetre
    width = MULTIPLE * Decimal("16.0")
    height = MULTIPLE * Decimal("22.6")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")  # font pt -> mm
    ZIGBEE_WIDTH = MULTIPLE * Decimal("12.5")   # QR code width in virtual px
    ZIGBEE_HEIGHT = MULTIPLE * Decimal("12.5")  # QR code height in virtual px
    FONT_SZIE = PT_TO_MM_DECIMAL * Decimal("3.4")  # caption size in mm

    def __init__(self):
        # Bind the default system printer and resolve the CJK label font.
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            self.FONT_STYLE = get_font_path('./Fonts/方正兰亭黑.ttf')
        except Exception as e:
            raise e

    def print_(self, odoo, input_raw=None):
        """Fetch the zigbee payload for SN *input_raw* from the odoo server,
        draw it as a QR code and send the label to the printer.

        Raises Exception when the server reports an error or a non-zero state.
        """
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        if not input_raw:
            return
        # Authenticate with the credentials held by the odoorpc session.
        request_data = {
            'params': {'db': odoo.env.db,
                       'login': odoo._login,
                       'password': odoo._password,
                       'sn': input_raw}
        }
        headers = {
            'Content-Type': 'application/json',
        }
        host = odoo.host
        protocol = odoo.protocol
        port = odoo.port
        if protocol == 'jsonrpc':
            scheme = 'http'
        else:
            scheme = 'https'
        url = '%s://%s:%s/api/post/iface/get/zigbee' % (scheme, host, port)
        response = requests.post(url, data=json.dumps(request_data), headers=headers)
        if not response:
            return
        response_json = json.loads(response.text)
        if response_json.get('error'):
            raise Exception(response_json.get('error').get('data').get('message'))
        # The 'result' field is itself a JSON-encoded string.
        result = json.loads(response_json.get('result'))
        # response = odoo.env['lumi.zigbee.information.wiazrd'].scan_sn_for_zigbee(input_raw)
        if result.get('state', -1) != 0:
            raise Exception(result.get('msg'))
        data = result.get('printcontent')
        zigbee_info = data.get('zigbee_info')
        # new_zigbee = generate_new_zigbee(zigbee_info)
        self.zigbee_draw(zigbee_info)
        # Physical output size in device pixels (3.78 px/mm).
        TIMES = Decimal("3.78")
        heigh = round(Decimal('22.6') * TIMES)
        width = round(Decimal('16') * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        self.image.save(tmp, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # drawing triggers the print job
        painter.end()  # finish the print job
        logger.info('%s:%s' % (input_raw, result.get('printcontent')))

    def write_word(self, words, font, top=0, margin_left=0, margin_right=0):
        """Draw *words* at vertical offset *top* (mm); x position chosen from
        margin_left / margin_right (mm).  Returns the text height in mm."""
        y = top * self.MULTIPLE
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.MULTIPLE
        elif margin_right:
            x = self.width - margin_right * self.MULTIPLE - text_width
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)
        return Decimal(text_height) / self.MULTIPLE

    def zigbee_draw(self, zigbee):
        """Render *zigbee* as a QR code plus an 'Install Code' caption."""
        x = Decimal("1.75") * self.MULTIPLE
        y = Decimal("5.9") * self.MULTIPLE
        # qr = QrcodeGenerate(zigbee, 'l')
        # image = qr.get_pilimage(10, width=0)
        qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=0)
        qr.add_data(zigbee)
        qr.make(fit=True)
        image = qr.make_image(fill_color="black", back_color="white")
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10.
        im = image.resize((round(self.ZIGBEE_WIDTH), round(self.ZIGBEE_HEIGHT)), Image.ANTIALIAS)
        # NOTE(review): box coordinates are Decimals; PIL paste expects ints.
        box = (x, y, x + self.ZIGBEE_WIDTH, y + self.ZIGBEE_HEIGHT)
        self.image.paste(im, box)
        font_style = self.FONT_STYLE
        font_szie = self.FONT_SZIE
        font = ImageFont.truetype(font_style, round(font_szie * self.MULTIPLE))
        self.write_word('Install Code', font, top=Decimal('20'), margin_left=Decimal('4.35'))
class ZigbeeQrcodeBig(object):
    """Printer for a QR-only zigbee label (16.0 x 22.6 mm) with a larger
    14 mm QR code and an 'Install Code' caption."""
    MULTIPLE = Decimal("100")  # virtual pixels per millimetre
    width = MULTIPLE * Decimal("16.0")
    height = MULTIPLE * Decimal("22.6")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")  # font pt -> mm
    ZIGBEE_WIDTH = MULTIPLE * Decimal("14")   # QR code width in virtual px
    ZIGBEE_HEIGHT = MULTIPLE * Decimal("14")  # QR code height in virtual px
    FONT_SZIE = PT_TO_MM_DECIMAL * Decimal("3.4")  # caption size in mm

    def __init__(self):
        # Bind the default system printer and resolve the CJK label font.
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            self.FONT_STYLE = get_font_path('./Fonts/方正兰亭黑.ttf')
        except Exception as e:
            raise e

    def print_(self, odoo, input_raw=None):
        """Fetch the zigbee payload for SN *input_raw* from the odoo server,
        draw it as a QR code and send the label to the printer.

        Raises Exception when the server reports an error or a non-zero state.
        """
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        if not input_raw:
            return
        # Authenticate with the credentials held by the odoorpc session.
        request_data = {
            'params': {'db': odoo.env.db,
                       'login': odoo._login,
                       'password': odoo._password,
                       'sn': input_raw}
        }
        headers = {
            'Content-Type': 'application/json',
        }
        host = odoo.host
        protocol = odoo.protocol
        port = odoo.port
        if protocol == 'jsonrpc':
            scheme = 'http'
        else:
            scheme = 'https'
        url = '%s://%s:%s/api/post/iface/get/zigbee' % (scheme, host, port)
        response = requests.post(url, data=json.dumps(request_data), headers=headers)
        if not response:
            return
        response_json = json.loads(response.text)
        if response_json.get('error'):
            raise Exception(response_json.get('error').get('data').get('message'))
        # The 'result' field is itself a JSON-encoded string.
        result = json.loads(response_json.get('result'))
        # response = odoo.env['lumi.zigbee.information.wiazrd'].scan_sn_for_zigbee(input_raw)
        if result.get('state', -1) != 0:
            raise Exception(result.get('msg'))
        data = result.get('printcontent')
        zigbee_info = data.get('zigbee_info')
        # new_zigbee = generate_new_zigbee(zigbee_info)
        self.zigbee_draw(zigbee_info)
        # Physical output size in device pixels (3.78 px/mm).
        TIMES = Decimal("3.78")
        heigh = round(Decimal('22.6') * TIMES)
        width = round(Decimal('16') * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        self.image.save(tmp, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # drawing triggers the print job
        painter.end()  # finish the print job
        logger.info('%s:%s' % (input_raw, result.get('printcontent')))

    def write_word(self, words, font, top=0, margin_left=0, margin_right=0, x_center=False):
        """Draw *words* at vertical offset *top* (mm); x position chosen from
        margin_left / margin_right (mm).  Returns the text height in mm.
        NOTE(review): the x_center parameter is accepted but never used.
        """
        y = top * self.MULTIPLE
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.MULTIPLE
        elif margin_right:
            x = self.width - margin_right * self.MULTIPLE - text_width
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)
        return Decimal(text_height) / self.MULTIPLE

    def zigbee_draw(self, zigbee):
        """Render *zigbee* as a QR code plus an 'Install Code' caption."""
        x = Decimal("1") * self.MULTIPLE
        y = Decimal("4.2") * self.MULTIPLE
        # qr = QrcodeGenerate(zigbee, 'l')
        # image = qr.get_pilimage(10, width=0)
        qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=0)
        qr.add_data(zigbee)
        qr.make(fit=True)
        image = qr.make_image(fill_color="black", back_color="white")
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10.
        im = image.resize((round(self.ZIGBEE_WIDTH), round(self.ZIGBEE_HEIGHT)), Image.ANTIALIAS)
        # NOTE(review): box coordinates are Decimals; PIL paste expects ints.
        box = (x, y, x + self.ZIGBEE_WIDTH, y + self.ZIGBEE_HEIGHT)
        self.image.paste(im, box)
        font_style = self.FONT_STYLE
        font_szie = self.FONT_SZIE
        font = ImageFont.truetype(font_style, round(font_szie * self.MULTIPLE))
        self.write_word('Install Code', font, top=Decimal('19.8'), margin_left=Decimal('4.35'))
class XiaoMiPrinter_69(object):
    """Printer for the XiaoMi 69-style product label (33.8 x 40.0 mm):
    product name, colour, SN barcode, SKU, EAN-13 barcode and footer."""
    # 1in = 2.54cm = 25.4 mm = 72pt = 6pc
    MULTIPLE = Decimal("50")  # virtual pixels per millimetre
    width = MULTIPLE * Decimal("33.8")
    height = MULTIPLE * Decimal("40.0")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")  # font pt -> mm
    # Font sizes (mm) for the different label regions.
    FONT_SZIE_HEAD = Decimal("4") * PT_TO_MM_DECIMAL
    FONT_SZIE_MID = Decimal("4.5") * PT_TO_MM_DECIMAL
    FONT_SZIE_CODE = Decimal("10") * PT_TO_MM_DECIMAL
    FONT_SZIE_BUTTOM_LIFT = Decimal("4") * PT_TO_MM_DECIMAL
    FONT_SZIE_BUTTOM_RIGHT = Decimal("3.18") * PT_TO_MM_DECIMAL
    def __init__(self):
        # Bind the default system printer and resolve the fonts used by the
        # different label regions.
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            self.FONT_STYLE_BUTTOM = get_font_path('./Fonts/方正兰亭黑_GBK.TTF')
            self.FONT_STYLE_HEAD = get_font_path('./Fonts/方正兰亭黑_GBK.TTF')
            self.FONT_STYLE_CODE = get_font_path('./Fonts/Arial Unicode MS.TTF')
            self.FONT_STYLE_MID = get_font_path('./Fonts/Arial.ttf')
        except Exception as e:
            raise e
        # self.print_device.setPaperSize(QSizeF(400, 338), QPrinter.Point)  # configure the printer paper size
    def write_word(self, words, font, top=0, margin_left=0, margin_right=0):
        """Draw *words* at vertical offset *top* (mm); x position chosen from
        margin_left / margin_right (mm).  Returns the text height in mm."""
        y = top * self.MULTIPLE
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.MULTIPLE
        elif margin_right:
            x = self.width - margin_right * self.MULTIPLE - text_width
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)
        return Decimal(text_height) / self.MULTIPLE
    def name_draw(self, first_name, second_name):
        """Draw the two-line product name at the top-left of the label,
        stacking *second_name* directly under *first_name*."""
        first_name_y = Decimal("1.5")
        font_sytle = self.FONT_STYLE_HEAD
        font_szie = self.FONT_SZIE_HEAD
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        text_width, text_height = font.getsize(first_name)
        self.write_word(first_name, font, top=first_name_y, margin_left=Decimal("2.5"))
        # Second line starts one text-height (in mm) below the first.
        second_name_y = first_name_y + Decimal(text_height) / self.MULTIPLE
        self.write_word(second_name, font, top=second_name_y, margin_left=Decimal("2.5"))
    def color_draw(self, color):
        """Draw the product colour text at the top-right of the label."""
        font_sytle = self.FONT_STYLE_HEAD
        font_szie = self.FONT_SZIE_HEAD
        color_y = Decimal("1.5")
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        self.write_word(color, font, top=color_y, margin_right=Decimal("3.5"))
    def sn_draw(self, sn):
        """Render the serial number as a Code128 barcode and paste it onto
        the label (all sizes/offsets are millimetres * MULTIPLE)."""
        cd = Code128Generate(sn, self.image, MULTIPLE=self.MULTIPLE)
        barcode_width = Decimal("27.8") * self.MULTIPLE
        barcode_height = Decimal("5") * self.MULTIPLE
        x = Decimal("2.5") * self.MULTIPLE
        y = Decimal("5") * self.MULTIPLE
        # NOTE(review): box holds Decimal values; PIL's paste expects
        # integer coordinates -- confirm this is tolerated by the PIL
        # version in use.
        box = (x, y, x + barcode_width, y + barcode_height)
        im = cd.get_pilimage(barcode_width, barcode_height)
        self.image.paste(im, box)
def sn_sku_draw(self, sn, sku):
font_sytle = self.FONT_STYLE_MID
font_szie = self.FONT_SZIE_MID
y = Decimal("10.2")
font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
sn = 'SN:%s' % sn
self.write_word(sn, font, top=y, margin_left=Decimal("2.5"))
self.write_word(sku, font, top=y, margin_right=Decimal("3.5"))
    def barcode_draw(self, barcode):
        """Render the EAN-13 product barcode (with human-readable digits)
        and paste it below the SN/SKU row."""
        font_sytle = self.FONT_STYLE_CODE
        font_szie = self.FONT_SZIE_CODE
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        cd = EAN13Generate(barcode, self.image, font, MULTIPLE=self.MULTIPLE)
        barcode_width = Decimal("27.8") * self.MULTIPLE
        barcode_height = Decimal("21") * self.MULTIPLE
        x = Decimal("2.5") * self.MULTIPLE
        y = Decimal("12.3") * self.MULTIPLE
        # NOTE(review): box holds Decimal values; PIL's paste expects
        # integer coordinates -- confirm this is tolerated.
        box = (x, y, x + barcode_width, y + barcode_height)
        im = cd.get_pilimage(barcode_width, barcode_height)
        self.image.paste(im, box)
def address_date_draw(self, address, date):
address = address.strip()
# 地址超过 14个换行
if len(address) <= 14:
font_sytle = self.FONT_STYLE_BUTTOM
font_szie = self.FONT_SZIE_BUTTOM_LIFT
font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
self.write_word(address, font, top=Decimal("32.6"), margin_left=Decimal("2.5"))
self.write_word(date, font, top=Decimal("34.1"), margin_left=Decimal("2.5"))
else:
font_sytle = self.FONT_STYLE_BUTTOM
font_szie = self.FONT_SZIE_BUTTOM_LIFT
font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
first_add = address[0:15]
sec_add = "\t\t\t\t %s" % address[15:]
self.write_word(first_add, font, top=Decimal("32.1"), margin_left=Decimal("2.5"))
self.write_word(sec_add, font, top=Decimal("33.6"), margin_left=Decimal("2.5"))
self.write_word(date, font, top=Decimal("35.3"), margin_left=Decimal("2.5"))
def certificate_draw(self):
self.draw.rectangle((round(Decimal("25.3") * self.MULTIPLE), round(Decimal("32.8") * self.MULTIPLE),
round(Decimal("30.5") * self.MULTIPLE), (round(Decimal("36") * self.MULTIPLE))),
outline="black",
width=round(Decimal("0.07") * self.MULTIPLE))
font_sytle = self.FONT_STYLE_BUTTOM
font_szie = self.FONT_SZIE_BUTTOM_RIGHT
font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
self.write_word('合格证', font, top=Decimal("33.2"), margin_right=Decimal("4.35"))
self.write_word('已检验', font, top=Decimal("34.5"), margin_right=Decimal("4.35"))
    def print_(self, odoo, input_raw=None):
        """Fetch print content for *input_raw* from the odoo backend, draw
        the complete label and send it to the default printer."""
        # Fresh blank grayscale canvas (white background).
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        if not input_raw:
            return
        response_json = odoo.env['wizard.mrp.commodity.barcode'].print_commodity_barcode(input_raw)
        if not response_json:
            return
        if response_json.get('state') != 0:
            raise Exception(response_json.get('msg'))
        def get_head_name(name):
            '''
            Split the product name into two header lines at the first
            '/', '-' or space; if none is found, the whole name becomes
            the first line.
            :param name: product name string
            :return: (first_name, second_name)
            '''
            count = 0
            first_name = name
            second_name = ''
            for char in name:
                if char in ('/', '-', ' '):
                    first_name = name[0:count]
                    second_name = name[count + 1:]
                    break
                else:
                    count += 1
            return first_name, second_name
        data = response_json.get('printcontent')
        first_name, second_name = get_head_name(data.get('product_name'))
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        self.name_draw(first_name, second_name)
        self.color_draw(data.get('color'))
        self.sn_draw(data.get('sn'))
        self.sn_sku_draw(data.get('sn'), data.get('SKU'))
        self.barcode_draw(data.get('barcode'))
        self.address_date_draw(data.get('address'), data.get('datetime'))
        self.certificate_draw()
        # Scale the 400x338 pt page down to 40% for the printer.
        TIMES = Decimal("0.4")
        heigh = round(400 * TIMES)
        width = round(338 * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(tmp, format='BMP')
        # Keep a debugging copy of the rendered label on disk.
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint directly onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # this drives the actual print job
        painter.end()  # finish / flush the print job
        logger.info('%s:%s' % (input_raw, response_json.get('printcontent')))
class AqaraPrinter_69(object):
    """Label printer for the 69-type Aqara label.

    Same 33.8 x 40.0 mm layout family as the sibling printer class above
    but with different row positions, a second-space name split and both
    SN and SKU right-aligned.
    """
    # 1in = 2.54cm = 25.4 mm = 72pt = 6pc
    # Pixels rendered per millimetre of label space.
    MULTIPLE = Decimal("50")
    width = MULTIPLE * Decimal("33.8")
    height = MULTIPLE * Decimal("40.0")
    PT_TO_MM_DECIMAL = Decimal("25.4") / Decimal("72")
    # Font sizes in millimetres ("SZIE" is a historical typo; kept because
    # the names are referenced throughout the class).
    FONT_SZIE_HEAD = Decimal("4") * PT_TO_MM_DECIMAL
    FONT_SZIE_MID = Decimal("4.5") * PT_TO_MM_DECIMAL
    FONT_SZIE_CODE = Decimal("10") * PT_TO_MM_DECIMAL
    FONT_SZIE_BUTTOM_LIFT = Decimal("4") * PT_TO_MM_DECIMAL
    FONT_SZIE_BUTTOM_RIGHT = Decimal("3.18") * PT_TO_MM_DECIMAL
    # NOTE(review): class-level canvas shared by all instances; print_()
    # replaces it with a fresh per-job canvas on the instance.
    image = Image.new('L', (round(width), round(height)), 255)
    draw = ImageDraw.Draw(image)
    def __init__(self):
        """Bind to the system default printer and resolve the label fonts."""
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        try:
            # get_font_path is defined elsewhere in this project.
            self.FONT_STYLE_BUTTOM = get_font_path('./Fonts/方正兰亭黑_GBK.TTF')
            self.FONT_STYLE_HEAD = get_font_path('./Fonts/方正兰亭黑_GBK.TTF')
            self.FONT_STYLE_CODE = get_font_path('./Fonts/Arial Unicode MS.TTF')
            self.FONT_STYLE_MID = get_font_path('./Fonts/Arial.ttf')
        except Exception as e:
            # NOTE(review): re-raising unchanged makes this try/except a no-op.
            raise e
        # self.print_device.setPaperSize(QSizeF(400, 338), QPrinter.Point)  # printer page setup (disabled)
    def write_word(self, words, font, top=0, margin_left=0, margin_right=0):
        """Draw one line of text; positions are millimetres scaled by
        MULTIPLE; margin_left left-anchors, margin_right right-anchors.
        Returns the text height in millimetres (Decimal)."""
        y = top * self.MULTIPLE
        text_width, text_height = font.getsize(words)
        if margin_left:
            x = margin_left * self.MULTIPLE
        elif margin_right:
            x = self.width - margin_right * self.MULTIPLE - text_width
        else:
            x = 0
        self.draw.text((round(x), y), words, font=font, fill=0)
        return Decimal(text_height) / self.MULTIPLE
    def name_draw(self, first_name, second_name):
        """Draw the two product-name lines at the top-left."""
        first_name_y = Decimal("1.5")
        font_sytle = self.FONT_STYLE_HEAD
        font_szie = self.FONT_SZIE_HEAD
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        text_width, text_height = font.getsize(first_name)
        self.write_word(first_name, font, top=first_name_y, margin_left=Decimal("2.5"))
        # Second line starts one text-height (converted to mm) lower.
        second_name_y = first_name_y + Decimal(text_height) / self.MULTIPLE
        self.write_word(second_name, font, top=second_name_y, margin_left=Decimal("2.5"))
    def color_draw(self, color):
        """Draw the colour label right-aligned on the top row."""
        font_sytle = self.FONT_STYLE_HEAD
        font_szie = self.FONT_SZIE_HEAD
        color_y = Decimal("1.5")
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        self.write_word(color, font, top=color_y, margin_right=Decimal("3.5"))
    def sn_draw(self, sn):
        """Render the serial number as a Code128 barcode and paste it in."""
        cd = Code128Generate(sn, self.image, MULTIPLE=self.MULTIPLE)
        barcode_width = Decimal("27.8") * self.MULTIPLE
        barcode_height = Decimal("5") * self.MULTIPLE
        x = Decimal("2.5") * self.MULTIPLE
        y = Decimal("4.7") * self.MULTIPLE
        # NOTE(review): Decimal coordinates in the paste box -- confirm
        # the PIL version in use tolerates non-int values.
        box = (x, y, x + barcode_width, y + barcode_height)
        im = cd.get_pilimage(barcode_width, barcode_height)
        self.image.paste(im, box)
    def sn_sku_draw(self, sn, sku):
        """Print the SN and SKU on two right-aligned rows."""
        font_sytle = self.FONT_STYLE_MID
        font_szie = self.FONT_SZIE_MID
        y1 = Decimal("9.9")
        y2 = Decimal("11.9")
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        self.write_word(sn, font, top=y1, margin_right=Decimal("3.5"))
        self.write_word(sku, font, top=y2, margin_right=Decimal("3.5"))
    def barcode_draw(self, barcode):
        """Render the EAN-13 product barcode and paste it below the SN/SKU."""
        font_sytle = self.FONT_STYLE_CODE
        font_szie = self.FONT_SZIE_CODE
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        cd = EAN13Generate(barcode, self.image, font, MULTIPLE=self.MULTIPLE)
        barcode_width = Decimal("27.8") * self.MULTIPLE
        barcode_height = Decimal("19") * self.MULTIPLE
        x = Decimal("2.5") * self.MULTIPLE
        y = Decimal("13.8") * self.MULTIPLE
        box = (x, y, x + barcode_width, y + barcode_height)
        im = cd.get_pilimage(barcode_width, barcode_height)
        self.image.paste(im, box)
    def address_date_draw(self, address, date):
        """Print the address (wrapped when longer than 14 characters) and
        the production date at the bottom-left."""
        address = address.strip()
        # Wrap the address onto two lines past 14 characters.
        if len(address) <= 14:
            font_sytle = self.FONT_STYLE_BUTTOM
            font_szie = self.FONT_SZIE_BUTTOM_LIFT
            font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
            self.write_word(address, font, top=Decimal("32.6"), margin_left=Decimal("2.5"))
            self.write_word(date, font, top=Decimal("34.1"), margin_left=Decimal("2.5"))
        else:
            font_sytle = self.FONT_STYLE_BUTTOM
            font_szie = self.FONT_SZIE_BUTTOM_LIFT
            font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
            first_add = address[0:15]
            sec_add = "\t\t\t\t %s" % address[15:]
            self.write_word(first_add, font, top=Decimal("32.1"), margin_left=Decimal("2.5"))
            self.write_word(sec_add, font, top=Decimal("33.6"), margin_left=Decimal("2.5"))
            self.write_word(date, font, top=Decimal("35.3"), margin_left=Decimal("2.5"))
    def certificate_draw(self):
        """Draw the boxed quality-certificate stamp at the bottom-right."""
        self.draw.rectangle((round(Decimal("25.3") * self.MULTIPLE), round(Decimal("32.8") * self.MULTIPLE),
                             round(Decimal("30.5") * self.MULTIPLE), (round(Decimal("36") * self.MULTIPLE))),
                            outline="black",
                            width=round(Decimal("0.07") * self.MULTIPLE))
        font_sytle = self.FONT_STYLE_BUTTOM
        font_szie = self.FONT_SZIE_BUTTOM_RIGHT
        font = ImageFont.truetype(font_sytle, round(font_szie * self.MULTIPLE))
        self.write_word('合格证', font, top=Decimal("33.2"), margin_right=Decimal("4.35"))
        self.write_word('已检验', font, top=Decimal("34.5"), margin_right=Decimal("4.35"))
    def print_(self, odoo, input_raw=None):
        """Fetch print content for *input_raw* from odoo, draw the label
        and send it to the default printer."""
        # Fresh blank grayscale canvas (white background).
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        if not input_raw:
            return
        response_json = odoo.env['wizard.mrp.commodity.barcode'].print_commodity_barcode(input_raw)
        if not response_json:
            return
        if response_json.get('state') != 0:
            raise Exception(response_json.get('msg'))
        def get_head_name(name):
            '''
            Split the product name into two header lines at the SECOND
            space (the first space is skipped via the num flag); if fewer
            than two spaces exist the whole name is the first line.
            :param name: product name string
            :return: (first_name, second_name)
            '''
            num = 0
            count = 0
            first_name = name
            second_name = ''
            for char in name:
                # (' ') is a plain string, so this tests char == ' '.
                if char in (' '):
                    if num == 1:
                        first_name = name[0:count]
                        second_name = name[count + 1:]
                        break
                    else:
                        num = 1
                count += 1
            return first_name, second_name
        data = response_json.get('printcontent')
        first_name, second_name = get_head_name(data.get('product_name'))
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        self.name_draw(first_name, second_name)
        self.color_draw(data.get('color'))
        self.sn_draw(data.get('sn'))
        self.sn_sku_draw(data.get('sn'), data.get('SKU'))
        self.barcode_draw(data.get('barcode'))
        self.address_date_draw(data.get('address'), data.get('datetime'))
        self.certificate_draw()
        # Scale the 400x338 pt page down to 40% for the printer.
        TIMES = Decimal("0.4")
        heigh = round(400 * TIMES)
        width = round(338 * TIMES)
        x1 = 0
        y1 = 0
        x2 = x1 + width
        y2 = y1 + heigh
        image = QPixmap()
        tmp = BytesIO()
        self.image.save(tmp, format='BMP')
        # Keep a debugging copy of the rendered label on disk.
        self.image.save(LOCAK_TMP_IMAGE, format='BMP')
        image.loadFromData(tmp.getvalue())  # build the pixmap from the BMP bytes
        painter = QPainter(self.print_device)  # paint directly onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # this drives the actual print job
        painter.end()  # finish / flush the print job
        logger.info('%s:%s' % (input_raw, response_json.get('printcontent')))
if __name__ == '__main__':
    # Ad-hoc manual test: build a zigbee QR payload and render it to a file.
    s = generate_new_zigbee('G$M:1694$S:456SS111992900009$D:255167749%Z$A:04CF8CDF3C765017$I:163AF41829724ED328243F8A91C5179C8CF8')
    # NOTE(review): the generated value above is immediately overwritten
    # by this fixed sample payload.
    s = 'G$M:1694$S:456SS111992900009$D:000000000F358D05%Z$A:04CF8CDF3C765017$I:163AF41829724ED328243F8A91C5179CC548'
    qr = QrcodeGenerate(s, 'l')
    image = qr.get_pilimage(10, width=0)
    image.save('qrcode.png', 'png')
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,294
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo3.py
|
# -*- coding:utf-8 -*-
from decimal import *
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QObject, pyqtSlot, QUrl, QSizeF,QRect,Qt,QSize
from PyQt5.QtPrintSupport import QPrinter, QPrinterInfo
from PyQt5.QtGui import QPainter, QImage,QPagedPaintDevice,QPixmap
import sys, base64
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageWin
class Printer:
    """Demo: print ./new.jpg onto the system default printer using a
    400x338 pt page."""
    def __init__(self):
        self.p = QPrinterInfo.defaultPrinter()
        self.print_device = QPrinter(self.p)
        self.print_device.setPaperSize(QSizeF(400, 338), QPrinter.Point)  # configure the printer page size
    def print_(self):
        # Target draw rectangle: 40% of the 400x338 page.
        TIMES = Decimal('0.4')
        heigh = round(400*TIMES)
        width = round(338*TIMES)
        x1 = 0
        y1 = 0
        x2 = x1+width
        y2 = y1+heigh
        image = QPixmap()
        # NOTE(review): scaling an empty pixmap has no effect here -- the
        # loadFromData() call below replaces the pixmap contents entirely.
        image = image.scaled(QSize(width, heigh), Qt.KeepAspectRatio, Qt.SmoothTransformation)
        # NOTE(review): the file handle opened here is never closed.
        image.loadFromData(open('./new.jpg','rb').read())  # build the pixmap from the image file
        painter = QPainter(self.print_device)  # paint onto the printer device
        painter.drawPixmap(QRect(x1, y1, x2, y2), image)  # drives the actual print job
        painter.end()  # finish the print job
class Print(QObject):
    """Qt bridge object exposing print_() as a slot (intended for use via
    a QWebChannel, see the commented-out wiring in __main__)."""
    def __init__(self):
        super().__init__()
        self.printer = Printer()
    @pyqtSlot(str, result=str)
    def print_(self):
        # strip the header marker
        self.printer.print_()
        return
if __name__ == '__main__':
    # Standalone demo: print immediately; the commented-out lines are the
    # original browser/web-channel wiring.
    app = QApplication(sys.argv)
    # browser = QWebEngineView()
    # browser.setWindowTitle('使用PyQt5打印热敏小票')
    # browser.resize(900, 600)
    # channel = QWebChannel()
    printer = Print()
    printer.print_()
    # channel.registerObject('printer', printer)
    # browser.page().setWebChannel(channel)
    url_string = "file:///python/print/webprint.html"  # built-in page address (unused without the browser)
    # browser.load(QUrl(url_string))
    # browser.show()
    sys.exit(app.exec_())
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,295
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo5.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: demo5
# Author : lumi
# date: 2019/10/15
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
# lib = ctypes.cdll.LoadLibrary('./LabelPainter_SDK.dll')
import ctypes
# Experiment driving the LabelPainter label-printing SDK via ctypes.
# program_path = ctypes.c_char_p("C:\\Program Files (x86)\\中琅条码标签打印软件\\")
# NOTE(review): the commented-out line suggests the SDK expects a C char
# string (bytes); on Python 3 a plain str is marshalled as wchar --
# confirm what ZL_Initialization actually requires.
program_path = "D:\\printer\\"
demo_path = r"D:\git_repertory\pythonDemo\qt_demo\barcode\demo1.zhl"
# windll => stdcall calling convention (cdll variant kept below, disabled).
dll = ctypes.windll.LoadLibrary(r"D:\git_repertory\pythonDemo\qt_demo\barcode\LabelPainter_SDK.dll")
# dll = ctypes.cdll.LoadLibrary(r"D:\git_repertory\pythonDemo\qt_demo\barcode\LabelPainter_SDK.dll")
s = dll.ZL_Initialization(program_path)
print(s)
# Remaining SDK calls (open document, output, release) left disabled:
# dll.ZL_OpenDoc(demo_path, "")
# dll.ZL_OutputToPrinter()
# dll.ZL_SetDataCustom()
# dll.ZL_StartOutputCustom()
# dll.ZL_StopOutput()
# dll.ZL_CloseDoc()
# dll.ZL_Release()
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,296
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app_v1/network/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: __init__.py
# Author : lumi
# date: 2019/10/24
# Description :
'''
import hashlib
import json
import requests
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA
from base64 import b64decode, b64encode
import uuid
import time
from hmac import HMAC
# Last 12 hex digits of the host MAC address; used as device-bound key material.
MAC = uuid.UUID(int=uuid.getnode()).hex[-12:]
# Directory name for jobs waiting to be printed.
WAITING_FOR_PRINT_QUEUE_DIR = 'print_queue'
class OdooClient(object):
    '''
    JSON-RPC client for the odoo print service: holds credentials, polls
    the remote print queue, signs requests (HMAC-SHA256) and provides
    AES-CBC/PKCS7 and chunked-RSA helpers for payload encryption.
    '''
    # Device-bound key material derived from the local MAC address.
    __local_key = MAC

    def __init__(self, host, port, protocol, login, password, app_id=None, app_key=None):
        """Store connection settings and credentials.

        NOTE(review): ``self.login = login`` shadows the ``login()``
        method on instances, making it uncallable; kept unchanged for
        attribute compatibility -- confirm which is intended.
        """
        if protocol == 'jsonrpc':
            scheme = 'http'
        else:
            scheme = 'https'
        self.url = '%s://%s:%s' % (scheme, host, port)
        self.login = login
        self.password = password
        self.app_id = app_id
        self.app_key = app_key

    def login(self):
        # Shadowed by the instance attribute of the same name (see __init__).
        params = {
            'login': self.login,
            'password': self.password
        }
        pass

    def crontab(self):
        pass

    def loop(self):
        """Poll the remote print endpoint every 2 seconds, forever."""
        api = '/api/post/remote/print'
        while True:
            time.sleep(2)
            data = self.jsonpost(api)

    def jsonpost(self, api, **kwargs):
        """POST a JSON-RPC request to *api* and return the decoded result.

        Raises Exception for transport errors reported by odoo and for a
        non-zero business 'state' in the result payload; returns None for
        an empty/failed HTTP response.
        """
        t_str = str(int(time.time()))
        request_data = {
            # fixed: was {kwargs} -- a set literal wrapping a dict (TypeError).
            'params': kwargs
        }
        headers = {
            'Content-Type': 'application/json',
            "X-Appid": self.app_id,
            "X-Deviceid": "windows os client",
            "X-Timestamp": t_str
        }
        if self.app_id:
            headers['Authorization'] = self.authorization(api, request_data, headers)
        # fixed: was "'%s/%s'(self.url, api)" -- the % operator was missing
        # (api already starts with '/').
        url = '%s%s' % (self.url, api)
        response = requests.post(url, data=json.dumps(request_data), headers=headers)
        if not response:
            return
        response_json = json.loads(response.text)
        if response_json.get('error'):
            raise Exception(response_json.get('error').get('data').get('message'))
        result = json.loads(response_json.get('result'))
        if result.get('state', -1) != 0:
            raise Exception(result.get('msg'))
        return result

    def pkcs7padding(self, text):
        """
        Pad plaintext with PKCS7.

        AES operates on 16-byte blocks, so the UTF-8 encoded plaintext is
        padded up to a multiple of the block size. Note utf-8 encodes
        ASCII as 1 byte and CJK characters as 3 bytes.
        :param text: plaintext to pad
        :return: padded text
        """
        bs = AES.block_size  # 16
        length = len(text)
        bytes_length = len(bytes(text, encoding='utf-8'))
        padding_size = length if (bytes_length == length) else bytes_length
        padding = bs - padding_size % bs
        # chr(padding) is the conventional PKCS7 filler; some stacks use '\0'.
        padding_text = chr(padding) * padding
        return text + padding_text

    def pkcs7unpadding(self, text):
        """
        Strip PKCS7 padding from decrypted text.
        :param text: decrypted string
        :return: unpadded string
        """
        length = len(text)
        unpadding = ord(text[length - 1])
        return text[0:length - unpadding]

    def encrypt(self, key, content):
        """
        AES encrypt *content* with *key* (key doubles as IV), CBC mode,
        PKCS7 padding. Returns base64 text.
        :param key: secret key
        :param content: plaintext
        :return: base64-encoded ciphertext
        """
        key_bytes = bytes(key, encoding='utf-8')
        iv = key_bytes
        cipher = AES.new(key_bytes, AES.MODE_CBC, iv)
        # Pad the plaintext to the block size.
        content_padding = self.pkcs7padding(content)
        encrypt_bytes = cipher.encrypt(bytes(content_padding, encoding='utf-8'))
        # Re-encode as base64 text.
        result = str(b64encode(encrypt_bytes), encoding='utf-8')
        return result

    def decrypt(self, key, content):
        """
        AES decrypt base64 *content* with *key* (key doubles as IV), CBC
        mode, PKCS7 unpadding.
        :param key: secret key
        :param content: base64-encoded ciphertext
        :return: plaintext
        """
        key_bytes = bytes(key, encoding='utf-8')
        iv = key_bytes
        cipher = AES.new(key_bytes, AES.MODE_CBC, iv)
        encrypt_bytes = b64decode(content)
        decrypt_bytes = cipher.decrypt(encrypt_bytes)
        result = str(decrypt_bytes, encoding='utf-8')
        # Remove the PKCS7 filler.
        result = self.pkcs7unpadding(result)
        return result

    def rsa_long_decrypt(self, msg, priv_key_str, default_length=128):
        """
        Chunked RSA decryption; the result is base64-encoded on return.
        Use 128 for a 1024-bit key; priv_key_str must carry the
        -----BEGIN/END RSA PRIVATE KEY----- armour (or be base64 of it).
        """
        msg = b64decode(msg)
        if "-----BEGIN RSA PRIVATE KEY-----" not in priv_key_str:
            priv_key_str = b64decode(priv_key_str)
        pri = RSA.importKey(priv_key_str)
        privobj = PKCS1_v1_5.new(pri)
        length = len(msg)
        if length < default_length:
            # fixed: decrypt() yields bytes; joining str chars was a py2 leftover.
            return b64encode(privobj.decrypt(msg, 'xyz'))
        res = []
        offset = 0
        while length - offset > 0:
            if length - offset >= default_length:
                res.append(privobj.decrypt(msg[offset:offset + default_length], 'xyz'))
            else:
                res.append(privobj.decrypt(msg[offset:], 'xyz'))
            offset += default_length
        # fixed: bytes chunks must be joined with b"".join on py3.
        return b64encode(b"".join(res))

    def rsa_long_encrypt(self, msg, pub_key, default_length=117):
        """
        Chunked RSA encryption; the result is base64-encoded on return.
        :param msg: plaintext (str or bytes)
        :param pub_key: public key (1024 bit) with -----BEGIN/END PUBLIC KEY----- armour
        :param default_length: chunk size, 117 for a 1024-bit key
        """
        if "-----BEGIN PUBLIC KEY-----" not in pub_key:
            pub_key = b64decode(pub_key)
        pub_rsa = RSA.importKey(pub_key)
        pub_obj = PKCS1_v1_5.new(pub_rsa)
        # fixed: work on bytes throughout -- encrypt() requires bytes on py3,
        # and the original only encoded in the short-message branch.
        if isinstance(msg, str):
            msg = msg.encode('utf-8')
        msg_length = len(msg)
        if msg_length < default_length:
            return b64encode(pub_obj.encrypt(msg))
        offset = 0
        res = []
        while msg_length - offset > 0:
            if msg_length - offset > default_length:
                res.append((pub_obj.encrypt(msg[offset:offset + default_length])))
            else:
                res.append(pub_obj.encrypt(msg[offset:]))
            offset += default_length
        # fixed: bytes chunks must be joined with b"".join on py3.
        return b64encode(b"".join(res))

    def encrypt_all_attr(self):
        """Return a copy of __dict__ with every *cipher* attribute encrypted.

        NOTE(review): ``self.__key`` is not defined anywhere in this class
        (only ``__local_key``); as written this raises AttributeError --
        confirm the intended key attribute before use.
        """
        result = {}
        for k, v in self.__dict__.items():
            if 'cipher' in k:
                result[k] = self.encrypt(self.__key, v)
            else:
                result[k] = v
        return result

    def decrypt_all_attr(self, items):
        """Return a copy of the dict *items* with every *cipher* value
        decrypted (same ``self.__key`` caveat as encrypt_all_attr)."""
        result = {}
        # fixed: was "items()" -- calling the dict object itself (TypeError).
        for k, v in items.items():
            if 'cipher' in k:
                result[k] = self.decrypt(self.__key, v)
            else:
                result[k] = v
        return result

    def authorization(self, api, request_json, headers):
        """Compute the HMAC-SHA256 request signature over the method, api
        path, the required lower-cased headers and the SHA256 of the JSON
        body. Ported to Python 3 (str/bytes handling, dict API)."""
        # fixed: json.dumps() has no 'encoding' kwarg on py3; hash bytes.
        request_data = json.dumps(request_json)
        body_hash = hashlib.sha256(request_data.encode('utf-8')).hexdigest()
        # Build the string-to-sign.
        data = "POST\n"
        data = data + api + "\n"
        data = data + "\n"
        # Header keys must be lower-cased so the signature is stable.
        headers_l = dict((k.lower(), v) for k, v in headers.items())
        # Required headers: Content-Type, X-Appid, X-Timestamp, X-Deviceid.
        required = ("x-appid", "x-timestamp", "x-deviceid", "content-type")
        if all(k in headers_l for k in required):
            request_headers = {k: headers_l[k].strip() for k in required}
            sorted_headers = sorted(request_headers.items(), key=lambda d: d[0])
        else:
            print("missing required headers")
            exit(-1)
        s_header = ""
        for (key, value) in sorted_headers:
            data = data + key + ":" + value + "\n"
            if s_header == "":
                s_header = key
            else:
                s_header = s_header + ";" + key
        data = data + s_header + "\n"
        data = data + body_hash
        # Sign: HMAC-SHA256 over the SHA256 hex digest of the string-to-sign.
        hash_data = hashlib.sha256(data.encode('utf-8')).hexdigest()
        app_key = self.app_key if isinstance(self.app_key, bytes) else self.app_key.encode('utf-8')
        return HMAC(app_key, hash_data.encode('utf-8'), hashlib.sha256).hexdigest()
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,297
|
oyjs1989/printbarcode
|
refs/heads/master
|
/myean13/renderer.py
|
"""Rendering code for EAN-13 barcode"""
from functools import reduce
from io import BytesIO
from decimal import *
from PIL import Image, ImageFont, ImageDraw
# maps bar width against font size
font_sizes = {
1: 8,
2: 14,
3: 18,
4: 24
}
class EAN13Renderer:
    """Rendering class - given the code and corresponding bar encodings
    and guard bars, it will add edge zones and render to an image."""
    width = None
    height = None

    def __init__(self, code, left_bars, right_bars, guards, font, img, options):
        """Store the encoded bar data, font and target image, then compute
        the layout immediately via set_args()."""
        self.code = code
        self.left_bars = left_bars
        self.right_bars = right_bars
        self.guards = guards
        self.font = font
        self.img = img
        self.options = options
        self.set_args()

    def set_args(self):
        """Resolve margins and the barcode box (in pixels) from options.

        Margin/size options are millimetres and are multiplied by the
        'MULTIPLE' pixels-per-mm factor.
        Raises OverflowError when margins exceed the target image.
        """
        def sum_len(total, item):
            """add the length of a given item to the total"""
            return total + len(item)
        # 12 data digits x 7 modules each, plus the three guard patterns.
        num_bars = (7 * 12) + reduce(sum_len, self.guards, 0)
        MULTIPLE = self.options.get('MULTIPLE')
        self.margin_left = self.options.get('margin_left', Decimal('0')) * MULTIPLE
        self.margin_right = self.options.get('margin_right', Decimal('0')) * MULTIPLE
        # fixed: horizontal margins were compared against img.size[1] (the
        # image HEIGHT) -- compare against the width instead.
        if self.margin_left + self.margin_right > self.img.width:
            raise OverflowError('margin left and margin right over width in total')
        self.margin_top = self.options.get('margin_top', Decimal('0')) * MULTIPLE
        self.margin_bottom = self.options.get('margin_bottom', Decimal('0')) * MULTIPLE
        if self.margin_top + self.margin_bottom > self.img.height:
            raise OverflowError('margin top and margin bottom over height in total')
        self.image_width = self.options.get('width', Decimal(
            '0')) * MULTIPLE or self.img.width - self.margin_left - self.margin_right
        if self.img.width < self.margin_left + self.margin_right + self.image_width:
            # fixed: clamp against the width, not img.size[1] (the height).
            self.image_width = self.img.width - self.margin_left - self.margin_right
        self.image_height = self.options.get('height', Decimal(
            '0')) * MULTIPLE or self.img.height - self.margin_top - self.margin_bottom
        if self.img.height < self.margin_top + self.margin_bottom + self.image_height:
            self.image_height = self.img.height - self.margin_top - self.margin_bottom
        # Reserve room on the left for the leading human-readable digit.
        width, height = self.font.getsize(self.code[0])
        self.bar_width = round((self.image_width - width) / num_bars)
        self.current = self.margin_top + self.margin_bottom + self.image_height

    def write_image(self):
        """Draw the bars and the human-readable digits directly onto the
        target image (self.img) inside the computed box."""
        bar_width = self.bar_width
        img = self.img
        bar_height = self.image_height
        margin_left = self.margin_left
        margin_top = self.margin_top
        width, height = self.font.getsize(self.code[0])

        class BarWriter:
            """Class which moves across the image, writing out bars"""
            def __init__(self, img):
                self.img = img
                self.current_x = margin_left + width
                self.symbol_top = margin_top
                self.bar_height = bar_height

            def write_bar(self, value, full=False):
                """Draw a bar at the current position if the value is 1,
                otherwise move on silently. Guard bars (full=True) are
                drawn slightly taller than data bars."""
                bar_height = round(self.bar_height * Decimal(full and 0.88 or 0.8))
                if value == 1:
                    for ypos in range(round(self.symbol_top), round(bar_height + self.symbol_top)):
                        for xpos in range(round(self.current_x),
                                          round(self.current_x + bar_width)):
                            img.putpixel((xpos, ypos), 0)
                self.current_x += bar_width

            def write_bars(self, bars, full=False):
                """write all bars to the image"""
                for bar in bars:
                    self.write_bar(int(bar), full)

        # Draw the bars: guard / left half / guard / right half / guard.
        writer = BarWriter(self.img)
        writer.write_bars(self.guards[0], full=True)
        writer.write_bars(self.left_bars)
        writer.write_bars(self.guards[1], full=True)
        writer.write_bars(self.right_bars)
        writer.write_bars(self.guards[2], full=True)
        # Draw the human-readable text: lead digit, left group, right group.
        draw = ImageDraw.Draw(self.img)
        draw.text((margin_left, round(margin_top + bar_height * Decimal(0.75))), self.code[0], font=self.font)
        draw.text((21 * bar_width, round(margin_top + bar_height * Decimal(0.75))), self.code[1:7], font=self.font)
        draw.text((67 * bar_width, round(margin_top + bar_height * Decimal(0.75))), self.code[7:], font=self.font)

    def get_pilimage(self, barcode_width, barcode_height, bar_width):
        """Render the barcode on its own image at integral bar width, then
        resize to (barcode_width, barcode_height) and overlay the digits.
        Returns the resulting PIL image."""
        def sum_len(total, item):
            """add the length of a given item to the total"""
            return total + len(item)
        width, height = self.font.getsize(self.code[0])
        num_bars = (7 * 12) + reduce(sum_len, self.guards, 0)
        image_width = num_bars * bar_width
        # Scale the reserved digit column proportionally to the bar area.
        font_width = round(width / barcode_width * image_width)
        image_width = round(width / barcode_width * image_width + image_width)
        image_height = round(image_width / barcode_width * barcode_height)
        img = Image.new('L', (image_width, image_height), 255)

        class BarWriter:
            """Class which moves across the image, writing out bars"""
            def __init__(self, img):
                self.img = img
                self.current_x = font_width
                self.symbol_top = 0

            def write_bar(self, value, full=False):
                """Draw a bar at the current position if the value is 1,
                otherwise move on silently. Guard bars are taller."""
                bar_height = int(image_height * (full and 0.9 or 0.8))
                if value == 1:
                    for ypos in range(self.symbol_top, bar_height):
                        for xpos in range(self.current_x, self.current_x + bar_width):
                            img.putpixel((xpos, ypos), 0)
                self.current_x += bar_width

            def write_bars(self, bars, full=False):
                """write all bars to the image"""
                for bar in bars:
                    self.write_bar(int(bar), full)

        # Draw the bars: guard / left half / guard / right half / guard.
        writer = BarWriter(img)
        writer.write_bars(self.guards[0], full=True)
        writer.write_bars(self.left_bars)
        writer.write_bars(self.guards[1], full=True)
        writer.write_bars(self.right_bars)
        writer.write_bars(self.guards[2], full=True)
        # Resize to the requested output size.
        img = img.resize((round(barcode_width), round(barcode_height)))
        # Overlay the human-readable digits at fixed fractions of the width.
        draw = ImageDraw.Draw(img)
        draw.text((Decimal(0.0) * barcode_width, round(barcode_height * Decimal(0.75))), self.code[0], font=self.font)
        draw.text((Decimal(0.095) * barcode_width, round(barcode_height * Decimal(0.75))), self.code[1:7], font=self.font)
        draw.text((Decimal(0.55) * barcode_width, round(barcode_height * Decimal(0.75))), self.code[7:], font=self.font)
        return img
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,298
|
oyjs1989/printbarcode
|
refs/heads/master
|
/myqrcode/qrcodegenerate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: qrcodegenerate
# Author : lumi
# date: 2019/10/15
# Description :
'''
# - Custom package
# - Third party module of python
try:
from .textencoder import TextEncoder
from .renderer import QRCodeRenderer
except ImportError:
from textencoder import TextEncoder
from renderer import QRCodeRenderer
try:
from PIL import Image
except ImportError:
import Image
class QrcodeGenerate(object):
    """Top-level QR-code facade: encodes the input text into a module
    matrix and exposes several renderings of it (file, PNG bytes, ascii
    art, PIL image)."""

    def __init__(self, text, ecl=None, **option):
        """Encode *text* at error-correction level *ecl*.

        ecl is one of the ErrorCorrectionLevel constants:
        L / M / Q / H.
        """
        encoder = TextEncoder()
        self.matrix = encoder.encode(text, ecl)
        self.height = 0
        self.width = 0
        self.option = option

    def save(self, filename, cellsize=5):
        """Render the matrix and write it out to *filename*."""
        QRCodeRenderer(self.matrix).write_file(cellsize, filename)

    def get_imagedata(self, cellsize=5):
        """Return the matrix rendered as a PNG bytestream and record the
        rendered matrix size on self.width/self.height."""
        renderer = QRCodeRenderer(self.matrix)
        imagedata = renderer.get_imagedata(cellsize)
        self.height = renderer.mtx_size
        self.width = renderer.mtx_size
        return imagedata

    def get_ascii(self):
        """Return an ascii-art representation of the matrix."""
        return QRCodeRenderer(self.matrix).get_ascii()

    def get_pilimage(self, cellsize=5, colour=0, width=4):
        """Return the matrix rendered as a PIL image."""
        renderer = QRCodeRenderer(self.matrix)
        return renderer.get_pilimage(cellsize, colour=colour, width=width)
if __name__ == '__main__':
    # Manual smoke test: render a sample zigbee payload to qrcode.png.
    s = 'G$M:1694$S:456SS111992900009$D:000000000F358D05%Z$A:04CF8CDF3C765017$I:163AF41829724ED328243F8A91C5179CC548'
    qr = QrcodeGenerate(s, 'l')
    image = qr.get_pilimage(10, width=0)
    image.save('qrcode.png','png')
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,299
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app/tender/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: __init__.py
# Author : lumi
# date: 2019/10/24
# Description :
'''
# Run mode: 'debug' when executed directly, 'produce' when imported.
if __name__ == '__main__':
    MODE = 'debug'
else:
    MODE = 'produce'
from decimal import *
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
from mycode128 import Code128Generate
from myean13 import EAN13Generate
from myqrcode import QrcodeGenerate
from qrcode import QRCode, constants
# Default pixels-per-millimetre scale used when rasterising templates.
DEFAULT_MULTIPLE = Decimal('50')
# 3x3 anchor grids as (col, row): 0 = left/top, 1 = center/mid,
# 2 = right/bottom. OBJ_* anchors the element, TMP_* the template.
OBJ_REFERENCE_POINT = {
    'LEFT_TOP': (0, 0),
    'RIGHT_TOP': (2, 0),
    'CENTER_TOP': (1, 0),
    'LEFT_MID': (0, 1),
    'RIGHT_MID': (2, 1),
    'CENTER_MID': (1, 1),
    'LEFT_BOTTOM': (0, 2),
    'RIGHT_BOTTOM': (2, 2),
    'CENTER_BOTTOM': (1, 2)}
TMP_REFERENCE_POINT = {
    'LEFT_TOP': (0, 0),
    'RIGHT_TOP': (2, 0),
    'CENTER_TOP': (1, 0),
    'LEFT_MID': (0, 1),
    'RIGHT_MID': (2, 1),
    'CENTER_MID': (1, 1),
    'LEFT_BOTTOM': (0, 2),
    'RIGHT_BOTTOM': (2, 2),
    'CENTER_BOTTOM': (1, 2)}
class BaseTender(object):
    """Base class for a printable template element.

    Holds geometry (in millimetres, scaled to pixels by *multiple*),
    anchor points and arbitrary element characteristics; run() performs
    the layout-then-draw lifecycle. The calculation hooks are stubs to be
    filled in by subclasses or later revisions.
    """
    def __init__(self, image, draw, width=0, height=0, x=0, y=0, obj_reference_point=OBJ_REFERENCE_POINT['LEFT_TOP'],
                 tmp_reference_point=TMP_REFERENCE_POINT['LEFT_TOP'], horizontal_center=False, vertical_center=False,
                 multiple=DEFAULT_MULTIPLE, rotate=0, **kwargs):
        self.image = image
        self.draw = draw
        self.x = Decimal(x) * multiple
        self.y = Decimal(y) * multiple
        self.width = Decimal(width) * multiple
        self.height = Decimal(height) * multiple
        self.horizontal_center = horizontal_center  # advanced option: force horizontal centering
        self.vertical_center = vertical_center  # advanced option: force vertical centering
        self.obj_reference_point = obj_reference_point
        self.tmp_reference_point = tmp_reference_point
        self.multiple = multiple
        self.rotate = rotate
        self.characteristic = kwargs
    def verify_width_height(self):
        '''
        1. Compute the element size for content-driven elements (QR code,
        barcode, text):
        if the text is shorter than width, the text length wins;
        if the text is longer than width, wrap at width.
        (Stub - not implemented yet.)
        :return:
        '''
        pass
    def calculation_absolute_position(self):
        '''Compute the absolute position from the element anchor point,
        the template anchor point and x/y. (Stub.)'''
        pass
    def calculation_relative_position(self):
        '''
        Compute the top-left corner position (PIL draws from top-left).
        (Stub.)
        :return:
        '''
        pass
    def calculation_position(self):
        # First resolve the element size,
        self.verify_width_height()
        # then the relative position,
        self.calculation_relative_position()
        # then the absolute position.
        self.calculation_absolute_position()
    def prev(self, context):
        # Store the content to render and lay it out.
        self.context = context
        self.calculation_position()
    def after(self):
        pass
    def drawing(self):
        pass
    def run(self, context):
        # Layout
        self.prev(context)
        # Render
        self.drawing()
        # Cleanup
        self.after()
class TextTender(BaseTender):
    '''Single-line text element.'''

    def __init__(self, *arg, **kwargs):
        """Read the font options from the element characteristics and
        build the PIL font (font_size is in mm, scaled by multiple)."""
        super(TextTender, self).__init__(*arg, **kwargs)
        opts = self.characteristic
        self.font_size = Decimal(opts.get('font_size'))
        self.font_style = opts.get('font_style')
        # Writing direction: rtl (right-to-left), ltr, or ttb (top-to-bottom).
        self.direction = opts.get('direction')
        # Line spacing.
        self.spacing = opts.get('spacing')
        # Alignment (left/center/right) for wrapped, uneven lines.
        self.align = opts.get('align')
        self.font = ImageFont.truetype(self.font_style, round(self.font_size * self.multiple))

    def drawing(self):
        """Render the text at the computed absolute position."""
        position = (round(self.x), round(self.y))
        self.draw.text(position, self.context, font=self.font, fill=0)
class MultilineTender(TextTender):
    '''Multi-line text: wraps the content to ``self.width``; the final
    width/height are recomputed from the wrapped text.'''

    def verify_width_height(self):
        """Wrap the text before positioning (called from calculation_position)."""
        self.multi_spilt()

    def multi_spilt(self):
        """Wrap ``self.context`` into lines no wider than ``self.width``
        and recompute ``self.width``/``self.height`` from the result.

        BUG FIXES vs. the original:
        * the character that pushed a line over the limit was discarded
          (``line[0:-1]`` dropped it and the new line started with the
          next character only) -- it now starts the next line;
        * the trailing line was never appended to the output.
        """
        lines = []
        line = ''
        for ch in self.context:
            line += ch
            text_width, _text_height = self.font.getsize(line)
            if text_width > self.width and len(line) > 1:
                # Commit the line without the overflowing character and
                # carry that character over to the next line.
                lines.append(line[:-1])
                line = line[-1]
        if line:
            lines.append(line)
        self.context = '\n'.join(lines)
        self.width, self.height = self.font.getsize_multiline(self.context)
class CodeTender(BaseTender):
    """Base class for machine-readable codes (barcode / QR code).

    Subclasses implement :meth:`get_image`; :meth:`drawing` resizes that
    image to the component's width/height and pastes it onto the canvas.
    """

    def get_image(self):
        """Return the code as a PIL image; implemented by subclasses."""

    def drawing(self):
        image = self.get_image()
        im = image.resize((round(self.width), round(self.height)), Image.ANTIALIAS)
        # BUG FIX: the original built the paste box from self.ZIGBEE_WIDTH /
        # self.ZIGBEE_HEIGHT, attributes that are never defined anywhere in
        # this hierarchy (AttributeError at runtime).  Pasting at the
        # top-left corner is sufficient -- the image is already resized.
        self.image.paste(im, (round(self.x), round(self.y)))
class BarcodeTender(CodeTender):
    '''
    Barcode printing (human-readable digits rendered with a TrueType font).
    '''
    def __init__(self, *arg, **kwargs):
        super(BarcodeTender, self).__init__(*arg, **kwargs)
        self.font_style = self.characteristic.get('font_style')
        # BUG FIX: font_size was never read from the component config, so the
        # expression below raised AttributeError on self.font_size.
        self.font_size = Decimal(self.characteristic.get('font_size'))
        self.font = ImageFont.truetype(self.font_style, round(self.font_size * self.multiple))
class QRcodeTender(CodeTender):
    """QR-code tender: renders ``self.context`` as a QR image.

    Rendering options (error correction, box size, border, colours) come
    from the component config with sensible defaults.
    """

    def __init__(self, *arg, **kwargs):
        super(QRcodeTender, self).__init__(*arg, **kwargs)
        opts = self.characteristic
        self.error_correction = opts.get('error_correction', constants.ERROR_CORRECT_L)
        self.box_size = opts.get('box_size', 10)
        self.border = opts.get('border', 0)
        self.fill_color = opts.get('fill_color', 'black')
        self.back_color = opts.get('back_color', 'white')

    def get_image(self):
        """Build and return the QR image for the current content."""
        code = QRCode(version=1,
                      error_correction=self.error_correction,
                      box_size=self.box_size,
                      border=self.border)
        code.add_data(self.context)
        code.make(fit=True)
        return code.make_image(fill_color=self.fill_color, back_color=self.back_color)
class PictureTender(BaseTender):
    """Tender that embeds an external picture.

    Only stores the configured path; drawing is not implemented yet.
    """

    def __init__(self, *arg, **kwargs):
        super(PictureTender, self).__init__(*arg, **kwargs)
        # Path of the picture to embed, taken from the component config.
        self.image_path = self.characteristic.get('image_path')
class ImagesTender(BaseTender):
    """Tender for drawn shapes (lines, boxes, ...).

    Only stores the shape description; drawing is not implemented yet.
    """

    def __init__(self, *arg, **kwargs):
        super(ImagesTender, self).__init__(*arg, **kwargs)
        # Shape identifier and stroke thickness from the component config.
        self.shape = self.characteristic.get('shape')
        self.thickness = self.characteristic.get('thickness')
# Maps a component's 'type' string to the tender class that renders it.
TENDERS = {
    'singleline': TextTender,
    'multiline': MultilineTender,
    'pic': PictureTender,
    'img': ImagesTender,
    'qrcode': QRcodeTender,
    # BUG FIX: the original listed 'qrcode' twice; the duplicate key was a
    # silent no-op.  NOTE(review): BarcodeTender is defined but unmapped --
    # the duplicate entry may have been meant as 'barcode': BarcodeTender;
    # confirm against the component configs before adding it.
}
class BackGround(object):
    def __init__(self, width, height, components, multiple=DEFAULT_MULTIPLE):
        '''
        A background owns its size, a greyscale canvas, and the component
        tenders drawn onto it.

        :param width: logical width, scaled to pixels by ``multiple``
        :param height: logical height, scaled to pixels by ``multiple``
        :param components: list of component config dicts (type/name/...)
        :param multiple: logical-unit -> pixel scale factor
        '''
        self.width = width * multiple
        self.height = height * multiple
        # 'L' = 8-bit greyscale; 255 = white background.
        self.image = Image.new('L', (round(self.width), round(self.height)), 255)
        self.draw = ImageDraw.Draw(self.image)
        self.components = self.after_init(multiple, components)

    def after_init(self, multiple, components):
        '''
        Instantiate a tender object for every component config.

        :return: dict mapping component name -> tender instance
        :raises Exception: when a component's 'type' is not in TENDERS
        '''
        result = {}
        for component in components:
            component_type = component.get('type')
            component_name = component.get('name')
            if component_type not in TENDERS:
                # BUG FIX: the original raised the raw format string
                # '%s类型type不存在' without interpolating the offending type.
                raise Exception('%s类型type不存在' % component_type)
            result[component_name] = TENDERS[component_type](self.image, self.draw, **component, multiple=multiple)
        return result

    def drawing(self, contexts):
        """Render every component with its content and return BMP bytes.

        :param contexts: dict mapping component name -> content to render
        """
        for name, obj in self.components.items():
            obj.run(contexts.get(name))
        tmp = BytesIO()
        # NOTE(review): MODE is defined elsewhere in this module; in debug
        # mode a copy of the rendered label is also written to disk.
        if MODE == 'debug':
            self.image.save('debug.png', format='BMP')
        self.image.save(tmp, format='BMP')
        return tmp.getvalue()
if __name__ == '__main__':
    # Demo: a 20x20 (logical units) label with one single-line text field.
    kwargs = {
        'width': 20,
        'height': 20,
        'components': [
            {
                'type': 'singleline',
                'x': 2,
                'y': 2,
                'height': 3,
                'width': 3,
                'name': '字段1',
                'font_style': 'Arial.ttf',  # must be resolvable by ImageFont.truetype
                'font_size': 4.5,
            }
        ]
    }
    bg = BackGround(**kwargs)
    bg.drawing({'字段1': 'haha'})
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,300
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/demo_post.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: demo_post
# Author : lumi
# date: 2019/11/26
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
import requests
import json
import time
# Local development connection defaults (immediately overridden below).
host = '127.0.0.1'
protocol = 'jsonrpc'
port = '8069'
# Production target actually used by this demo.
host = 'erp.aqara.com'
protocol = 'https'
port = '443'
request_data = {
    'params': {'db': 'erp',
               'login': 'miao.yu@aqara.com',
               'password': '123123123',
               'sn': '325/00000522'}
}
headers = {
    'Content-Type': 'application/json',
}
if protocol == 'jsonrpc':
    scheme = 'http'
else:
    scheme = 'https'
url = '%s://%s:%s/api/post/iface/get/zigbee' % (scheme, host, port)
print(url)
start_time = time.time()
response = requests.post(url, data=json.dumps(request_data), headers=headers, timeout=100)
# requests.Response is falsy for 4xx/5xx status codes.
if not response:
    exit()
response_json = json.loads(response.text)
print(response_json)
if response_json.get('error'):
    raise Exception(response_json.get('error').get('data').get('message'))
# The 'result' field is itself a JSON-encoded string.
result = json.loads(response_json.get('result'))
end_time = time.time()
print(end_time-start_time)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,301
|
oyjs1989/printbarcode
|
refs/heads/master
|
/unittest_barcode/test_api.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: test_communication
# Author : lumi
# date: 2019/10/31
# Description : 测试连接,测试
'''
# - Custom package
# - Third party module of python
# - import odoo package
from unittest import TestCase, main
import requests
import json
import jsonrpc
# Production ERP connection profile.
PRO_ERP = {'host': 'https://erp.aqara.com', 'username': 'miao.yu@aqara.com', 'password': '123123123',
           'db': 'erp'}
# XWD factory MES profile (shadowed by the HTL assignment below).
PRO_MES = {'host': 'http://218.17.28.106:8069', 'username': 'junsong.ouyang@aqara.com', 'password': 'junsong123',
           'db': 'XWD_0411'}
# HTL factory MES profile -- this is the value actually in effect.
PRO_MES = {'host': 'http://202.104.22.108:8069', 'username': 'junsong.ouyang@aqara.com', 'password': 'junsong123',
           'db': '10c_mes'}
DEV_ERP = {'host': 'http://127.0.0.1:8069', 'username': 'admin', 'password': '123456', 'db': 'erp'}
DEV_MES = {'host': 'http://127.0.0.1:8069', 'username': '1111', 'password': '123456', 'db': ''}
MODE = 'pro'  # 'dev' or 'pro'
SYSTEM = 'erp'  # 'erp' or 'mes'
ERP_FUNC = ['zigbee']  # check methods run against ERP
MES_FUNC = ['barcode69']  # check methods run against MES
class TestAPI(TestCase):
    """Smoke tests against the live ERP/MES HTTP interfaces.

    The target server is chosen by the module-level MODE/SYSTEM switches;
    an unknown combination leaves the fixture unconfigured (as before).
    NOTE: these tests talk to real servers and require network access.
    """

    def setUp(self) -> None:
        self.mode = MODE
        self.system = SYSTEM
        # (mode, system) -> connection profile; replaces the original
        # four-branch if/elif chain with a table lookup.
        profiles = {
            ('dev', 'erp'): DEV_ERP,
            ('dev', 'mes'): DEV_MES,
            ('pro', 'erp'): PRO_ERP,
            ('pro', 'mes'): PRO_MES,
        }
        profile = profiles.get((self.mode, self.system))
        if profile is None:
            # Unknown combination: leave the fixture unconfigured.
            return
        self.db = profile.get('db')
        self.username = profile.get('username')
        self.password = profile.get('password')
        self.host = profile.get('host')
        self.test_func = MES_FUNC if self.system == 'mes' else ERP_FUNC

    def post_api(self, api, input_raw):
        """POST the SN payload to `api` on the configured host.

        :return: the parsed JSON response body
        """
        request_data = {
            'params': {'db': self.db,
                       'login': self.username,
                       'password': self.password,
                       'sn': input_raw}
        }
        headers = {
            'Content-Type': 'application/json',
        }
        url = '%s%s' % (self.host, api)
        print(url)
        print(request_data)
        response = requests.post(url, data=json.dumps(request_data), headers=headers)
        response_json = json.loads(response.text)
        print(response_json)
        return response_json

    def zigbee(self):
        """ERP check: the zigbee endpoint returns the expected install-code blob."""
        api = '/api/post/iface/get/zigbee'
        response_json = self.post_api(api, '457/S1119A0800102')
        assert response_json.get('error') is None
        # The 'result' field is itself a JSON-encoded string.
        result = json.loads(response_json.get('result'))
        assert result.get('state') == 0
        data = result.get('printcontent')
        assert data != ''
        assert data.get('zigbee_info') == 'G$M:1695$S:457SS1119A0800102$D:255168460%Z$A:04CF8CDF3C7652DE$I:E5BDAB9AB74C5F6678AF9276DD20FFAA65DB'

    def barcode69(self):
        """MES check: the endpoint answers without error and with content."""
        api = '/api/post/iface/get/zigbee'
        response_json = self.post_api(api, '457/S1119A0800102')
        if response_json.get('error'):
            raise Exception(response_json.get('error').get('data').get('message'))
        result = json.loads(response_json.get('result'))
        if result.get('state', -1) != 0:
            raise Exception(result.get('msg'))
        data = result.get('printcontent')
        assert data != ''

    def test_even(self):
        """Dispatch to every check listed for the configured system."""
        for func in self.test_func:
            if hasattr(self, func):
                getattr(self, func)()
if __name__ == '__main__':
    main()  # unittest.main(): discover and run the tests in this module
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,302
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app_v1/application.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: application
# Author : lumi
# date: 2019/9/25
# Description :
'''
# - Custom package
import os
import sys
import odoorpc
from urllib.parse import urlparse
import base64
from Crypto.Cipher import AES
from copy import deepcopy
import uuid
# - Third party module of python
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot, Qt, QRegExp
from PyQt5.QtGui import QFont, QIcon, QRegExpValidator
# - import odoo package
from ui.body import Ui_Login
from ui.sn import Ui_SN
import resourcedata.images
from app.printwidgets.print_model import AqaraPrinter_69, XiaoMiPrinter_69, ZigbeeQrcode, ZigbeeQrcodeOnly, \
SNPrintRectangle, SNPrintOval, ZigbeeQrcodeBig,ZigbeeQrcodeOnlyBig
import json
import logging.handlers
# Module-wide logger: DEBUG and above go to print.log, WARNING and above
# to the console.
logger = logging.getLogger("logger")
handler1 = logging.StreamHandler()
handler2 = logging.FileHandler(filename="print.log")
logger.setLevel(logging.DEBUG)
handler1.setLevel(logging.WARNING)
handler2.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger.addHandler(handler1)
logger.addHandler(handler2)
# Hex MAC address of this machine; used as the local-cache encryption key.
MAC = uuid.UUID(int=uuid.getnode()).hex[-12:]
class LocalConfig(object):
    """Persists UI/login state to a JSON cache file.

    Attributes are loaded from / dumped to FILE.  Values whose attribute
    name contains 'cipher' can be AES-CBC encrypted with a key derived
    from this machine's MAC address.
    """
    # NOTE(review): os.environ.get('TEMP') returns None on non-Windows
    # systems, which would make os.path.join raise here -- confirm this
    # tool is Windows-only, or fall back to tempfile.gettempdir().
    PATH = os.path.join(os.environ.get('TEMP'), 'odoo')
    FILE = os.path.join(PATH, 'tmp')
    __key = bytes(MAC, encoding='utf-8')

    def __init__(self):
        self.get_file_info()

    def get_file_info(self):
        """Load cached attributes from FILE, creating an empty cache if absent."""
        if not os.path.exists(self.PATH):
            os.makedirs(self.PATH)
        if not os.path.exists(self.FILE):
            with open(self.FILE, 'w') as f:
                f.write(json.dumps(dict()))
            return
        else:
            with open(self.FILE, 'r') as f:
                context = json.load(f)
            for k, v in context.items():
                setattr(self, k, v)

    def set_file_info(self):
        """Dump all instance attributes to FILE as JSON.

        NOTE(review): this writes self.__dict__ directly, i.e. without
        going through encrypt_all_attr -- confirm whether 'cipher'
        attributes are expected on disk in the clear.
        """
        if not os.path.exists(self.PATH):
            os.makedirs(self.PATH)
        # The original wrote an empty dict and immediately overwrote it;
        # a single dump is sufficient.
        with open(self.FILE, 'w') as f:
            json.dump(self.__dict__, f)

    def pkcs7padding(self, text):
        """
        PKCS7-pad the plaintext.

        AES operates on byte blocks of 16, so the plaintext must be padded
        to a multiple of the block size before encryption.
        :param text: plaintext to pad
        :return: padded text
        """
        bs = AES.block_size  # 16
        length = len(text)
        bytes_length = len(bytes(text, encoding='utf-8'))
        # In UTF-8, ASCII chars take 1 byte while e.g. CJK chars take 3;
        # pad according to the byte length when they differ.
        padding_size = length if (bytes_length == length) else bytes_length
        padding = bs - padding_size % bs
        # chr(padding) per PKCS7; some implementations use '\0' instead.
        padding_text = chr(padding) * padding
        return text + padding_text

    def pkcs7unpadding(self, text):
        """
        Strip PKCS7 padding from a decrypted string.
        :param text: decrypted string
        :return: string without padding
        """
        length = len(text)
        unpadding = ord(text[length - 1])
        return text[0:length - unpadding]

    def encrypt(self, key, content):
        """
        AES-CBC encrypt ``content`` with ``key``.

        The key doubles as the IV; the plaintext is PKCS7 padded and the
        ciphertext is returned base64 encoded.
        :param key: encryption key (str or bytes)
        :param content: plaintext string
        :return: base64-encoded ciphertext
        """
        # BUG FIX: the class key (self.__key) is already bytes, and
        # bytes(b, encoding=...) raises TypeError -- only encode str keys.
        key_bytes = key if isinstance(key, bytes) else bytes(key, encoding='utf-8')
        iv = key_bytes
        cipher = AES.new(key_bytes, AES.MODE_CBC, iv)
        # Pad the plaintext to the block size.
        content_padding = self.pkcs7padding(content)
        # Encrypt.
        encrypt_bytes = cipher.encrypt(bytes(content_padding, encoding='utf-8'))
        # Re-encode as base64 text.
        result = str(base64.b64encode(encrypt_bytes), encoding='utf-8')
        return result

    def decrypt(self, key, content):
        """
        AES-CBC decrypt ``content`` with ``key``.

        Mirror of :meth:`encrypt`: base64 decode, decrypt, strip PKCS7.
        :param key: decryption key (str or bytes)
        :param content: base64-encoded ciphertext
        :return: plaintext string
        """
        # BUG FIX: accept bytes keys as well (see encrypt()).
        key_bytes = key if isinstance(key, bytes) else bytes(key, encoding='utf-8')
        iv = key_bytes
        cipher = AES.new(key_bytes, AES.MODE_CBC, iv)
        # base64 decode.
        encrypt_bytes = base64.b64decode(content)
        # Decrypt.
        decrypt_bytes = cipher.decrypt(encrypt_bytes)
        # Re-encode as text.
        result = str(decrypt_bytes, encoding='utf-8')
        # Strip the padding.
        result = self.pkcs7unpadding(result)
        return result

    def encrypt_all_attr(self):
        """Return self.__dict__ with 'cipher' attribute values encrypted."""
        result = {}
        for k, v in self.__dict__.items():
            if 'cipher' in k:
                result[k] = self.encrypt(self.__key, v)
            else:
                result[k] = v
        return result

    def decrypt_all_attr(self, items):
        """Return a copy of mapping ``items`` with 'cipher' values decrypted.

        :param items: dict of attribute name -> (possibly encrypted) value
        """
        result = {}
        # BUG FIX: the original called items() -- a TypeError for a dict
        # argument; iterate the mapping's items instead.
        for k, v in items.items():
            if 'cipher' in k:
                result[k] = self.decrypt(self.__key, v)
            else:
                result[k] = v
        return result
class SNInput(QtWidgets.QLineEdit):
    """SN entry field: validates the input and triggers printing on Enter."""

    def __init__(self, main):
        super(SNInput, self).__init__()
        # Accept only alphanumerics and '/', at most 20 characters.
        pattern = QRegExp("^[0-9A-Za-z/]{0,20}$")
        self.setValidator(QRegExpValidator(pattern, self))
        self.main = main

    def keyPressEvent(self, event):
        """Intercept Enter/Return to submit the SN; pass other keys through."""
        if event.key() not in (Qt.Key_Return, Qt.Key_Enter):
            QtWidgets.QLineEdit.keyPressEvent(self, event)
            return
        raw = self.text()
        self.setText('')
        if not self.main.odoo:
            QtWidgets.QMessageBox.information(self, '提示', '请先登录服务器')
        elif not raw:
            QtWidgets.QMessageBox.information(self, '提示', '输入为空')
        else:
            logger.info('%s:%s' % (self.main.config.print_method, raw))
            self.main.print(raw.strip())
class MainWindow(QtWidgets.QMainWindow, Ui_SN):
    """Main window: an SN input box plus printer-selection and login menus."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.sn_input = SNInput(self)
        self.sn_input.setObjectName("sn_input")
        font = QFont()
        font.setPointSize(25)
        self.sn_input.setFont(font)
        self.horizontalLayout.addWidget(self.sn_input)
        self.setFixedSize(self.width(), self.height())
        self.setWindowIcon(QIcon(':/images/logo.png'))
        try:
            # Printer label -> printer instance; instantiation can raise
            # (e.g. when a required font file is missing), handled below.
            self.printers = {
                '69码打印:米家': XiaoMiPrinter_69(),
                '69码打印:Aqara': AqaraPrinter_69(),
                'Zigbee:SN同页打印': ZigbeeQrcode(),
                'Zigbee:Install Code': ZigbeeQrcodeOnly(),
                'Zigbee:Install Code 14*14': ZigbeeQrcodeOnlyBig(),
                'Zigbee:14*14': ZigbeeQrcodeBig(),
                'SN打印:34.5*9.5mm': SNPrintRectangle(),
                'SN打印:36*10mm': SNPrintOval(),
            }
        except Exception as e:
            self.show()
            QtWidgets.QMessageBox.warning(self, '异常', '字体文件未安装,%s' % e)
            sys.exit()
        self.actionLogin.triggered.connect(self.open_login)
        self.odoo = None  # odoorpc connection; set by login()
        self.config = LocalConfig()
        self.print_method_actions = self.create_menu_groups(self.menu_print_method, self.printers.keys(),
                                                            self.select_print)
        self.loading_historical_data()
    def loading_historical_data(self):
        """Re-check the previously selected print method from the local cache."""
        if hasattr(self.config, 'print_method'):
            for action in self.print_method_actions.actions():
                if action.text() == self.config.print_method:
                    action.setChecked(True)
    def create_menu_groups(self, menu: QtWidgets.QAction, actions, func) -> QtWidgets.QAction:
        """Build an exclusive, checkable action group from `actions` under `menu`.

        `func` is connected to the group's triggered signal.
        """
        action_groups = QtWidgets.QActionGroup(self)
        for action in actions:
            qtaction = QtWidgets.QAction(self)
            qtaction.setObjectName(action)
            qtaction.setText(action)
            qtaction.setCheckable(True)
            menu.addAction(action_groups.addAction(qtaction))
        action_groups.isExclusive()
        action_groups.triggered.connect(func)
        return action_groups
    def select_print(self, action):
        """Persist the chosen print method to the local cache."""
        self.config.print_method = action.text()
        self.config.set_file_info()
    def open_login(self):
        """Show the login dialog."""
        self.login_dialog = LoginDialog(self)
        self.login_dialog.show()
    def print(self, input_raw):
        """Print `input_raw` with the currently selected printer.

        Shows a message box (instead of raising) on any failure.
        """
        try:
            if not hasattr(self.config, 'print_method'):
                QtWidgets.QMessageBox.warning(self, '异常', '未选择打印方法')
                return
            printer = self.printers.get(self.config.print_method)
            printer.print_(self.odoo, input_raw)
        except Exception as e:
            QtWidgets.QMessageBox.warning(self, '异常', '打印错误:%s' % e)
    def login(self, name, password, url):
        """Connect and log in to the Odoo server at `url`.

        On success stores the connection in self.odoo and persists the
        url/name history; on failure shows a message box.
        """
        if not all([name, password, url]):
            QtWidgets.QMessageBox.warning(self, '异常', '填写内容不完整')
            raise Exception
        url_parse = urlparse(url)
        if ":" in url_parse.netloc:
            host, port = url_parse.netloc.split(':')
        else:
            host = url_parse.netloc
            port = 80
        if url_parse.scheme == 'http':
            protocol = 'jsonrpc'
        else:
            protocol = 'jsonrpc+ssl'
            port = 443
        try:
            self.config.url = '%s://%s:%s' % (url_parse.scheme, host, port)
            self.config.set_file_info()
            odoo = odoorpc.ODOO(host=host, protocol=protocol, port=port)
            db = odoo.db.list()
            if hasattr(self.config, 'urls'):
                self.config.urls.append(self.config.url)
                self.config.urls = list(set(self.config.urls))
            else:
                self.config.urls = [self.config.url]
        except Exception as e:
            QtWidgets.QMessageBox.warning(self, '异常', '连接服务器失败:%s' % str(e))
            return
        try:
            # ERP instances don't expose the database name via db.list().
            if 'Nothing here' in db[0]:
                db = ['erp']
            odoo.login(db[0], name, password)
            self.odoo = odoo
            self.config.name = name
            if hasattr(self.config, 'names'):
                self.config.names.append(self.config.name)
                self.config.names = list(set(self.config.names))
            else:
                self.config.names = [self.config.name]
            self.config.set_file_info()
            if hasattr(self, 'login_info'):
                self.login_info.setText('用户名:%s 访问地址:%s' % (self.config.name, self.config.url))
            else:
                self.login_info = QtWidgets.QLabel()
                self.login_info.setText('用户名:%s 访问地址:%s' % (self.config.name, self.config.url))
                self.statusbar.addWidget(self.login_info)
            QtWidgets.QMessageBox.warning(self, '提示', '登录成功')
        except Exception as e:
            QtWidgets.QMessageBox.warning(self, '异常', '登录错误:%s' % e)
            raise e
class LoginDialog(QtWidgets.QDialog, Ui_Login):
    """Login dialog: collects name/password/url and delegates to MainWindow.login."""
    def __init__(self, main):
        super(LoginDialog, self).__init__()
        self.setupUi(self)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.main = main
        # Pre-fill with the last used URL/account and attach completers
        # fed from the cached history.
        if hasattr(self.main.config, 'url'):
            self.net.setText(self.main.config.url)
        if hasattr(self.main.config, 'name'):
            self.name.setText(self.main.config.name)
        if hasattr(self.main.config, 'urls'):
            urls = self.main.config.urls
            self.urlcompleter = QtWidgets.QCompleter(urls)
            self.net.setCompleter(self.urlcompleter)
        if hasattr(self.main.config, 'names'):
            names = self.main.config.names
            self.namecompleter = QtWidgets.QCompleter(names)
            self.namecompleter.setCaseSensitivity(Qt.CaseSensitive)  # case-sensitive completion
            self.name.setCompleter(self.namecompleter)
        # self.setWindowFlags(Qt.WindowMinimizeButtonHint |  # enable minimize button
        #                     Qt.WindowCloseButtonHint |  # enable close button
        #                     Qt.WindowStaysOnTopHint)
        self.setFixedSize(self.width(), self.height())
        self.setWindowIcon(QIcon(':/images/logo.png'))
        btns = self.buttonBox.buttons()
        btns[0].setText('确认')
        btns[1].setText('取消')
    @pyqtSlot()
    def accept(self):
        """OK handler: attempt the login, then hide the dialog either way.

        login() reports failures itself via message boxes, so any exception
        is deliberately swallowed here.
        """
        password = self.password.text()
        name = self.name.text()
        url = self.net.text()
        try:
            self.main.login(name, password, url)
        except Exception:
            pass
        self.hide()
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,303
|
oyjs1989/printbarcode
|
refs/heads/master
|
/myqrcode/textencoder.py
|
"""Text encoder for QR Code encoder"""
import logging
try:
    from . import isodata
except ImportError:
    # BUG FIX: the fallback (used when the module is run outside the
    # package) imported "isodate" -- a typo for "isodata" -- so every
    # later isodata.* reference raised NameError.
    import isodata
LOG = logging.getLogger("qrcode")
class BitStream:
    """Minimal bit stream backed by a flat list of 0/1 integers."""

    def __init__(self):
        self.data = []

    def append(self, value, bitsnum):
        """Append the low ``bitsnum`` bits of ``value``, most significant first."""
        if bitsnum < 1:
            raise ValueError("Wrong value for number of bits (%d)" % bitsnum)
        self.data.extend((value >> shift) & 0x01
                         for shift in reversed(range(bitsnum)))

    def prepend(self, value, bitsnum):
        """Prepend the low ``bitsnum`` bits of ``value``; the inserted run
        is most-significant-bit first, ahead of the existing data."""
        if bitsnum < 1:
            raise ValueError("Wrong value for number of bits (%d)" % bitsnum)
        self.data[:0] = [(value >> shift) & 0x01
                         for shift in reversed(range(bitsnum))]
class TextEncoder:
    """Text encoder class for QR Code.

    encode() drives the full pipeline: byte-mode encode the text, pad to
    the symbol capacity, append Reed-Solomon error codes, and build the
    final module matrix.
    """
    def __init__(self):
        self.version = None              # chosen symbol version (1..40)
        self.ecl = None                  # error-correction level (numeric, see encode())
        self.codewords = []              # data + error-correction codewords
        self.matrix = None               # final 0/1 module matrix
        self.mtx_size = 0                # matrix side length in modules
        self.minfo = None                # isodata.MatrixInfo for (version, ecl)
        self.max_data_codewords = None   # data capacity of the chosen version
    # end def __init__
    def encode(self, text, ecl=None):
        """Encode the given text and add padding and error codes
        also set up the correct matrix size for the resulting codewords.

        :param text: text to encode (byte mode, one codeword per char)
        :param ecl: error-correction level letter ('L'/'M'/'Q'/'H'),
                    defaults to 'M'
        :return: the 0/1 module matrix
        """
        self.__init__()
        if ecl is None:
            ecl = 'M'
        # end if
        # Letter -> internal numeric level used to index isodata tables.
        str2ecl = {"L": 1, "l": 1, "M": 0, "m": 0, "Q": 3, "q": 3, "H": 2, "h": 2}
        self.ecl = str2ecl[ecl]
        self.encode_text(text)
        self.pad()
        self.minfo = isodata.MatrixInfo(self.version, self.ecl)
        self.append_error_codes()
        LOG.debug(
            "Codewords: " + ' '.join([str(codeword) for codeword in self.codewords]))
        self.create_matrix()
        return self.matrix
    # end def encode
    def encode_text(self, text):
        """Encode the given text into bitstream (byte mode).

        Also selects the smallest symbol version that fits the data and
        sets self.version / self.max_data_codewords.
        """
        char_count_num = 8       # width of the character-count field
        result_len = 4 + 8 * len(text)  # 4-bit mode indicator + 8 bits per char
        terminator_len = 4
        # Calculate smallest symbol version
        for self.version in range(1, 42):
            if self.version == 10:
                # Versions >= 10 use a 16-bit character count field.
                char_count_num = 16
                result_len += 8
            elif self.version == 41:
                raise ValueError("QRCode cannot store %d bits" % result_len)
            # end if
            max_bits = isodata.MAX_DATA_BITS[self.version - 1 + 40 * self.ecl]
            if max_bits >= result_len:
                if max_bits - result_len < 4:
                    # Shorten the terminator when the symbol is nearly full.
                    terminator_len = max_bits - result_len
                # end if
                self.max_data_codewords = max_bits >> 3
                break
            # end if
        # end for
        bitstream = BitStream()
        for char in text:
            bitstream.append(ord(char), 8)
        # end for
        bitstream.prepend(len(text), char_count_num)
        # write 'byte' mode
        bitstream.prepend(4, 4)
        # add terminator
        bitstream.append(0, terminator_len)
        # convert bitstream into codewords
        # NOTE(review): a trailing partial byte (possible when the
        # terminator was shortened) is dropped here -- confirm intended.
        byte = 0
        bit_num = 7
        for bit in bitstream.data:
            byte |= bit << bit_num
            bit_num -= 1
            if bit_num == -1:
                self.codewords.append(byte)
                bit_num = 7
                byte = 0
            # end if
        # end for
    # end def encode_text
    def pad(self):
        """Pad out the encoded text to the correct word length
        using the alternating PAD codewords 236/17 from the spec."""
        pads = [236, 17]
        pad_idx = 0
        for _ in range(len(self.codewords), self.max_data_codewords):
            self.codewords.append(pads[pad_idx])
            pad_idx = 1 - pad_idx
        # end for
    # end def pad
    def append_error_codes(self):
        """Calculate the necessary number of error codes for the encoded
        text and padding codewords, and append to the codeword buffer"""
        # Split the data codewords into the RS blocks mandated by the
        # (version, ecl) block order.
        i = 0
        j = 0
        rs_block_number = 0
        rs_temp = [[]]
        while i < self.max_data_codewords:
            rs_temp[rs_block_number].append(self.codewords[i])
            j += 1
            if j >= self.minfo.rs_block_order[rs_block_number] - self.minfo.rs_ecc_codewords:
                j = 0
                rs_block_number += 1
                rs_temp.append([])
            # end if
            i += 1
        # end while
        # Polynomial-divide each block by the generator (via rs_cal_table)
        # and append the remainders as the error-correction codewords.
        rs_block_number = 0
        rs_block_order_num = len(self.minfo.rs_block_order)
        while rs_block_number < rs_block_order_num:
            rs_codewords = self.minfo.rs_block_order[rs_block_number]
            rs_data_codewords = rs_codewords - self.minfo.rs_ecc_codewords
            rstemp = rs_temp[rs_block_number]
            j = rs_data_codewords
            while j > 0:
                first = rstemp[0]
                if first != 0:
                    rstemp = rstemp[1:]
                    cal = self.minfo.rs_cal_table[first]
                    if len(rstemp) < len(cal):
                        rstemp, cal = cal, rstemp
                    # end if
                    cal += [0] * (len(rstemp) - len(cal))
                    rstemp = [x1 ^ x2 for x1, x2 in zip(rstemp, cal)]
                else:
                    rstemp = rstemp[1:]
                # end if
                j -= 1
            # end while
            self.codewords += rstemp
            rs_block_number += 1
        # end while
    # end def append_error_codes
    def create_matrix(self):
        """Create QR Code matrix: place codewords, pick the best mask,
        write the format info, then finalize with the frame pattern."""
        matrix_content = self.minfo.create_matrix(self.version, self.codewords)
        self.mtx_size = len(matrix_content)
        LOG.debug("Matrix size is %d", self.mtx_size)
        mask_number = self.minfo.calc_mask_number(matrix_content)
        mask_content = 1 << mask_number
        format_info_value = ((self.ecl << 3) | mask_number)
        self.minfo.put_format_info(matrix_content, format_info_value)
        self.matrix = self.minfo.finalize(matrix_content, mask_content)
    # end def create_matrix
# end class TextEncoder
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,304
|
oyjs1989/printbarcode
|
refs/heads/master
|
/myqrcode/isodata.py
|
"""ISO/IEC 18004:2006 tables and functions implementation"""
import os.path
# Maximum data capacity in bits, indexed by (version - 1) + 40 * ecl.
# The four 40-entry groups correspond to the numeric error-correction
# levels used by the encoder (presumably M, L, H, Q order -- see the
# str2ecl mapping in the text encoder; confirm against the data files).
MAX_DATA_BITS = [
    128, 224, 352, 512, 688, 864, 992, 1232, 1456, 1728,
    2032, 2320, 2672, 2920, 3320, 3624, 4056, 4504, 5016, 5352,
    5712, 6256, 6880, 7312, 8000, 8496, 9024, 9544, 10136, 10984,
    11640, 12328, 13048, 13800, 14496, 15312, 15936, 16816, 17728, 18672,
    152, 272, 440, 640, 864, 1088, 1248, 1552, 1856, 2192,
    2592, 2960, 3424, 3688, 4184, 4712, 5176, 5768, 6360, 6888,
    7456, 8048, 8752, 9392, 10208, 10960, 11744, 12248, 13048, 13880,
    14744, 15640, 16568, 17528, 18448, 19472, 20528, 21616, 22496, 23648,
    72, 128, 208, 288, 368, 480, 528, 688, 800, 976,
    1120, 1264, 1440, 1576, 1784, 2024, 2264, 2504, 2728, 3080,
    3248, 3536, 3712, 4112, 4304, 4768, 5024, 5288, 5608, 5960,
    6344, 6760, 7208, 7688, 7888, 8432, 8768, 9136, 9776, 10208,
    104, 176, 272, 384, 496, 608, 704, 880, 1056, 1232,
    1440, 1648, 1952, 2088, 2360, 2600, 2936, 3176, 3560, 3880,
    4096, 4544, 4912, 5312, 5744, 6032, 6464, 6968, 7288, 7880,
    8264, 8920, 9368, 9848, 10288, 10832, 11408, 12016, 12656, 13328]
# Total number of codewords per symbol, indexed by version (index 0 unused).
MAX_CODEWORDS = [
    0, 26, 44, 70, 100, 134, 172, 196, 242,
    292, 346, 404, 466, 532, 581, 655, 733, 815, 901, 991, 1085, 1156,
    1258, 1364, 1474, 1588, 1706, 1828, 1921, 2051, 2185, 2323, 2465,
    2611, 2761, 2876, 3034, 3196, 3362, 3532, 3706]
# Remainder bits that do not fill a whole codeword, indexed by version.
MATRIX_REMAIN_BIT = [0, 0, 7, 7, 7, 7, 7, 0,
                     0, 0, 0, 0, 0, 0, 3, 3,
                     3, 3, 3, 3, 3, 4, 4, 4,
                     4, 4, 4, 4, 3, 3, 3, 3,
                     3, 3, 3, 0, 0, 0, 0, 0, 0]
class MatrixInfo:
    """ Provides QR Code version and Error Correction Level
    dependent information necessary for creating matrix.

    All placement/mask data is loaded from the binary .dat files shipped
    in the qrcode_data directory.
    """
    def __init__(self, version, ecl):
        # path = os.path.join(os.path.split(__file__)[0], 'qrcode_data')
        # NOTE(review): resolving qrcode_data against the *current working
        # directory* breaks when the tool is launched from elsewhere; the
        # commented-out module-relative line above looks like the safer
        # choice -- confirm why it was changed.
        path = os.path.join(os.getcwd(), 'qrcode_data')
        # Total bit positions to place: whole codewords plus remainder bits.
        self.byte_num = (MATRIX_REMAIN_BIT[version] + (MAX_CODEWORDS[version] << 3))
        filename = path + "/qrv" + str(version) + "_"
        filename += str(ecl) + ".dat"
        unpack = list  # bytes -> list of ints (Python 3)
        with open(filename, "rb") as fhndl:
            # Three parallel tables per bit position: x, y, and mask byte.
            self.matrix_d = []
            self.matrix_d.append(unpack(fhndl.read(self.byte_num)))
            self.matrix_d.append(unpack(fhndl.read(self.byte_num)))
            self.matrix_d.append(unpack(fhndl.read(self.byte_num)))
            # Coordinates of the second (mirrored) format-info placement.
            self.format_info = []
            self.format_info.append(unpack(fhndl.read(15)))
            self.format_info.append(unpack(fhndl.read(15)))
            self.rs_ecc_codewords = ord(fhndl.read(1))
            self.rs_block_order = unpack(fhndl.read(128))
        # Precomputed Reed-Solomon division table for this ECC codeword count.
        filename = path + "/rsc" + str(self.rs_ecc_codewords) + ".dat"
        with open(filename, "rb") as fhndl:
            self.rs_cal_table = []
            for _ in range(0, 256):
                self.rs_cal_table.append(unpack(fhndl.read(self.rs_ecc_codewords)))
        # Fixed frame pattern (finder/timing/alignment) as a text bitmap.
        filename = path + "/qrvfr" + str(version) + ".dat"
        with open(filename, "r") as fhndl:
            frame_data_str = fhndl.read(65535)
        self.frame_data = []
        for line in frame_data_str.splitlines():
            frame_line = []
            for char in line:
                if char == '1':
                    frame_line.append(1)
                elif char == '0':
                    frame_line.append(0)
                else:
                    raise ValueError("Corrupted frame data file, found char: {}".format(char))
            self.frame_data.append(frame_line)
    def create_matrix(self, version, codewords):
        """Create matrix based on version and fills it w/ codewords"""
        mtx_size = 17 + (version << 2)  # symbol side length per the spec
        matrix = [[0 for i in range(mtx_size)] for j in range(mtx_size)]
        max_codewords = MAX_CODEWORDS[version]
        # Place each codeword bit at its precomputed (x, y) with its mask.
        i = 0
        while i < max_codewords:
            codeword_i = codewords[i]
            j = 7
            while j >= 0:
                codeword_bits_number = (i << 3) + j
                pos_x = self.matrix_d[0][codeword_bits_number]
                pos_y = self.matrix_d[1][codeword_bits_number]
                mask = self.matrix_d[2][codeword_bits_number]
                matrix[pos_x][pos_y] = ((255 * (codeword_i & 1)) ^ mask)
                codeword_i >>= 1
                j -= 1
            # end while
            i += 1
        # end while
        # Remainder bits are always zero; only the mask byte is placed.
        for matrix_remain in range(MATRIX_REMAIN_BIT[version], 0, -1):
            remain_bit_temp = matrix_remain + (max_codewords << 3) - 1
            pos_x = self.matrix_d[0][remain_bit_temp]
            pos_y = self.matrix_d[1][remain_bit_temp]
            mask = self.matrix_d[2][remain_bit_temp]
            matrix[pos_x][pos_y] = (255 ^ mask)
        # end for
        return matrix
    # end def create_matrix
    def put_format_info(self, matrix, format_info_value):
        """Put format information into the matrix"""
        # Pre-masked 15-bit format strings indexed by (ecl << 3) | mask.
        format_info = ["101010000010010", "101000100100101",
                       "101111001111100", "101101101001011",
                       "100010111111001", "100000011001110",
                       "100111110010111", "100101010100000",
                       "111011111000100", "111001011110011",
                       "111110110101010", "111100010011101",
                       "110011000101111", "110001100011000",
                       "110110001000001", "110100101110110",
                       "001011010001001", "001001110111110",
                       "001110011100111", "001100111010000",
                       "000011101100010", "000001001010101",
                       "000110100001100", "000100000111011",
                       "011010101011111", "011000001101000",
                       "011111100110001", "011101000000110",
                       "010010010110100", "010000110000011",
                       "010111011011010", "010101111101101"]
        # First placement around the top-left finder pattern; the second,
        # mirrored placement comes from the data file (self.format_info).
        format_info_x1 = [0, 1, 2, 3, 4, 5, 7, 8, 8, 8, 8, 8, 8, 8, 8]
        format_info_y1 = [8, 8, 8, 8, 8, 8, 8, 8, 7, 5, 4, 3, 2, 1, 0]
        for i in range(15):
            content = int(format_info[format_info_value][i]) * 255
            matrix[format_info_x1[i]][format_info_y1[i]] = content
            matrix[self.format_info[0][i]][self.format_info[1][i]] = content
        # end for
    # end def put_format_info
    def finalize(self, matrix_content, mask_content):
        """Create final matrix and put frame data into it.

        Each cell carries per-mask bits; `mask_content` selects the chosen
        mask's bit, and cells not set by data fall back to the frame pattern.
        """
        mtx_size = len(matrix_content)
        matrix = [[0 for i in range(mtx_size)] for j in range(mtx_size)]
        for i in range(mtx_size):
            for j in range(mtx_size):
                if (int(matrix_content[j][i]) & mask_content) != 0:
                    matrix[i][j] = 1
                else:
                    matrix[i][j] = self.frame_data[i][j]
                # end if
            # end for
        # end for
        return matrix
    # end def finalize
    def calc_demerit_score(self, bit_r, dem_data):
        """Calculate demerit score.

        Implements the four ISO 18004 penalty rules over the string-encoded
        row/column data produced by calc_mask_number.
        """
        n1_search = (chr(255) * 5) + "+|" + (bit_r * 5) + "+"
        n3_search = bit_r + chr(255) + bit_r * 3 + chr(255) + bit_r
        import re
        demerit = [0, 0, 0, 0]
        demerit[2] = len(re.findall(n3_search, dem_data[0])) * 40
        demerit[3] = dem_data[1].count(bit_r) * len(bit_r) * 100
        demerit[3] /= self.byte_num
        demerit[3] -= 50
        demerit[3] = abs(int(demerit[3] / 5)) * 10
        ptn_temp = re.findall(bit_r + bit_r + "+", dem_data[2])
        demerit[1] += sum([len(x) - 1 for x in ptn_temp])
        ptn_temp = re.findall(chr(255) + chr(255) + "+", dem_data[3])
        demerit[1] += sum([len(x) - 1 for x in ptn_temp])
        demerit[1] *= 3
        ptn_temp = re.findall(n1_search, dem_data[0])
        demerit[0] += sum([len(x) - 2 for x in ptn_temp])
        return sum(demerit)
    # end def calc_demerit_score
    def calc_mask_number(self, matrix_content):
        """Calculate mask number for matrix.

        Tries all 8 masks and keeps the one with the lowest demerit score.
        """
        mtx_size = len(matrix_content)
        mask_number = 0
        min_demerit_score = 0
        # Row-major and column-major module streams, one char per module.
        hor_master = ""
        ver_master = ""
        for i in range(0, mtx_size):
            for k in range(0, mtx_size):
                hor_master += chr(matrix_content[k][i])
                ver_master += chr(matrix_content[i][k])
            # end for
        # end for
        for i in range(0, 8):
            bit_r = chr((~(1 << i)) & 255)
            bit_mask = chr(1 << i) * mtx_size * mtx_size
            dem_data = ["", "", "", ""]
            dem_data[0] = strings_and(hor_master, bit_mask)
            dem_data[1] = strings_and(ver_master, bit_mask)
            dem_data[2] = strings_and(
                ((chr(170) * mtx_size) + dem_data[1]),
                (dem_data[1] + (chr(170) * mtx_size)))
            dem_data[3] = strings_or(
                ((chr(170) * mtx_size) + dem_data[1]),
                (dem_data[1] + (chr(170) * mtx_size)))
            dem_data = [string_not(x) for x in dem_data]
            # Re-insert row separators (chr(170)) so patterns never match
            # across row boundaries.
            str_split = lambda x, a: [x[p:p + a] for p in range(0, len(x), a)]
            dem_data = [chr(170).join(str_split(x, mtx_size)) for x in dem_data]
            dem_data[0] += chr(170) + dem_data[1]
            demerit_score = self.calc_demerit_score(bit_r, dem_data)
            if demerit_score <= min_demerit_score or i == 0:
                mask_number = i
                min_demerit_score = demerit_score
            # end if
        # end for
        return mask_number
    # end def calc_mask_number
# end class MatrixInfo
def strings_and(str1, str2):
    """Apply logical 'and' to strings, char code by char code.

    The shorter string is right-padded with NUL characters first.
    """
    if len(str1) < len(str2):
        str1, str2 = str2, str1
    padded = str2 + '\0' * (len(str1) - len(str2))
    return "".join(chr(ord(a) & ord(b)) for a, b in zip(str1, padded))
def strings_or(str1, str2):
    """Bitwise-OR two strings character by character.

    The shorter operand is right-padded with NUL bytes (OR-identity),
    so the result has the length of the longer input.
    """
    if len(str2) > len(str1):
        str1, str2 = str2, str1
    padded = str2.ljust(len(str1), '\0')
    return "".join(chr(ord(a) | ord(b)) for a, b in zip(str1, padded))
# end def strings_or
def string_not(str1):
    """Invert every byte of the string: each char c becomes chr(255 - ord(c))."""
    return "".join(chr(~ord(ch) & 255) for ch in str1)
# end def string_not
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,305
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/crc_demo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: crc_demo
# Author : lumi
# date: 2019/11/5
# Description :
'''
# - Custom package
# - Third party module of python
# - import odoo package
import binascii
import crc16
# print('0x6EE0') # AD8D69F1163A6ECDD546506D2E1F2F BB 6EE0
# input_data = b'0x83FED3407A939723A5C639B26916D505'# B5C3
# Sample payload as ASCII hex text; the trailing comment is the expected
# CRC for it (0x6EE0) — presumably from an external reference; verify.
input_data = b'0xAD8D69F1163A6ECDD546506D2E1F2FBB' # 6EE0
# Here `crc16` is still the imported third-party module (it is shadowed by
# the local crc16() function defined further down this file).
print(hex(crc16.crc16xmodem(b'AD8D69F1163A6ECDD546506D2E1F2F',int(b'BB',16))))
# import crcmod
# crc16_gen_code = crcmod.Crc(0x13D65, 0xFFFF, True, 0xFFFF)
# print(crc16_gen_code)
# select CRC-16-DNP
# crc16 = crcmod.mkCrcFun(0x8408, 0xFFFF, True, 0xFFFF)
# test
# print(hex(crc16(input_data)))
# int() accepts the leading b'0x' prefix with base 16; s is not used below.
s = int(input_data, 16)
# a = char2hex(input_data)
# print(a)
# ret = crc16.crc16xmodem(a)
# print(hex(int(ret)))
# crc = crc16.crc16xmodem(b'1234')
# crc = crc16.crc16xmodem(b'56789', crc)
# print(crc)
def checkCRC(message):
    """Bit-serial CRC-16/XMODEM (CCITT polynomial 0x1021, zero init).

    Two NUL pad bytes are appended first, matching the ymodem convention
    of clocking the register out after the data. `message` must be a str
    whose code points all fit in one byte.
    """
    POLY = 0x1021
    register = 0
    for ch in message + '\x00\x00':
        bits = ord(ch)
        # Feed the byte MSB-first into the 16-bit register.
        for shift in range(7, -1, -1):
            register = (register << 1) | ((bits >> shift) & 1)
            # A one shifted out of bit 15 triggers the polynomial reduction.
            if register & 0x10000:
                register = (register & 0xffff) ^ POLY
    return register
# print(checkCRC(input_data))
def char2hex(data):
    """Return the hexadecimal encoding of *data* (bytes) as a bytes object."""
    # binascii.b2a_hex(data)
    return binascii.hexlify(data)
def crc16(data: bytes, poly=0x1201):
    """Reflected (LSB-first) CRC-16: init 0xFFFF, final one's complement,
    then the two result bytes are swapped.

    NOTE(review): the original docstring called this CRC-16-CCITT, but the
    reflected CCITT polynomial is normally 0x8408 — 0x1201 may be a typo.
    The default is preserved verbatim so results do not change.

    This function shadows the imported ``crc16`` module for the rest of
    the file.
    """
    crc = 0xFFFF
    for byte in bytearray(data):
        crc ^= byte & 0xFF
        for _ in range(8):
            lsb = crc & 1
            crc >>= 1
            if lsb:
                crc ^= poly
    crc = ~crc & 0xFFFF
    # Swap high and low bytes of the final 16-bit value.
    return ((crc & 0xFF) << 8) | (crc >> 8)
# At this point `crc16` refers to the local function above (the imported
# module of the same name is shadowed); both lines print the same value.
print(hex(crc16(input_data)))
print(hex(crc16(input_data)))
# def crc16(data: bytearray, offset, length=16):
#     if data is None or offset < 0 or offset > len(data) - 1 and offset + length > len(data):
#         return 0
#     crc = 0xFFFF
#     for i in range(0, length):
#         crc ^= data[offset + i] << 8
#         for j in range(0, 8):
#             if (crc & 0x8000) > 0:
#                 crc = (crc << 1) ^ 0x1021
#             else:
#                 crc = crc << 1
#     return crc & 0xFFFF
# ret = crc16(input_data)
# print(hex(int(ret)))
# print(hex(int('b5c3',16)))
def crc16_ccitt_r(data, crc):
    """Fold one byte into a reflected (LSB-first) CRC-16 (polynomial 0x8408).

    Fix: the original did ``crc ^ int(data)``, which raises ValueError for
    bytes/str input such as the commented call ``crc16_ccitt_r(b'a', 0xFFFF)``.
    Integer inputs keep the original behaviour.

    Args:
        data: the byte to process — an int is used as-is; a one-byte
            ``bytes``/``bytearray`` or single-character ``str`` is converted
            to its byte value.
        crc: current 16-bit CRC register value.

    Returns:
        int: the updated CRC register.
    """
    ccitt16 = 0x8408
    if isinstance(data, (bytes, bytearray)):
        value = data[0]
    elif isinstance(data, str):
        value = ord(data[0])
    else:
        value = int(data)
    crc = crc ^ value
    for _ in range(8):
        if (crc & 1):
            crc >>= 1
            crc ^= ccitt16
        else:
            crc >>= 1
    return crc
# crc16_ccitt_r(b'a',0xFFFF)
# crc16_ccitt_r(b'a',0xFFFF)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,306
|
oyjs1989/printbarcode
|
refs/heads/master
|
/dll/printimage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: prroundimage
# Author : lumi
# date: 2019/9/20
# Description :
'''
# - Custom package
from decimal import *
# - Third party module of python
# - import odoo package
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageWin
import win32print
import win32ui
import win32con
import win32gui
from mycode128 import Code128Generate
from myean13 import EAN13Generate
import os
def get_font_path(file_path):
    """Return *file_path* if it is an existing file or a font PIL can load.

    When the path is not a file, ``ImageFont.truetype`` is used as a probe
    so system font directories are searched as well.

    Fix: the original caught the load error as ``e`` and discarded it;
    the cause is now chained onto the raised exception for debuggability.

    Raises:
        Exception: if the path is neither a file nor a loadable font.
    """
    if os.path.isfile(file_path):
        return file_path
    try:
        # Probe-load at an arbitrary small size just to validate the font.
        ImageFont.truetype(file_path, 10)
        return file_path
    except Exception as e:
        raise Exception('%s not found' % file_path) from e
# GetDeviceCaps *index* constants (values passed to hDC.GetDeviceCaps),
# not dimensions themselves.
# HORZRES / VERTRES = printable area
#
HORZRES = 8
VERTRES = 10
#
# LOGPIXELS = dots per inch
#
LOGPIXELSX = 88
LOGPIXELSY = 90
#
# PHYSICALWIDTH/HEIGHT = total area
#
PHYSICALWIDTH = 110
PHYSICALHEIGHT = 111
#
# PHYSICALOFFSETX/Y = left / top margin
#
# NOTE(review): the companion module /app/printer/__init__.py uses 112/113
# for these indices; 0 here queries a different capability — confirm intent.
PHYSICALOFFSETX = 0
PHYSICALOFFSETY = 0
# NOTE(review): computed from the index constants themselves rather than
# from GetDeviceCaps() results — looks unintended; confirm.
HORZSIZE = 25.4 * HORZRES/LOGPIXELSX
# print size
TIMES = 2.5
PRINT_WIDTH = round(338*TIMES)
PRINT_HEIGHT = round(400*TIMES)
# Default Windows printer, opened at import time.
printer_name = win32print.GetDefaultPrinter()
p = win32print.OpenPrinter(printer_name)
def print_image(image):
    """Prepare a print job for *image* (PIL Image) on the default printer.

    Uses the module-level ``file_name`` as the document name.

    NOTE(review): the actual ``dib.draw`` / ``EndPage`` / ``EndDoc`` calls
    are commented out below, so as written this starts a print job
    (StartDoc/StartPage) without drawing or closing it — confirm intent.
    """
    hDC = win32ui.CreateDC()
    hDC.CreatePrinterDC(printer_name)
    # Query device capabilities via the index constants defined above.
    printable_area = hDC.GetDeviceCaps(HORZRES), hDC.GetDeviceCaps(VERTRES)
    printer_size = hDC.GetDeviceCaps(PHYSICALWIDTH), hDC.GetDeviceCaps(PHYSICALHEIGHT)
    printer_margins = hDC.GetDeviceCaps(PHYSICALOFFSETX), hDC.GetDeviceCaps(PHYSICALOFFSETY)
    # print(printable_area)
    # print(printer_size)
    # print(printer_margins)
    # Rotate landscape images to portrait before scaling.
    if image.size[0] > image.size[1]:
        image = image.rotate(90)
    # Aspect-preserving fit factor; computed but unused — the fixed
    # PRINT_WIDTH/PRINT_HEIGHT below override it.
    ratios = [1.0 * printable_area[0] / image.size[0], 1.0 * printable_area[1] / image.size[1]]
    scale = min(ratios)
    # Start the print job, and draw the bitmap to
    # the printer device at the scaled size.
    hDC.StartDoc(file_name)
    hDC.StartPage()
    scaled_width, scaled_height = PRINT_WIDTH, PRINT_HEIGHT
    image = image.resize((scaled_width, scaled_height))
    dib = ImageWin.Dib(image)
    print(printer_margins)
    print(printer_size)
    print(printable_area)
    # scaled_width, scaled_height = [int(scale * i) for i in image.size]
    # Centred position is computed, then overridden to the top-left corner.
    x1 = int((printer_size[0] - scaled_width) / 2)
    y1 = int((printer_size[1] - scaled_height) / 2)
    y1 = 0
    x1=0
    x2 = x1 + scaled_width
    y2 = y1 + scaled_height
    # x1, x2, y1, y2 = 700, 1500, 330, 1800
    # dib.draw(hDC.GetHandleOutput(), (x1, y1, x2, y2))
    #
    # hDC.EndPage()
    # hDC.EndDoc()
    # hDC.DeleteDC()
# 1in = 2.54cm = 25.4 mm = 72pt = 6pc
# Pixels per millimetre used for the label canvas.
MULTIPLE = Decimal('50')
# Label canvas: 33.8 mm x 40.0 mm, in pixels.
width = MULTIPLE * Decimal('33.8')
height = MULTIPLE * Decimal('40.0')
# Points-to-millimetres conversion factor (25.4 mm per 72 pt).
PT_TO_MM_DECIMAL = Decimal('25.4') / Decimal('72')
# Fonts for each label region; sizes are given in points, stored in mm.
FONT_STYLE_HEAD = get_font_path('方正兰亭黑_GBK.TTF')
FONT_SZIE_HEAD = Decimal('4') * PT_TO_MM_DECIMAL
# FONT_STYLE_MID = 'ArialRegular.ttf'
FONT_STYLE_MID = get_font_path('Arial.ttf')
FONT_SZIE_MID = Decimal('4.5') * PT_TO_MM_DECIMAL
FONT_STYLE_CODE = get_font_path('Arial Unicode MS.TTF')
FONT_SZIE_CODE = Decimal('10') * PT_TO_MM_DECIMAL
FONT_STYLE_BUTTOM = get_font_path('方正兰亭黑.TTF')
FONT_SZIE_BUTTOM_LIFT = Decimal('4') * PT_TO_MM_DECIMAL
FONT_SZIE_BUTTOM_RIGHT = Decimal('3.18') * PT_TO_MM_DECIMAL
# Output file and shared drawing surface (8-bit greyscale, white = 255).
file_name = 'code.jpg'
image = Image.new('L', (round(width), round(height)), 255)
draw = ImageDraw.Draw(image)
def write_word(words, font, top=0, margin_left=0, margin_right=0):
    """Draw *words* onto the module-level image and return the text height
    in millimetres (pixel height / MULTIPLE).

    Vertical position is `top` mm from the top edge. Horizontally the text
    is anchored by `margin_left` (left edge) or, failing that,
    `margin_right` (right edge); otherwise x = 0.
    """
    text_width, text_height = font.getsize(words)
    if margin_left:
        x_pos = margin_left * MULTIPLE
    elif margin_right:
        x_pos = width - margin_right * MULTIPLE - text_width
    else:
        x_pos = 0
    draw.text((round(x_pos), top * MULTIPLE), words, font=font, fill=0)
    return Decimal(text_height) / MULTIPLE
def name_draw(first_name, second_name):
    """Draw two header lines, left-aligned (3 mm margin), starting 3 mm
    from the top; the second line sits directly below the first.

    NOTE(review): the caller passes the colour string as *second_name* and
    also to color_draw() — confirm the duplication is intended.
    """
    first_name_y = Decimal('3')
    font_sytle = FONT_STYLE_HEAD
    font_szie = FONT_SZIE_HEAD
    font = ImageFont.truetype(font_sytle, round(font_szie * MULTIPLE))
    text_width, text_height = font.getsize(first_name)
    write_word(first_name, font, top=first_name_y, margin_left=3)
    # Second line starts where the first line's text height ends (in mm).
    second_name_y = first_name_y + Decimal(text_height)/MULTIPLE
    write_word(second_name, font, top=second_name_y, margin_left=3)
def color_draw(color):
    """Draw the colour label right-aligned (3 mm margin), 3 mm from the top."""
    header_font = ImageFont.truetype(FONT_STYLE_HEAD,
                                     round(FONT_SZIE_HEAD * MULTIPLE))
    write_word(color, header_font, top=Decimal('3'), margin_right=3)
def sn_draw(sn):
    """Render the serial number as a Code128 barcode (27.8 x 5 mm) at
    position (3 mm, 6.5 mm) and paste it onto the label image.
    """
    cd = Code128Generate(sn, image, options={'MULTIPLE': MULTIPLE})
    barcode_width = Decimal('27.8') * MULTIPLE
    barcode_height = Decimal('5') * MULTIPLE
    x = Decimal('3') * MULTIPLE
    y = Decimal('6.5') * MULTIPLE
    box = (x, y, x + barcode_width, y + barcode_height)
    im = cd.get_pilimage(barcode_width, barcode_height)
    # Debug artifact: the rendered barcode is also saved to disk.
    im.save('sss.jpg')
    image.paste(im, box)
def sn_sku_draw(sn, sku):
    """Draw the SN (left-aligned) and SKU (right-aligned) on one line,
    11.7 mm from the top, using the mid-size font."""
    label_font = ImageFont.truetype(FONT_STYLE_MID,
                                    round(FONT_SZIE_MID * MULTIPLE))
    line_top = Decimal('11.7')
    write_word(sn, label_font, top=line_top, margin_left=3)
    write_word(sku, label_font, top=line_top, margin_right=3)
def barcode_draw(barcode):
    """Render the EAN-13 barcode (27.8 x 21 mm) at position (3 mm, 14.1 mm)
    and paste it onto the label image; digits use the code font.
    """
    font_sytle = FONT_STYLE_CODE
    font_szie = FONT_SZIE_CODE
    font = ImageFont.truetype(font_sytle, round(font_szie * MULTIPLE))
    cd = EAN13Generate(barcode, image, font, options={'MULTIPLE': MULTIPLE})
    barcode_width = Decimal('27.8') * MULTIPLE
    barcode_height = Decimal('21') * MULTIPLE
    x = Decimal('3') * MULTIPLE
    y = Decimal('14.1') * MULTIPLE
    box = (x, y, x + barcode_width, y + barcode_height)
    im = cd.get_pilimage(barcode_width, barcode_height)
    image.paste(im, box)
def address_date_draw(address, date):
    """Draw the address and production-date lines, left-aligned (3 mm),
    near the bottom of the label (33.6 mm and 35.1 mm from the top)."""
    bottom_font = ImageFont.truetype(FONT_STYLE_BUTTOM,
                                     round(FONT_SZIE_BUTTOM_LIFT * MULTIPLE))
    for text, top_mm in ((address, Decimal('33.6')), (date, Decimal('35.1'))):
        write_word(text, bottom_font, top=top_mm, margin_left=3)
def certificate_draw():
    """Draw the certification box (bottom-right) with its two text lines.

    The rectangle and the right-aligned text positions are hard-coded in
    millimetres, scaled by MULTIPLE.
    """
    draw.rectangle((round(Decimal('25') * MULTIPLE), round(Decimal('33.89') * MULTIPLE),
                    round(Decimal('30.8') * MULTIPLE), (round(Decimal('37') * MULTIPLE))), outline="black",
                   width=round(Decimal('0.07') * MULTIPLE))
    font_sytle = FONT_STYLE_BUTTOM
    font_szie = FONT_SZIE_BUTTOM_RIGHT
    font = ImageFont.truetype(font_sytle, round(font_szie * MULTIPLE))
    write_word('合格证', font, top=Decimal('34.2'), margin_right=Decimal('4.25'))
    write_word('已检验', font, top=Decimal('35.5'), margin_right=Decimal('4.25'))
# Compose the full label, save it as JPEG, then hand it to the printer.
# NOTE(review): the colour string appears both as name_draw()'s second
# line and in color_draw() — confirm the duplication is intended.
name_draw(u'小米米家智能门锁', u'颜色:碳素黑')
color_draw('颜色:碳素黑')
sn_draw('25311/99999999')
sn_sku_draw('25311/99999999', 'SKU:SZB4022CN')
barcode_draw('6934177714108')
address_date_draw('生产地址:广东省佛山市南海区', '生产日期:2019.09')
certificate_draw()
image.save(file_name, 'jpeg')
print_image(image)
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,307
|
oyjs1989/printbarcode
|
refs/heads/master
|
/app/printer/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# File Name: __init__.py
# Author : lumi
# date: 2019/10/24
# Description :
'''
# - Custom package
from io import BytesIO
from decimal import *
import win32print
import win32ui
from PIL import Image, ImageWin
# Inches to centimeters
INC_TO_CM = Decimal('2.54')
# GetDeviceCaps index constants (not dimensions themselves).
# HORZRES / VERTRES = printable area
HORZRES = 8
VERTRES = 10
#
# LOGPIXELS = dots per inch
#
LOGPIXELSX = 88
LOGPIXELSY = 90
#
# PHYSICALWIDTH/HEIGHT = total area
#
PHYSICALWIDTH = 110
PHYSICALHEIGHT = 111
#
# PHYSICALOFFSETX/Y = left / top margin
#
PHYSICALOFFSETX = 112
PHYSICALOFFSETY = 113
# Module-level device context for the default printer, created at import.
printer_name = win32print.GetDefaultPrinter()
hDC = win32ui.CreateDC()
hDC.CreatePrinterDC(printer_name)
printable_area = hDC.GetDeviceCaps(HORZRES), hDC.GetDeviceCaps(VERTRES)  # printable physical width/height
printer_size = hDC.GetDeviceCaps(PHYSICALWIDTH), hDC.GetDeviceCaps(PHYSICALHEIGHT)  # total physical size = printable area + offset
printer_margins = hDC.GetDeviceCaps(PHYSICALOFFSETX), hDC.GetDeviceCaps(PHYSICALOFFSETY)  # physical offset (left/top margins)
print(printable_area, printer_size, printer_margins)
print(hDC.GetDeviceCaps(LOGPIXELSX), hDC.GetDeviceCaps(LOGPIXELSY))
# Printable area converted from inches (dots / dpi) to centimetres.
print(Decimal(hDC.GetDeviceCaps(HORZRES)) / Decimal(hDC.GetDeviceCaps(LOGPIXELSX)) * INC_TO_CM,
      Decimal(hDC.GetDeviceCaps(VERTRES)) / Decimal(hDC.GetDeviceCaps(LOGPIXELSY)) * INC_TO_CM)
class Printer(object):
    """Print a PIL image on the default Windows printer, scaled to fit the
    printable area and centred on the physical page.

    NOTE(review): all work happens in __init__ as a construction side
    effect, and the shared module-level device context is consumed and
    deleted at the end — so only one Printer can be created per import.
    """

    def __init__(self, file_name, image):
        # Rotate landscape images to portrait orientation.
        if image.size[0] > image.size[1]:
            image = image.rotate(90)
        # Aspect-preserving scale that fits inside the printable area.
        ratios = [1.0 * printable_area[0] / image.size[0], 1.0 * printable_area[1] / image.size[1]]
        scale = min(ratios)
        #
        # Start the print job, and draw the bitmap to
        # the printer device at the scaled size.
        #
        hDC.StartDoc(file_name)
        hDC.StartPage()
        dib = ImageWin.Dib(image)
        scaled_width, scaled_height = [int(scale * i) for i in image.size]
        # Centre the scaled image on the physical page.
        x1 = int((printer_size[0] - scaled_width) / 2)
        y1 = int((printer_size[1] - scaled_height) / 2)
        x2 = x1 + scaled_width
        y2 = y1 + scaled_height
        dib.draw(hDC.GetHandleOutput(), (x1, y1, x2, y2))
        hDC.EndPage()
        hDC.EndDoc()
        hDC.DeleteDC()
|
{"/myean13/ean13generate.py": ["/myean13/renderer.py"], "/myqrcode/qrcodegenerate.py": ["/myqrcode/textencoder.py"], "/app_v1/application.py": ["/ui/body.py", "/ui/sn.py", "/app/printwidgets/print_model.py"]}
|
41,346
|
jacbeekers/devops_fitnesse_tools
|
refs/heads/master
|
/test/test_walk.py
|
import unittest
from test.walk import Walk
class TestWalk(unittest.TestCase):
    """Unit tests for test.walk.Walk.walk_through."""

    def test_walk_current_directory(self):
        """Walking '.' should count at least one file."""
        self.item = '.'
        walk = Walk()
        self.directory_counter, self.file_counter = walk.walk_through(self.item)
        self.assertGreater(self.file_counter, 0, 'walk through for >' + self.item + '< did not return any values')

    def test_walk_specific_file(self):
        """Walking a plain file path yields no entries (os.walk skips non-directories)."""
        self.filename = 'walk.py'
        walk = Walk()
        self.directory_counter, self.file_counter = walk.walk_through(self.filename)
        self.assertEqual(self.file_counter, 0,
                         'walk through for >' + self.filename + '< should not have returned a result.')

    def walk_suite(self):
        """Build a TestSuite containing both walk tests, in a fixed order."""
        suite = unittest.TestSuite()
        suite.addTest(TestWalk("test_walk_current_directory"))
        suite.addTest(TestWalk("test_walk_specific_file"))
        return suite
|
{"/test/test_walk.py": ["/test/walk.py"], "/test/run_all_tests.py": ["/test/test_createFitNesseArtifact.py", "/test/test_walk.py"], "/create_fitnesse_artifact/createFitNesseArtifact.py": ["/create_fitnesse_artifact/helpers/artifact.py"], "/test/test_createFitNesseArtifact.py": ["/create_fitnesse_artifact/createFitNesseArtifact.py"]}
|
41,347
|
jacbeekers/devops_fitnesse_tools
|
refs/heads/master
|
/test/run_all_tests.py
|
import unittest
from test.test_createFitNesseArtifact import TestFitNesseArtifact
from test.test_walk import TestWalk
class TestAll(unittest.TestCase):
    """Aggregates all project suites and reports the total test-case count.

    NOTE(review): the combined suite is only counted, never passed to a
    TestRunner, so the collected tests are not executed here — confirm
    whether actually running them was intended.
    """

    def test_all(self):
        suite1 = TestWalk.walk_suite(self)
        suite2 = TestFitNesseArtifact.fitnesse_suite(self)
        product1_suite = TestFitNesseArtifact.fitnesse_product1_suite(self)
        all_products_suite = TestFitNesseArtifact.fitnesse_all_products_suite(self)
        alltests = unittest.TestSuite((suite1, suite2, product1_suite, all_products_suite))
        print('#Test cases: ' + str(alltests.countTestCases()))


if __name__ == '__main__':
    unittest.main()
|
{"/test/test_walk.py": ["/test/walk.py"], "/test/run_all_tests.py": ["/test/test_createFitNesseArtifact.py", "/test/test_walk.py"], "/create_fitnesse_artifact/createFitNesseArtifact.py": ["/create_fitnesse_artifact/helpers/artifact.py"], "/test/test_createFitNesseArtifact.py": ["/create_fitnesse_artifact/createFitNesseArtifact.py"]}
|
41,348
|
jacbeekers/devops_fitnesse_tools
|
refs/heads/master
|
/create_fitnesse_artifact/helpers/artifact.py
|
# MIT License
#
# Copyright (c) 2019 Jac. Beekers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
# Process deploy list for FitNesse artifacts
# @Since: 23-OCT-2019
# @Author: Jac. Beekers
# @Version: 20191023.0 - JBE - Initial
# @Version: 20200529.0 - JBE - Changes due to changes in logging
import logging
import os
from pathlib import Path
import supporting.deploylist
import supporting.errorcodes as err
from supporting import filehandling
from supporting import generalSettings
from supporting.filehandling import copy_file
from supporting.generatezip import addto_zip
from supporting.generatezip import generate_zip
from supporting.logging import customLogger
from create_fitnesse_artifact.helpers import fitnesseConstants
class BuildFitNesseArtifact:
    """Build deployable zip artifacts from the entries of a FitNesse deploy list."""

    def __init__(self, fitnesse_settings, deploylist=fitnesseConstants.DEFAULT_FITNESSE_DEPLOYLIST):
        self.main_proc = 'BuildFitNesseArtifact'
        # self.logger = logging.getLogger(self.main_proc)
        self.logger = logging.getLogger(__name__)
        self.custom_logger = customLogger.CustomLogger(self.main_proc, True)
        self.entrynr = 0
        self.level = 0
        # Sentinel value that cannot collide with any real schema name.
        self.previous_schema = 'AUQW&^D*AD&FS'
        self.deploylist = deploylist
        self.fitnesse_settings = fitnesse_settings

    def processEntry(self, deployEntry):
        """Zip one deploy-list entry of the form '<directory>:<suppress_zip>'.

        The directory is looked up as-is first, then relative to the
        configured source FitNesse directory. The directory and its matching
        '<directory>.wiki' file are added to a zip in the target directory.

        NOTE(review): split(':', 2) permits up to three parts, so an entry
        containing two or more colons raises ValueError on unpacking —
        maxsplit=1 was probably intended; confirm before changing.

        Returns:
            An errorcodes entry (err.OK on success, err.DIRECTORY_NF when
            the directory cannot be found).
        """
        logger = self.logger
        thisproc = "processEntry"
        result = err.OK
        self.custom_logger.log(logger, logging.DEBUG, thisproc, "Current directory is >" + os.getcwd() + "<.")
        self.custom_logger.log(logger, logging.DEBUG, thisproc,
                               "Started to work on deploy entry >" + deployEntry + "<.")
        directory, suppress_zip = deployEntry.split(':', 2)
        self.custom_logger.log(logger, logging.DEBUG, thisproc,
                               'Directory is >' + directory + '< and suppress_zip is >' + suppress_zip + '<')
        # Zip name: directory path with '/' flattened to '_'.
        zipfilename = self.fitnesse_settings.target_fitnesse_directory + "/" + directory.replace('/', '_') + ".zip"
        self.custom_logger.log(logger, logging.DEBUG, thisproc, 'zipfilename is >' + zipfilename + "<.")
        directoryPath = Path(directory)
        if directoryPath.is_dir():
            self.custom_logger.log(logger, logging.DEBUG, thisproc, 'Found directory >' + directory + "<.")
            sourcefitnessedir = ""
        else:
            # Fall back to the configured source FitNesse directory.
            sourcefitnessedir = self.fitnesse_settings.source_fitnesse_directory + "/"
            self.custom_logger.log(logger, logging.DEBUG, thisproc, 'directory >' + directory + '< not found. Trying >'
                                   + sourcefitnessedir + directory + '<...')
            prefixed_directory = sourcefitnessedir + directory
            directoryPath = Path(prefixed_directory)
            if directoryPath.is_dir():
                self.custom_logger.log(logger, logging.DEBUG, thisproc, 'Found directory >' + prefixed_directory + "<.")
            else:
                # NOTE(review): logs at err.SQLFILE_NF.level but reports the
                # DIRECTORY_NF message/result — confirm the level is intended.
                self.custom_logger.log(logger, err.SQLFILE_NF.level, thisproc,
                                       "directory checked >" + prefixed_directory + "<. " + err.DIRECTORY_NF.message)
                result = err.DIRECTORY_NF
                return result
        if suppress_zip == 'Y':
            # The extra '*', 'zip' arguments are passed when zip files are
            # to be ignored — presumably an exclusion filter; verify in
            # supporting.generatezip.
            self.custom_logger.log(logger, logging.DEBUG, thisproc, "zip files will be ignored.")
            result = generate_zip(sourcefitnessedir, directory, zipfilename, '*', 'zip')
            self.custom_logger.log(logger, logging.DEBUG, thisproc, "generate_zip returned: " + result.code)
            self.custom_logger.log(logger, logging.DEBUG, thisproc, "Adding wiki file >" + directory + ".wiki< to zip.")
            result = addto_zip(sourcefitnessedir, directory + '.wiki', zipfilename, '*', 'zip')
        else:
            self.custom_logger.log(logger, logging.DEBUG, thisproc, "zip files will be included.")
            result = generate_zip(sourcefitnessedir, directory, zipfilename)
            self.custom_logger.log(logger, logging.DEBUG, thisproc, "generate_zip returned: " + result.code)
            self.custom_logger.log(logger, logging.DEBUG, thisproc, "Adding wiki file >" + directory + ".wiki< to zip.")
            result = addto_zip(sourcefitnessedir, directory + '.wiki', zipfilename)
        self.custom_logger.log(logger, logging.DEBUG, thisproc,
                               "Completed with rc >" + str(result.rc) + "< and code >" + result.code + "<.")
        return result

    def processList(self):
        """Process every entry of the deploy list.

        Returns:
            The last error encountered, or err.OK when everything succeeded.
            A missing deploy list (err.IGNORE) is treated as success.
        """
        latestError = err.OK
        result, deployItems = supporting.deploylist.getWorkitemList(self.deploylist)
        if result.rc == err.OK.rc:
            filehandling.create_directory(self.fitnesse_settings.target_fitnesse_directory)
            # Ship a copy of the deploy list with the artifact.
            copy_file(self.deploylist, generalSettings.artifactDir)
            # NOTE(review): iterates the module-level deployItems, not the
            # local unpacked value of the same name — confirm intent.
            for deployEntry in supporting.deploylist.deployItems:
                result = self.processEntry(deployEntry)
                if result.rc != 0:
                    latestError = result
        else:
            # if no deploy list, then that is just fine.
            if result.rc == err.IGNORE.rc:
                latestError = err.OK
            else:
                latestError = result
        return latestError
|
{"/test/test_walk.py": ["/test/walk.py"], "/test/run_all_tests.py": ["/test/test_createFitNesseArtifact.py", "/test/test_walk.py"], "/create_fitnesse_artifact/createFitNesseArtifact.py": ["/create_fitnesse_artifact/helpers/artifact.py"], "/test/test_createFitNesseArtifact.py": ["/create_fitnesse_artifact/createFitNesseArtifact.py"]}
|
41,349
|
jacbeekers/devops_fitnesse_tools
|
refs/heads/master
|
/create_fitnesse_artifact/createFitNesseArtifact.py
|
# MIT License
#
# Copyright (c) 2020 Jac. Beekers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
.. versionchanged:: 20200530.0
* moved FitNesse to its own package
* changes due to logging module changes
* documentation
"""
__since__ = '23-OCT-2019'
__version__ = '20200530.0'
__author__ = 'Jac. Beekers'
__licence__ = 'MIT'
__url__ = 'https://github.com/consag/devops_fitnesse_tools'
import argparse
import datetime
import logging
import sys
import supporting.errorcodes as err
import supporting.generalSettings as generalsettings
from supporting.logging import customLogger
import create_fitnesse_artifact.helpers.artifact
from create_fitnesse_artifact.helpers import fitnesseArtifactChecks
from create_fitnesse_artifact.helpers import fitnesseSettings
# result = err.OK
class CreateFitNesseArtifact:
    """Entry point that validates FitNesse settings and builds the artifact.

    Fix: in runit(), the 'Artifact ignored.' log call passed the ``logging``
    module where every other call site passes the ``logger`` instance.
    """

    def __init__(self, argv, log_on_console=True):
        self.now = datetime.datetime.now()
        self.arguments = argv
        self.main_proc = 'CreateFitNesseArtifact'
        self.log_on_console = log_on_console
        self.logger = logging.getLogger(__name__)
        self.custom_logger = customLogger.CustomLogger(self.main_proc, log_on_console)
        self.result_logger = customLogger.CustomLogger.configurelogger(self.custom_logger)
        self.fitnesse_settings = fitnesseSettings.FitNesseSettings()
        self.fitnesse_checks = fitnesseArtifactChecks.FitNesseArtifactChecks()
        self.result = err.OK

    def parse_the_arguments(self, arguments):
        """Parses the provided arguments and exits on an error.

        Use the option -h on the command line to get an overview of the required and optional arguments.

        Args:
            arguments: List containing command line arguments

        Returns:
            A list with validated command line arguments
        """
        parser = argparse.ArgumentParser(prog=self.main_proc)
        args = parser.parse_args(arguments)
        return args

    def runit(self, arguments):
        """Creates a FitNesse artifact, consisting of collected test directories and files

        It uses a deploy list that contains subdirectories.
        Module uses environment variables that steer the artifact creation.

        Args:
            arguments: The command line arguments (none actually at the moment)

        Returns:
            The errorcodes entry describing the outcome (err.OK on success).
        """
        thisproc = "runit"
        args = self.parse_the_arguments(arguments)
        logger = self.logger
        self.custom_logger.log(logger, logging.DEBUG, thisproc, 'Started')
        self.custom_logger.log(logger, logging.DEBUG, thisproc, 'logDir is >' + generalsettings.logDir + "<.")
        # Check requirements for artifact generation
        generalsettings.getenvvars()
        self.fitnesse_settings.getfitnesseenvvars()
        self.fitnesse_settings.outfitnesseenvvars()
        self.result = self.fitnesse_checks.fitnesse_artifact_checks(self.fitnesse_settings)
        if self.result.rc == err.IGNORE.rc:
            # deploylist is not mandatory since 2020-02-09
            # FIX: pass the logger instance, not the logging module.
            self.custom_logger.log(logger, self.result.level, thisproc, 'Artifact ignored.')
            self.result = err.OK
        else:
            if self.result.rc != err.OK.rc:
                self.custom_logger.log(logger, logging.ERROR, thisproc,
                                       'FitNesse Artifact Checks failed with >' + self.result.message + "<.")
                self.custom_logger.writeresult(self.result_logger, self.result)
            else:
                builder = create_fitnesse_artifact.helpers.artifact.BuildFitNesseArtifact(
                    self.fitnesse_settings, self.fitnesse_settings.fitnessedeploylist)
                self.result = builder.processList()
                self.custom_logger.writeresult(self.result_logger, self.result)
        self.custom_logger.log(logger, logging.DEBUG, thisproc, 'Completed with return code >' + str(self.result.rc)
                               + '< and result code >' + self.result.code + "<.")
        # supporting.writeresult(resultlogger, result)
        # supporting.exitscript(resultlogger, result)
        return self.result
if __name__ == '__main__':
    # Command-line entry: build the artifact and exit with its result code.
    fitnesse = CreateFitNesseArtifact(sys.argv[1:], log_on_console=False)
    result = fitnesse.runit(fitnesse.arguments)
    fitnesse.custom_logger.exitscript(fitnesse.result_logger, result)
|
{"/test/test_walk.py": ["/test/walk.py"], "/test/run_all_tests.py": ["/test/test_createFitNesseArtifact.py", "/test/test_walk.py"], "/create_fitnesse_artifact/createFitNesseArtifact.py": ["/create_fitnesse_artifact/helpers/artifact.py"], "/test/test_createFitNesseArtifact.py": ["/create_fitnesse_artifact/createFitNesseArtifact.py"]}
|
41,350
|
jacbeekers/devops_fitnesse_tools
|
refs/heads/master
|
/test/test_createFitNesseArtifact.py
|
import sys
import unittest
from pathlib import Path
from unittest.mock import patch
from supporting import errorcodes
from create_fitnesse_artifact.createFitNesseArtifact import CreateFitNesseArtifact
from create_fitnesse_artifact.helpers.fitnesseConstants import varFitNesseDeployList
from create_fitnesse_artifact.helpers.fitnesseConstants import varSourceFitNesseDir
from create_fitnesse_artifact.helpers.fitnesseConstants import varTargetFitNesseDir
from os.path import abspath
class TestFitNesseArtifact(unittest.TestCase):
    """Tests for CreateFitNesseArtifact, driven by environment variables
    patched per-test (deploy list, source and target FitNesse directories).
    """

    def setUp(self):
        # Fresh artifact builder for every test.
        self.fitnesse = CreateFitNesseArtifact(sys.argv[1:], log_on_console=True)

    def test_fitnesse_artifact_all_defaults(self):
        """Running with unpatched environment must succeed (rc 0)."""
        self.result = self.fitnesse.runit(self.fitnesse.arguments)
        assert self.result.rc == 0

    def run_test(self, deploylist='fitnesse_deploylist.txt', source_fitnesse='.', target_fitnesse='.'):
        """Patch the FitNesse environment variables, sanity-check the paths,
        run the artifact build, and return its result.
        """
        self.deploylist = deploylist
        self.env = patch.dict('os.environ', {varFitNesseDeployList: self.deploylist
            , varSourceFitNesseDir: source_fitnesse
            , varTargetFitNesseDir: target_fitnesse})
        with self.env:
            self.assertTrue(Path(self.deploylist).exists(),
                            'File >' + self.deploylist + '< does not exist. Cannot test.')
            self.assertTrue(Path(source_fitnesse).exists(),
                            'Source FitNesse path >' + source_fitnesse + '< with full path >' + abspath(Path(source_fitnesse)) + '< does not exist.')
            self.assertTrue(Path(target_fitnesse).exists(),
                            'Target FitNesse path >' + target_fitnesse + '< does not exist.')
            self.result = self.fitnesse.runit(self.fitnesse.arguments)
        return self.result

    def test_fitnesse_artifact_valid_deploylist_nonexisting_directory(self):
        """A deploy list pointing at a missing directory must yield DIRECTORY_NF."""
        self.deploylist = 'resources/fitnesse_deploylist_parsetests_nonexisting_directory.txt'
        self.result = self.run_test(self.deploylist)
        self.assertTrue(self.result.rc == errorcodes.DIRECTORY_NF.rc,
                        "The directory should not exist in this test case, which is error code="
                        + str(
                            errorcodes.DIRECTORY_NF.rc) + ". You may also want to check the error codes in the supporting package")

    def test_fitnesse_artifact_valid_deploylist_existing_directories(self):
        """A deploy list with existing directories must succeed (rc 0)."""
        self.deploylist = 'resources/fitnesse_deploylist_parsetests_existing_directory.txt'
        self.result = self.run_test(self.deploylist)
        assert self.result.rc == 0

    def fitnesse_suite(self):
        """Suite with the three deploy-list parsing tests, in a fixed order."""
        suite = unittest.TestSuite()
        suite.addTest(TestFitNesseArtifact("test_fitnesse_artifact_all_defaults"))
        suite.addTest(TestFitNesseArtifact("test_fitnesse_artifact_valid_deploylist_nonexisting_directory"))
        suite.addTest(TestFitNesseArtifact("test_fitnesse_artifact_valid_deploylist_existing_directories"))
        return suite

    """
    Actual pages from a FitNesse installation
    """

    def test_fitnesse_product1(self):
        """
        Suite that is part of a folder and contains a test page
        Returns: success or not
        """
        self.deploylist = 'resources/fitnesse_product1_deploylist.txt'
        self.result = self.run_test(self.deploylist, source_fitnesse='../fitnesse/FitNesseRoot')
        assert self.result.rc == 0

    def test_basic_fixtures(self):
        """
        Suite that is part of a folder and contains a Suite and a Folder that contains a Suite
        Returns: success or not
        """
        self.deploylist = 'resources/fitnesse_allproducts_deploylist.txt'
        self.result = self.run_test(self.deploylist)
        assert self.result.rc == 0

    def fitnesse_product1_suite(self):
        """Suite with only the product1 test."""
        suite = unittest.TestSuite()
        suite.addTest(TestFitNesseArtifact("test_fitnesse_product1"))
        return suite

    def fitnesse_all_products_suite(self):
        """Suite with the product1 and all-products tests."""
        suite = unittest.TestSuite()
        suite.addTest(TestFitNesseArtifact("test_fitnesse_product1"))
        suite.addTest(TestFitNesseArtifact("test_basic_fixtures"))
        return suite
|
{"/test/test_walk.py": ["/test/walk.py"], "/test/run_all_tests.py": ["/test/test_createFitNesseArtifact.py", "/test/test_walk.py"], "/create_fitnesse_artifact/createFitNesseArtifact.py": ["/create_fitnesse_artifact/helpers/artifact.py"], "/test/test_createFitNesseArtifact.py": ["/create_fitnesse_artifact/createFitNesseArtifact.py"]}
|
41,351
|
jacbeekers/devops_fitnesse_tools
|
refs/heads/master
|
/test/walk.py
|
import os
class Walk:
    """Recursively enumerate the files and directories under a path."""

    def walk_through(self, item):
        """Walk *item*, print every entry found, and return the counts.

        A path that is not a directory yields (0, 0), because os.walk
        produces nothing for it.

        Returns:
            tuple: (directory_count, file_count)
        """
        self.file_counter = 0
        self.directory_counter = 0
        for root, dirs, files in os.walk(item):
            for entry in files:
                self.file_counter += 1
                print("File#%d: %s" % (self.file_counter, os.path.join(root, entry)))
            for entry in dirs:
                self.directory_counter += 1
                print("Directory#%d: %s" % (self.directory_counter, os.path.join(root, entry)))
        return self.directory_counter, self.file_counter
|
{"/test/test_walk.py": ["/test/walk.py"], "/test/run_all_tests.py": ["/test/test_createFitNesseArtifact.py", "/test/test_walk.py"], "/create_fitnesse_artifact/createFitNesseArtifact.py": ["/create_fitnesse_artifact/helpers/artifact.py"], "/test/test_createFitNesseArtifact.py": ["/create_fitnesse_artifact/createFitNesseArtifact.py"]}
|
41,352
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/Music/views.py
|
from django.shortcuts import render
#from django import http
from django.http import HttpResponse as hr
from django.template.loader import render_to_string
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import *
from django.contrib.auth import login, authenticate, logout
from .forms import *
#def Home(request):
#data = render_to_string("index.html")
#return http.HttpResponse("<h1>hello guys</h1>")
#Dict = {"Name": ["subhashri","sanjukta", "prapty"], "Age" :[27], "url": ["http://127.0.0.1:8000/jetix"]}
#return hr(data, Dict)
#return render(request, "index.html", Dict)
#return render(request, "index.html")
def Home(request):
    """Render the index page with (name, url) pairs under the 'names' key."""
    links = zip(
        ['ankit', 'Anubhav', 'Deep'],
        ['https://www.facebook.com', 'https://google.com', 'https://linkedin.com'],
    )
    return render(request, "index.html", {"names": links})
def Home2(request):
    """Render index2 with a fixed list of names under the 'data' key."""
    context = {"data": ["ankit", "amit", "rohit", "sandeep", "animesh"]}
    return render(request, "index2.html", context)
#def page1(request):
#d1= render_to_string("P1.html")
#return hr(d1)
#return render(request, "p1.html")
def page1(request):
    """Render the P1 page template."""
    return render(request, "P1.html")
def page2(request):
    """Return p2.html rendered to a string as a raw HttpResponse."""
    return hr(render_to_string("p2.html"))
def page3(request):
    """Render p3.html to a string and wrap it in an HttpResponse."""
    return hr(render_to_string("p3.html"))
def page4(request):
    """Render p4.html to a string and wrap it in an HttpResponse."""
    return hr(render_to_string("p4.html"))
def songnumber(request, snum):
    """Echo the requested song number back as plain text."""
    return hr(f"Searching for song no {snum}")
def studentdetails(request, name):
    """Echo the requested student name back as plain text."""
    return hr(f"yu want details about: {name}")
#from .models import Albums as al
from .models import Album as al
def Album(request):
    """List every album on the album2 template."""
    context = {"albums": al.objects.all()}
    return render(request, "music/album2.html", context)
#from .models import *
#def myAlbum(request):
# data = Album.objects.all()
# Dict={
# "albums":data
#}
#return render(request, "music/album.html", Dict)
def AlbumDetails(requests, a_id):
    """Show one album (by primary key) together with its songs."""
    selected = al.objects.get(id=a_id)
    track_list = Songs.objects.filter(album_id=selected)
    context = {"album": selected, "Album_songs": track_list}
    return render(requests, "music/albumdetails.html", context)
from .models import Songs as Songs
def SongList(request):
    """Render the full song catalogue."""
    context = {"songs": Songs.objects.all()}
    return render(request, "music/SongsList.html", context)
from .models import College as Clg
def Coll(request):
    """Render the list of all colleges."""
    context = {"college": Clg.objects.all()}
    return render(request, "music/CollegeList.html", context)
def Add_Album_old(request):
    """Legacy add-album view that reads the raw POST/FILES payload by hand."""
    # Guard clause: anonymous visitors go to the login page first.
    if not request.user.is_authenticated:
        return redirect("login", 'addAlbum')
    if request.method == "POST":
        posted = request.POST
        album = al()
        album.name = posted["album_name"]
        album.artist = posted["artist_name"]
        album.image = request.FILES["album_banner"]
        album.save()
        return redirect("album")
    return render(request, "music/Add_Album.html")
def Add_Album(request):
    """Create an album via Add_Album_Form (login required).

    On a valid POST the artist name is prefixed with "Mr " before the
    record is saved, then the visitor is sent back to the album list.
    """
    if not request.user.is_authenticated:
        return redirect("login", 'album')
    form = Add_Album_Form()
    if request.method == "POST":
        # Bug fix: request.FILES must be bound as well, otherwise the
        # uploaded album image (an ImageField) is silently dropped.
        # This also matches how Edit_Album binds the same form.
        form = Add_Album_Form(request.POST, request.FILES)
        if form.is_valid():
            data = form.save(commit=False)
            data.artist = "Mr " + data.artist
            data.save()
            return redirect('album')
    Dict = {"form": form}
    return render(request, "music/Add_Album.html", Dict)
def Delete_Album(request, a_id):
    """Delete the album with primary key a_id (login required)."""
    if not request.user.is_authenticated:
        return redirect('login', 'album')
    al.objects.get(id=a_id).delete()
    return redirect('album')
def Edit_Album(request, a_id):
    """Edit an existing album through Add_Album_Form.

    Consistency/security fix: Add_Album and Delete_Album both require an
    authenticated user, but editing did not — anonymous visitors could
    modify any album.  They are now bounced to the login page first.
    """
    if not request.user.is_authenticated:
        return redirect('login', 'album')
    album = al.objects.get(id=a_id)
    # Bind both POST data and uploaded files so the cover image can change.
    form = Add_Album_Form(request.POST or None, request.FILES or None, instance=album)
    if form.is_valid():
        form.save()
        return redirect('album')
    Dict = {
        "form": form
    }
    return render(request, 'music/Add_Album.html', Dict)
import time
def SubscribeUs(request):
    """Placeholder subscribe endpoint.

    Bug fix: the original returned None, and Django raises a ValueError
    when a view does not return an HttpResponse.  An explicit response is
    now returned.  The 5-second sleep (simulating slow processing) is
    kept from the original.  TODO: implement the real POST handling.
    """
    time.sleep(5)
    return hr("Subscribed")
def Add_NewSong(request):
    """Attach a new song (title + audio file) to an existing album."""
    if request.method == "POST":
        posted = request.POST
        title = posted["song_title_name"]
        album_pk = posted["album"]
        upload = request.FILES["song_file"]
        parent = al.objects.get(id=album_pk)
        Songs.objects.create(title=title, album_id=parent, file=upload)
        return redirect("album")
    # GET: offer the album list (alphabetical) for the song form.
    context = {"albums": al.objects.all().order_by("name")}
    return render(request, "music/Add_Song.html", context)
def Login(request, location):
    """Authenticate a user and redirect to `location` on success.

    On failure the login form is re-shown with an error flag and the
    last username the visitor typed.
    """
    error = False
    last_un = ""
    if request.method == "POST":
        posted = request.POST
        last_un = posted['un']
        user = authenticate(username=last_un, password=posted["ps"])
        if user is not None:
            login(request, user)
            return redirect(location)
        error = True
    context = {"error": error, "last_un": last_un}
    return render(request, "music/login.html", context)
def Logout(request):
    """End the current session, then return to the album list."""
    logout(request)
    return redirect('album')
def Register(request):
    """Create a new user account from the posted form fields.

    Bug fix: the 'login' URL pattern ('Login_Account/<str:location>/')
    requires a mandatory `location` argument, so redirect('login')
    raised NoReverseMatch after a successful registration.  A target
    location is now supplied explicitly.
    """
    if request.method == "POST":
        data = request.POST
        usr = User.objects.create_user(data['un'], data['email'], data['ps'])
        usr.first_name = data['name']
        usr.save()
        return redirect('login', 'album')
    return render(request, "music/register.html")
def Error_404(request, exception = True):
    """Custom 404 view: returns a plain-text error page.

    `exception` is unused here; presumably it exists because Django
    passes the resolver exception to handler404 views — TODO confirm.
    """
    return hr("My Error 404 Page")
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,353
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/Music/models.py
|
from django.db import models
# Create your models here.
class Album(models.Model):
    """A music album: name, artist and an optional cover image."""
    name = models.CharField(max_length=50, blank=True)
    artist = models.CharField(max_length=50, blank=True)
    image = models.ImageField(null=True, blank=True)
    def __str__(self):
        # Shown in the Django admin and shells.
        return self.name
class Songs(models.Model):
    """A single song with its audio file, belonging to an Album."""
    title = models.CharField(max_length=50)
    # `album_id` was once a plain IntegerField (see history below); it is
    # now a real FK but keeps the legacy name, so the DB column is album_id_id.
    #album_id = models.IntegerField(null=True)
    album_id = models.ForeignKey(Album, on_delete=models.CASCADE, null=True)
    file = models.FileField(null=True)
    def __str__(self):
        return self.title
class College(models.Model):
    """A college with a name and address (`add`)."""
    name = models.CharField(max_length = 40)
    add = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class Students(models.Model):
    """A student, optionally linked to a favourite song and a college."""
    # SET_NULL keeps the student row if the song is deleted.
    song = models.ForeignKey(Songs,on_delete=models.SET_NULL, null = True, blank=True)
    # CASCADE removes students when their college is deleted.
    college = models.ForeignKey(College, on_delete = models.CASCADE, null = True)
    name = models.CharField(max_length = 40)
    branch = models.CharField(max_length= 30)
    def __str__(self):
        return self.name
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,354
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/Music/tests.py
|
#name = ['ankit', 'swarnali', 'sanjukta']
#url=['https://www.facebook.com','https://google.com','https://linkedin.com']
#data = zip(name,url)
class College:
    """Toy college record used for ad-hoc ORM-free experimentation."""

    def __init__(self, name="RCC"):
        # id and contact are hard-coded fixture values.
        self.id = 2
        self.contact = 1234
        self.name = name
class Students:
    """Toy student record that points at a College instance."""

    def __init__(self, college, name):
        self.name = name
        self.college = college
# Ad-hoc smoke test executed at import time: build two colleges and two
# students, then print the student names (plus one student's college name).
col1 = College("ECELL IITR")
col2 = College()
stu1 = Students(name = "pratik", college=col1)
print(stu1.name)
stu2 = Students(name = "ishita", college=col2)
print(stu2.name, stu2.college.name)
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,355
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/Music/migrations/0007_auto_20200602_0927.py
|
# Generated by Django 3.0.6 on 2020-06-02 09:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); applied migrations must
    # not be hand-edited.  This one pins Album.image's upload_to='' and
    # additionally allows Students.song to be blank in forms.
    dependencies = [
        ('Music', '0006_college_students'),
    ]
    operations = [
        migrations.AlterField(
            model_name='album',
            name='image',
            field=models.ImageField(null=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='students',
            name='song',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='Music.Songs'),
        ),
    ]
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,356
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/Music/migrations/0006_college_students.py
|
# Generated by Django 3.0.6 on 2020-06-02 07:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); applied migrations must
    # not be hand-edited.  Creates the College and Students tables, with
    # Students FK'd to College (CASCADE) and Songs (SET_NULL).
    dependencies = [
        ('Music', '0005_auto_20200601_1314'),
    ]
    operations = [
        migrations.CreateModel(
            name='College',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('add', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Students',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('branch', models.CharField(max_length=30)),
                ('college', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Music.College')),
                ('song', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Music.Songs')),
            ],
        ),
    ]
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,357
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/myweb/urls.py
|
"""myweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Music.views import Home
from Music.views import *
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import handler404, handler500
#imports everything from views.py
#from Music.views import p2
# Route table for the Music app; media files are served from MEDIA_ROOT
# during development via the static() helper appended below.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('myhome/', Home, name = "h"),
    path('mypage/', page1, name="pg1"),
    path('jetix/', page2, name="pg2"),
    path('hungama/', page3, name="pg3"),
    path('cn/', page4, name="pg4"),
    path('song/<int:snum>/', songnumber, name='Songs'),
    path('details/<str:name>/', studentdetails, name="students"),
    path('', Home2, name=""),
    #myalbum
    path('myalbum/', Album, name='album'),
    path('albumdetails/<int:a_id>/', AlbumDetails, name='details'),
    path('all_songs/', SongList, name = "Allsongs"),
    path('all_colleges/', Coll , name = "Allcolleges"),
    path('add_album/', Add_Album, name = "addAlbum"),
    path('delete_album/<int:a_id>', Delete_Album, name = "deleteAlbum"),
    path('edit_album/<int:a_id>', Edit_Album, name = "editAlbum"),
    path('add_song/', Add_NewSong, name = "addSong"),
    path('Login_Account/<str:location>/', Login, name = "login"),
    path('Logout_Account/', Logout, name = "logout"),
    path('Register_Account/', Register, name = "register"),
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
# Bug fix: handler404 must be assigned the view *callable* (or a dotted
# path), not the result of calling it.  `Error_404()` executed the view
# at import time without its required `request` argument, raising a
# TypeError before the URLconf could even load.
handler404 = Error_404
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,358
|
ankit98040/Pydjango-Myweb
|
refs/heads/master
|
/PYDJANGO/myweb/Music/admin.py
|
from django.contrib import admin
# Register your models here.
#from Music.models import Album
#from Music import *
#from Music.models import *
#from .models import Album
from .models import *
# Expose every Music model in the Django admin site.
for music_model in (Album, Songs, College, Students):
    admin.site.register(music_model)
|
{"/PYDJANGO/myweb/Music/views.py": ["/PYDJANGO/myweb/Music/models.py"], "/PYDJANGO/myweb/Music/admin.py": ["/PYDJANGO/myweb/Music/models.py"]}
|
41,368
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/cmd_ypk_cpp.py
|
#! /usr/bin/python
#
from xgen.util import *
from xgen.yxsd import *
from xgen import util
import glob
import os
from jinja2 import Environment, DictLoader
# 2.7
# reload(sys)
# import sys
# sys.setdefaultencoding('utf-8')
import logging
logger = logging.getLogger(__name__)
def ypkcppname(name):
    """Jinja filter: derive the YPK C++ class name ('Y' prefix + cppname)."""
    return 'Y%s' % cppname(name)
def nodecppname(path):
    """Jinja filter: C++ variable name for an XML node path.

    'yNode' and 'yDoc' pass through untouched; otherwise a leading '/'
    is stripped and the last ':'-separated segment of the path is
    prefixed with 'node' after cppname normalization.
    """
    if path in ('yNode', 'yDoc'):
        return path
    if path.startswith('/'):
        path = path[1:]
    # Idiom fix: take the final prefix:name segment directly instead of
    # looping over every segment just to keep the last one.
    name = path.rsplit(':', 1)[-1]
    return 'node' + cppname(name)
def nsprefix(prefix):
    """Jinja filter: C++ namespace-pointer variable name for an XML prefix."""
    return 'nsMod' if prefix == '' else 'ns' + cppname(prefix)
def myindent(indent=0):
    """Jinja filter: four spaces of indentation per level."""
    return '    ' * indent
def mysplit(string, sep):
    """Jinja filter: expose str.split(sep) inside templates."""
    parts = string.split(sep)
    return parts
def nodename(path):
    """Jinja filter: strip a single leading '/' from a node path."""
    return path[1:] if path.startswith('/') else path
class CmdYPK(object):
    """Generate C++ YPK serialization sources from a directory of *.xsd
    YANG models.

    One Y<Module>.h / Y<Module>.cpp pair is emitted per module, plus a
    bundle-level header/impl and fixed YPK / YPBIncremental files, all
    rendered through an in-memory Jinja2 environment.
    """
    def __init__(self, inXsdDir, inBundle):
        # m_mods maps module name -> parsed YModule; filled in run().
        self.m_mods = {}
        self.m_xsd_dir = inXsdDir
        self.m_bundle = inBundle
        self.initenv()
    def initenv(self):
        """Build the Jinja2 environment: dict-loaded templates plus the
        module-level filter functions and the xSimpleTypeEnum test."""
        # NOTE(review): CPP_YPK_TEMPLATE, H_YPK_TEMPLATE and
        # CPP_YPBINCREMENTAL_TEMPLATE are defined elsewhere in this
        # module (not visible here) — presumably further down the file.
        mytemplates = {
            "h_ymod": H_YMOD_TEMPLATE,
            "cpp_ymod_base": CPP_YMOD_BASE_TEMPLATE,
            "cpp_ymod": CPP_YMOD_TEMPLATE,
            "h_bundle": H_BUNDLE_TEMPLATE,
            "cpp_bundle": CPP_BUNDLE_TEMPLATE,
            "cpp_ypk": CPP_YPK_TEMPLATE,
            "h_ypk": H_YPK_TEMPLATE,
            "cpp_ypb_incremental": CPP_YPBINCREMENTAL_TEMPLATE
        }
        # trim_blocks/lstrip_blocks keep the generated C++ free of the
        # whitespace that Jinja block tags would otherwise leave behind.
        self.m_env = Environment(loader=DictLoader(
            mytemplates), trim_blocks=True, lstrip_blocks=True)
        self.m_env.filters["ypkcppname"] = ypkcppname
        self.m_env.filters["pbname"] = pbname
        self.m_env.filters["cppname"] = cppname
        self.m_env.filters["nodecppname"] = nodecppname
        self.m_env.filters["nodename"] = nodename
        self.m_env.filters["nsprefix"] = nsprefix
        self.m_env.filters["myindent"] = myindent
        self.m_env.filters["mysplit"] = mysplit
        self.m_env.filters["cppnormalize"] = util.cppnormalize
        self.m_env.tests['xSimpleTypeEnum'] = isXSimpleTypeEnum
    def run(self, inDir):
        """Parse every *.xsd under m_xsd_dir and write all generated C++
        files into inDir (created if missing)."""
        if os.path.exists(inDir) is False:
            os.mkdir(inDir)
        path = self.m_xsd_dir + '/*.xsd'
        for xsd in glob.glob(path):
            mod = YModule(xsd)
            mod.parse()
            self.m_mods[mod.m_modname] = mod
            modname = mod.m_modname
            modtype = mod.m_modtype
            # Per-module header and implementation.
            outfile = inDir + "/" + ypkcppname(modname) + ".h"
            logger.info("generate %s" % outfile)
            with open(outfile, 'w') as f:
                output = self.m_env.get_template('h_ymod').render(
                    ymod=mod, ybundle=self.m_bundle)
                f.write(output)
            outfile = inDir + "/" + ypkcppname(modname) + ".cpp"
            logger.info("generate %s" % outfile)
            with open(outfile, 'w') as f:
                output = self.m_env.get_template('cpp_ymod').render(
                    ymod=mod, ybundle=self.m_bundle)
                f.write(output)
        # Bundle-level header/impl covering every parsed module.
        outfile = inDir + "/" + self.m_bundle + ".h"
        logger.info("generate %s" % outfile)
        with open(outfile, 'w') as f:
            output = self.m_env.get_template('h_bundle').render(
                ybundle=self.m_bundle, ymods=self.m_mods.values())
            f.write(output)
        outfile = inDir + "/" + self.m_bundle + ".cpp"
        logger.info("generate %s" % outfile)
        with open(outfile, 'w') as f:
            output = self.m_env.get_template('cpp_bundle').render(
                ybundle=self.m_bundle, ymods=self.m_mods.values())
            f.write(output)
        # Fixed (module-independent) support files.
        outfile = inDir + "/" + "YPK.h"
        logger.info("generate %s" % outfile)
        with open(outfile, 'w') as f:
            output = self.m_env.get_template('h_ypk').render()
            f.write(output)
        outfile = inDir + "/" + "YPK.cpp"
        logger.info("generate %s" % outfile)
        with open(outfile, 'w') as f:
            output = self.m_env.get_template('cpp_ypk').render()
            f.write(output)
        outfile = inDir + "/" + "YPBIncremental.cpp"
        logger.info("generate %s" % outfile)
        with open(outfile, 'w') as f:
            output = self.m_env.get_template('cpp_ypb_incremental').render()
            f.write(output)
cmddescription = 'generate cpp from dev yxsd model'
def makeoptions(optparser):
    """Register the CLI options this subcommand understands."""
    optparser.add_argument(
        "--bundle",
        type=str,
        help="specify yang bundle")
def run(options):
    """Subcommand entry point: generate all C++ artifacts into options.output."""
    CmdYPK(options.input, options.bundle).run(options.output)
H_YMOD_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
#pragma once
#include "YPK.h"
#include "{{ymod.m_modname|pbname}}.pb.h"
using namespace YPK;
namespace {{ybundle}}
{
class YPK_API {{ymod.m_modname | ypkcppname}} : public YPK::YPKBase
{
public:
virtual int YConfigGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc);
virtual int YFilterGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc);
virtual int YRpcGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc);
virtual int YRpcRetGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule);
virtual int YNotifyGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule);
virtual int YPBGen(xmlNodePtr yNode, YPK::YPBMessagePtr pModule);
private:
{% for complextype in ymod.m_complex_types %}
{% if complextype.m_modname %}
int YXmlGen(const {{ybundle}}PB::{{ymod.m_modname | pbname}}::{{complextype.m_name_pb}} &st{{complextype.m_name_cpp}}, xmlNodePtr yNode);
int YPBGen(const xmlNodePtr yNode, {{ybundle}}PB::{{ymod.m_modname | pbname}}::{{complextype.m_name_pb}} &st{{complextype.m_name_cpp}});
{% endif %}
{% endfor%}
{% for complextype in ymod.m_complex_types %}
{% if not complextype.m_modname %}
int YXmlGen(const {{ybundle}}PB::{{ymod.m_modname | pbname}}::{{complextype.m_name_pb}} &st{{complextype.m_name_cpp}}, xmlNodePtr yNode);
int YPBGen(const xmlNodePtr yNode, {{ybundle}}PB::{{ymod.m_modname | pbname}}::{{complextype.m_name_pb}} &st{{complextype.m_name_cpp}});
{% endif %}
{% endfor %}
public:
{%for xsimple in ymod.m_simple_types %}
{%if xsimple is xSimpleTypeEnum %}
struct {{xsimple.m_name | cppnormalize}} {
enum {
{% for xenum in xsimple.m_enums.values() %}
{{xenum.m_en | cppnormalize}} = {{xenum.m_field_index}},
{% endfor %}
};
static const char* name(int value) {
switch (value) {
{% for xenum in xsimple.m_enums.values() %}
case {{xenum.m_field_index}}:
return "{{xenum.m_en}}";
{% endfor %}
default:
assert(false);
return "invalid";
}
}
static int value(std::string name) {
{% for xenum in xsimple.m_enums.values() %}
if (name == "{{xenum.m_en}}")
return {{xenum.m_field_index}};
{% endfor %}
assert(false);
return -1;
}
};
{%endif %}
{%endfor %}
};
}
"""
CPP_YMOD_BASE_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
{% set modname = ymod.m_modname %}
{% set modpbname = ymod.m_modname | pbname %}
{% set modpbtype = ymod.m_modtype | pbname %}
{% set modcppname = ymod.m_modname | ypkcppname %}
#include "{{modcppname}}.h"
using namespace YPK;
namespace {{ybundle}}
{
int {{modcppname}}::YConfigGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc)
{
xmlNodePtr nodeConfig = xmlDocGetRootElement(yDoc);
if (nodeConfig == NULL) {
nodeConfig = xmlNewDocNode(yDoc, NULL, BAD_CAST "config", NULL);
xmlDocSetRootElement(yDoc, nodeConfig);
}
{{ybundle}}PB::{{modpbname}}::{{modpbtype}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{modpbtype}}*>(pModule.get());
return YXmlGen(*pPtr, nodeConfig);
}
int {{modcppname}}::YFilterGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc)
{
xmlNodePtr nodeFilter = xmlDocGetRootElement(yDoc);
if (nodeFilter == NULL) {
nodeFilter = xmlNewDocNode(yDoc, NULL, BAD_CAST "filter", NULL);
xmlDocSetRootElement(yDoc, nodeFilter);
}
{{ybundle}}PB::{{modpbname}}::{{modpbtype}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{modpbtype}}*>(pModule.get());
return YXmlGen(*pPtr, nodeFilter);
}
int {{modcppname}}::YRpcGen(YPK::YPBMessagePtr pRpcPB, xmlDocPtr yDoc)
{
std::string strMsgName = pRpcPB->GetTypeName();
{%for rpc in ymod.m_rpcs.values() %}
{% if rpc.m_input %}
if (strMsgName == "{{ybundle}}PB.{{modpbname}}.{{rpc.m_input|pbname}}") {
{{ybundle}}PB::{{modpbname}}::{{rpc.m_input|pbname}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{rpc.m_input|pbname}}*>(pRpcPB.get());
xmlNodePtr nodeRpc = xmlNewDocNode(yDoc, NULL, BAD_CAST "{{rpc.m_name}}", NULL);
xmlDocSetRootElement(yDoc, nodeRpc);
xmlNsPtr pNs = xmlNewNs(nodeRpc, BAD_CAST "{{rpc.m_namespace}}", NULL);
xmlSetNs(nodeRpc, pNs);
return YXmlGen(*pPtr, nodeRpc);
}
{% else %}
if (strMsgName == "{{ybundle}}PB.{{modpbname}}.{{rpc.m_name|pbname}}") {
{{ybundle}}PB::{{modpbname}}::{{rpc.m_name|pbname}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{rpc.m_name|pbname}}*>(pRpcPB.get());
xmlNodePtr nodeRpc = xmlNewDocNode(yDoc, NULL, BAD_CAST "{{rpc.m_name}}", NULL);
xmlDocSetRootElement(yDoc, nodeRpc);
xmlNsPtr pNs = xmlNewNs(nodeRpc, BAD_CAST "{{rpc.m_namespace}}", NULL);
xmlSetNs(nodeRpc, pNs);
return 0;
}
{% endif %}
{%endfor %}
return 0;
}
int {{modcppname}}::YRpcRetGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pPB)
{
std::string strMsgName = pPB->GetTypeName();
{%for rpc in ymod.m_rpcs.values() %}
{% if rpc.m_output %}
if (strMsgName == "{{ybundle}}PB.{{modpbname}}.{{rpc.m_output|pbname}}") {
{{ybundle}}PB::{{modpbname}}::{{rpc.m_output|pbname}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{rpc.m_output|pbname}}*>(pPB.get());
return YPBGen((xmlNodePtr)yDoc, *pPtr);
}
{% endif %}
{%endfor %}
return 0;
}
int {{modcppname}}::YNotifyGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pPB)
{
std::string strMsgName = pPB->GetTypeName();
{%for notify in ymod.m_notifys.values() %}
{% if notify.m_type %}
if (strMsgName == "{{ybundle}}PB.{{modpbname}}.{{notify.m_type|pbname}}") {
{{ybundle}}PB::{{modpbname}}::{{notify.m_type|pbname}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{notify.m_type|pbname}}*>(pPB.get());
return YPBGen((xmlNodePtr)yDoc, *pPtr);
}
{% endif %}
{%endfor %}
return 0;
}
int {{modcppname}}::YPBGen(xmlNodePtr yNode, YPK::YPBMessagePtr pModule)
{
{{ybundle}}PB::{{modpbname}}::{{modpbtype}} *pPtr = dynamic_cast<{{ybundle}}PB::{{modpbname}}::{{modpbtype}}*>(pModule.get());
return YPBGen(yNode, *pPtr);
}
{%for complextype in ymod.m_complex_types %}
{%set stmsg = 'st' + complextype.m_name_cpp %}
int {{modcppname}}::YXmlGen(const {{ybundle}}PB::{{modpbname}}::{{complextype.m_name_pb}} &{{stmsg}}, xmlNodePtr yNode)
{
xmlNodePtr nodeLeaf = NULL;
{% block mandgen scoped%}
{% endblock %}
{% block parentnoshared scoped%}
{% endblock %}
{% block xnode scoped%}
{% endblock %}
return 0;
}
int {{modcppname}}::YPBGen(xmlNodePtr yNode, {{ybundle}}PB::{{modpbname}}::{{complextype.m_name_pb}} &{{stmsg}})
{
std::vector<xmlNodePtr> vecNodes;
xmlNodePtr childNode;
{% for field in complextype.m_fields %}
{% if field.m_path and field.m_path[0] == '/' %}
{% set fromself = 'true' %}
{% set ypath = field.m_path[1:] %}
{% else %}
{% set fromself = 'false' %}
{% set ypath = field.m_path %}
{% endif %}
{% if field.m_list %}
YPK::selectNodes(yNode, "{{ypath}}", {{fromself}}, vecNodes);
for (int i = 0; i < vecNodes.size(); i++) {
{% if field.m_pbtype in ('int32', 'uint32', 'int64') %}
{{stmsg}}.add_{{field.m_pbname}}(YInteger((const char*)getLeafNodeValue(vecNodes[i])));
{% elif field.m_pbtype in ('uint64') %}
{{stmsg}}.add_{{field.m_pbname}}(YUInteger((const char*)getLeafNodeValue(vecNodes[i])));
{% elif field.m_pbtype == 'bytes' %}
{{stmsg}}.add_{{field.m_pbname}}((const char*)getLeafNodeValue(vecNodes[i]));
{% elif field.m_pbtype == 'enum' %}
{{stmsg}}.add_{{field.m_pbname}}({{field.m_type | cppnormalize}}::value((const char*)getLeafNodeValue(vecNodes[i])));
{% else %}
xmlNodePtr xNode = vecNodes[i];
YPBGen(xNode, *{{stmsg}}.add_{{field.m_pbname}}());
{% endif %}
}
{% elif field.m_leaf %}
{% if not ypath %}
childNode = YPK::selectNode(yNode, "{{field.m_leafname}}", {{fromself}});
{% elif ypath[-1] == '/' %}
childNode = YPK::selectNode(yNode, "{{(ypath, field.m_leafname) | join}}", {{fromself}});
{% else %}
childNode = YPK::selectNode(yNode, "{{(ypath, '/', field.m_leafname) | join}}", {{fromself}});
{% endif %}
if (NULL != childNode) {
{% if field.m_typename in ('int32', 'uint32', 'int64') %}
{{stmsg}}.set_{{field.m_pbname}}(YInteger((const char*)getLeafNodeValue(childNode)));
{% elif field.m_typename in ('uint64') %}
{{stmsg}}.set_{{field.m_pbname}}(YUInteger((const char*)getLeafNodeValue(childNode)));
{% elif field.m_typename == 'string' %}
{{stmsg}}.set_{{field.m_pbname}}((const char*)getLeafNodeValue(childNode));
{% elif field.m_typename == 'enum' %}
{{stmsg}}.set_{{field.m_pbname}}({{field.m_type | cppnormalize}}::value((const char*)getLeafNodeValue(childNode)));
{% else %}
unkown leaf type {{field.m_typename}}
{% endif %}
}
{% else %}
childNode = selectNode(yNode, "{{ypath}}", {{fromself}});
if (NULL != childNode) {
YPBGen(childNode, *{{stmsg}}.mutable_{{field.m_pbname}}());
}
{% endif %}
{% endfor %}
return 0;
}
{%endfor %}
}
"""
CPP_YMOD_TEMPLATE = """{% extends 'cpp_ymod_base' %}
{% macro createrootnode(path, indent) %}
{% set createrootnode | indent(indent*4, True )%}
xmlNodePtr {{path|nodecppname }} = xmlNewChild(yNode, NULL, BAD_CAST "{{path|nodename}}", NULL);
{% for prefix,ns in ymod.m_namespaces.items() %}
{% if prefix == ''%}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs({{path|nodecppname}}, BAD_CAST "{{ns}}", NULL);
{% else %}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs({{path|nodecppname}}, BAD_CAST "{{ns}}", BAD_CAST "{{prefix}}");
{%endif %}
xmlSetNs({{path|nodecppname}}, {{prefix|nsprefix}});
{% endfor %}
{% endset %}
{{createrootnode-}}
{% endmacro %}
{% macro createparentpriv(nodeparent, field, indent) %}
{% set createparentpriv | indent(indent*4, True)%}
{% for path in field.m_path_priv %}
{% if loop.first %}
{% if path[0] == '/' %}
{{-createrootnode(path, 0)}}
{% else %}
xmlNodePtr {{path|nodecppname}} = xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{path}}", NULL);
{% endif %}
{% else %}
xmlNodePtr {{path|nodecppname}} = xmlNewChild({{loop.previtem|nodecppname}}, NULL, BAD_CAST "{{path}}", NULL);
{% endif %}
{% if loop.last and not field.m_list and not field.m_leaf%}
{% for prefix, ns in field.m_namespaces.items() %}
{% if prefix == ''%}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs({{path|nodecppname}}, BAD_CAST "{{ns}}", NULL);
{% else %}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs({{path|nodecppname}}, BAD_CAST "{{ns}}", BAD_CAST "{{prefix}}");
{% endif %}
xmlSetNs({{path|nodecppname}}, {{prefix|nsprefix}});
{% endfor %}
{% endif %}
{% endfor %}
{% endset %}
{{createparentpriv-}}
{% endmacro %}
{% macro createkeyleaf(stmsg, field, nodeparent, indent) %}
{% set createkeyleaf | indent(indent*4, True)%}
{%if field.m_nodeopr or field.m_namespaces%}
nodeLeaf = {% endif -%}
{%if field.m_typename in ('int32', 'uint32', 'int64') -%}
if ({{stmsg}}.{{field.m_pbname}}() != 0)
{
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST YLexicalInteger({{stmsg}}.{{field.m_pbname}}()).c_str());
}
{%elif field.m_typename in ('uint64') -%}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST YLexicalUInteger({{stmsg}}.{{field.m_pbname}}()).c_str());
{%elif field.m_typename == 'string' -%}
if ({{stmsg}}.{{field.m_pbname}}().length() > 0)
{
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST {{stmsg}}.{{field.m_pbname}}().c_str());
}
{%elif field.m_typename == 'enum' -%}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST {{field.m_type | cppnormalize}}::name({{stmsg}}.{{field.m_pbname}}()));
{%else -%}
unkown leaf type {{field.m_typename}}
{%endif -%}
{%if field.m_namespaces %}
{% for prefix, ns in field.m_namespaces.items() %}
{% if prefix == ''%}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs(nodeLeaf, BAD_CAST "{{ns}}", NULL);
{% else %}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs(nodeLeaf, BAD_CAST "{{ns}}", BAD_CAST "{{prefix}}");
{% endif %}
xmlSetNs(nodeLeaf, {{prefix|nsprefix}});
{% endfor %}
{%endif %}
{%if field.m_nodeopr -%}
if ({{stmsg}}.{{field.m_name|pbname}}_opr() != YPK::EDataNodeOpr::NONE) {
xmlSetProp(nodeLeaf, BAD_CAST NC_OPERATION, BAD_CAST YPK::EDataNodeOpr::name({{stmsg}}.{{field.m_name|pbname}}_opr()));
}
{%endif -%}
{%endset -%}
{{createkeyleaf-}}
{% endmacro %}
{% macro createkeyleaf_key(stmsg, field, nodeparent, indent) %}
{% set createkeyleaf_key | indent(indent*4, True)%}
{%if field.m_nodeopr or field.m_namespaces%}
nodeLeaf = {% endif -%}
{%if field.m_typename in ('int32', 'uint32', 'int64') -%}
if ({{stmsg}}.{{field.m_pbname}}() != 0) {
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST YLexicalInteger({{stmsg}}.{{field.m_pbname}}()).c_str());
}
{%elif field.m_typename in ('uint64') -%}
if ({{stmsg}}.{{field.m_pbname}}() != 0) {
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST YLexicalUInteger({{stmsg}}.{{field.m_pbname}}()).c_str());
}
{%elif field.m_typename == 'string' -%}
if (!{{stmsg}}.{{field.m_pbname}}().empty()) {
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST {{stmsg}}.{{field.m_pbname}}().c_str());
}
{%elif field.m_typename == 'enum' -%}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_leafname}}", BAD_CAST {{field.m_type | cppnormalize}}::name({{stmsg}}.{{field.m_pbname}}()));
{%else -%}
unkown leaf type {{field.m_typename}}
{%endif -%}
{%if field.m_namespaces %}
{% for prefix, ns in field.m_namespaces.items() %}
{% if prefix == ''%}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs(nodeLeaf, BAD_CAST "{{ns}}", NULL);
{% else %}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs(nodeLeaf, BAD_CAST "{{ns}}", BAD_CAST "{{prefix}}");
{% endif %}
xmlSetNs(nodeLeaf, {{prefix|nsprefix}});
{% endfor %}
{%endif %}
{%if field.m_nodeopr -%}
if ({{stmsg}}.{{field.m_name|pbname}}_opr() != YPK::EDataNodeOpr::NONE) {
xmlSetProp(nodeLeaf, BAD_CAST NC_OPERATION, BAD_CAST YPK::EDataNodeOpr::name({{stmsg}}.{{field.m_name|pbname}}_opr()));
}
{%endif -%}
{%endset -%}
{{createkeyleaf_key-}}
{% endmacro %}
{% macro createleaf(stmsg, field, nodeparent, indent) %}
{% set createleaf | indent(indent*4, True) %}
if ({{stmsg}}.has_{{field.m_pbname}}()) {
{% if field.m_path_priv %}
{{-createparentpriv(nodeparent, field, 1)}}
{{-createkeyleaf(stmsg, field, field.m_path_priv[-1]|nodecppname, 1)}}
{% else %}
{{-createkeyleaf(stmsg, field, nodeparent, 1)-}}
{% endif %}
}
{% endset %}
{{createleaf-}}
{% endmacro %}
{%macro createlisto(stmsg, field, nodeparent, indent) %}
{%set createlistoo | indent(indent*4, True) %}
{%set nodevarname = field.m_path_priv_list|nodecppname %}
for (int i = 0; i < {{stmsg}}.{{field.m_pbname}}_size(); i++) {
{% if field.m_pbtype == 'bytes' %}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_path_priv_list}}", BAD_CAST {{stmsg}}.{{field.m_pbname}}(i).c_str());
{% elif field.m_pbtype in ('int32', 'uint32', 'int64') %}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_path_priv_list}}", BAD_CAST YLexicalInteger({{stmsg}}.{{field.m_pbname}}(i)).c_str());
{% elif field.m_pbtype in ('uint64')%}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_path_priv_list}}", BAD_CAST YLexicalUInteger({{stmsg}}.{{field.m_pbname}}(i)).c_str());
{% elif field.m_pbtype == 'enum' %}
xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_path_priv_list}}", BAD_CAST {{field.m_type | cppnormalize}}::name({{stmsg}}.{{field.m_pbname}}(i)));
{% else %}
const {{ybundle}}PB::{{modpbname}}::{{field.m_pbtype}} &st{{field.m_name|cppname}} = {{stmsg}}.{{field.m_pbname}}(i);
xmlNodePtr {{nodevarname}} = xmlNewChild({{nodeparent}}, NULL, BAD_CAST "{{field.m_path_priv_list}}", NULL);
{% for prefix, ns in field.m_namespaces.items() %}
{% if prefix == '' %}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs({{nodevarname}}, BAD_CAST "{{ns}}", NULL);
{% else %}
xmlNsPtr {{prefix|nsprefix}} = xmlNewNs({{nodevarname}}, BAD_CAST "{{ns}}", BAD_CAST "{{prefix}}");
{% endif %}
xmlSetNs({{nodevarname}}, {{prefix|nsprefix}});
{% endfor %}
{% if field.m_type_obj.m_fields_key %}
if (st{{field.m_name|cppname}}.listopr() != YPK::EDataNodeOpr::NONE) {
xmlSetProp({{nodevarname}}, BAD_CAST NC_OPERATION, BAD_CAST YPK::EDataNodeOpr::name(st{{field.m_name|cppname}}.listopr()));
}
{% endif %}
YXmlGen(st{{field.m_name|cppname}}, {{nodevarname}});
{% endif %}
}
{% endset %}
{{createlistoo-}}
{% endmacro %}
{% macro createlist(stmsg, field, nodeparent, indent) %}
{% set createlist | indent(indent*4, True) %}
if ({{stmsg}}.{{field.m_pbname}}_size() > 0) {
{% if field.m_path_priv %}
{{-createparentpriv(nodeparent, field, 1)}}
{{-createlisto(stmsg, field, field.m_path_priv[-1] | nodecppname, 1)}}
{% else %}
{{-createlisto(stmsg, field, nodeparent, 1)}}
{% endif %}
}
{% endset %}
{{createlist-}}
{% endmacro %}
{% macro createcontainer(stmsg, field, nodeparent, indent)%}
{% set createcontainer | indent(indent*4, True) %}
if ({{stmsg}}.has_{{field.m_pbname}}()) {
{% if field.m_path_priv %}
{{ createparentpriv(nodeparent, field, 1) }}
{% if field.m_nodeopr %}
if ({{stmsg}}.{{field.m_name|pbname}}_opr() != YPK::EDataNodeOpr::NONE) {
xmlSetProp({{field.m_path_priv[-1]|nodecppname}}, BAD_CAST NC_OPERATION, BAD_CAST YPK::EDataNodeOpr::name({{stmsg}}.{{field.m_name|pbname}}_opr()));
}
if ({{stmsg}}.{{field.m_name|pbname}}_opr() != YPK::EDataNodeOpr::REMOVE) {
YXmlGen({{stmsg}}.{{field.m_pbname}}(), {{field.m_path_priv[-1]|nodecppname}});
}
{% else %}
YXmlGen({{stmsg}}.{{field.m_pbname}}(), {{field.m_path_priv[-1]|nodecppname}});
{% endif %}
{% else %}
{% if field.m_nodeopr %}
if ({{stmsg}}.{{field.m_name|pbname}}_opr() != YPK::EDataNodeOpr::NONE) {
xmlSetProp({{nodeparent}}, BAD_CAST NC_OPERATION, BAD_CAST YPK::EDataNodeOpr::name({{stmsg}}.{{field.m_name|pbname}}_opr()));
}
if ({{stmsg}}.{{field.m_name|pbname}}_opr() != YPK::EDataNodeOpr::REMOVE) {
YXmlGen({{stmsg}}.{{field.m_pbname}}(), {{nodeparent}});
}
{% else %}
YXmlGen({{stmsg}}.{{field.m_pbname}}(), {{nodeparent}});
{% endif %}
{% endif %}
}
{% endset %}
{{createcontainer-}}
{% endmacro %}
{% macro fieldset(stmsg, nodeparent, field, indent) %}
{% if field.m_list %}
{{-createlist(stmsg, field, nodeparent, indent)}}
{% elif field.m_leaf %}
{{-createleaf(stmsg, field, nodeparent, indent)}}
{% else %}
{{-createcontainer(stmsg, field, nodeparent, indent)}}
{% endif %}
{% endmacro %}
{% block xnode %}
{% for xnode in complextype.m_xtree.m_xnodes.values() recursive %}
{% if loop.depth == 1 and xnode.m_xname[0] == '/' %}
{{-createrootnode(xnode.m_xname, 1)}}
{% else %}
{{loop.depth | myindent-}}
xmlNodePtr {{xnode.m_xname|nodecppname}} = xmlNewChild({{xnode.m_xname_parent|nodecppname}}, NULL, BAD_CAST "{{xnode.m_xname}}", NULL);
{% endif %}
{{loop.depth | myindent-}}
{
{% set outer_loop = loop %}
{% for field in xnode.m_fields %}
{{-fieldset(stmsg, xnode.m_xname|nodecppname, field, outer_loop.depth + 1)-}}
{% endfor %}
{% if xnode.m_xnodes %}
{{-loop(xnode.m_xnodes.values())-}}
{% endif %}
{{loop.depth | myindent-}}
}
{% endfor %}
{% endblock %}
{% block mandgen %}
{% for key,field in complextype.m_fields_key|dictsort %}
{{-createkeyleaf(stmsg, field, 'yNode', 1)-}}
{% endfor %}
{% if complextype.m_fields_key%}
if ({{stmsg}}.listopr() == YPK::EDataNodeOpr::REMOVE) {
return 0;
}
{% endif %}
{% for field in complextype.m_fields_mandatory %}
{{-createkeyleaf(stmsg, field, 'yNode', 1)-}}
{% endfor %}
{% endblock %}
{% block parentnoshared %}
{% for field in complextype.m_fields_noshared %}
{{-fieldset(stmsg, 'yNode', field, 1)-}}
{% endfor %}
{% endblock %}
"""
# Jinja2 template for the generated bundle *header* (<bundle>.h).
# Renders the public C++ entry points of one YANG bundle: config/filter/rpc/
# notify generators plus the PB-from-XML loaders and a blank-node cleaner.
# Template variables: `ybundle` (bundle/namespace name), `ymods` (list of
# module descriptors providing m_modname), and filters `pbname`/`ypkcppname`.
# NOTE: the string content is emitted verbatim into generated source — do not
# reformat it.
H_BUNDLE_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
#pragma once
#include "YPK.h"
{% for ymod in ymods %}
#include "{{ymod.m_modname|pbname}}.pb.h"
{% endfor %}
{% for ymod in ymods %}
#include "{{ymod.m_modname|ypkcppname}}.h"
{% endfor %}
namespace {{ybundle}}
{
int YPK_API YConfigGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc);
int YPK_API YFilterGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc);
int YPK_API YRpcGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc);
int YPK_API YRpcRetGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule);
int YPK_API YNotifyGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule);
int YPK_API YPBGen(xmlDocPtr yDoc, std::map<std::string, YPK::YPBMessagePtr> &mapModule);
int YPK_API YPBGen(xmlNodePtr xnode, YPBModuleMap &yPBModuleMap);
xmlDocPtr YPK_API ClearDocBlankNode(xmlDocPtr doc);
}
"""
# Jinja2 template for the generated bundle *implementation* (<bundle>.cpp).
# Dispatches each generator entry point to the module-specific YPK class
# selected by the protobuf type name (or namespace for XML->PB), falling back
# to the no-op YPKBase. Template variables: `ybundle`, `ymods` (descriptors
# with m_modname/m_modtype/m_rpcs/m_notifys/namespace) and filters
# `pbname`/`ypkcppname`.
# Fix: the generated `YPBGen(xmlDocPtr, YPBModuleMap&)` previously fell off
# the end of a non-void function without returning (undefined behavior in the
# emitted C++, and `iRet` was never used); it now returns iRet.
CPP_BUNDLE_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
#include <boost/make_shared.hpp>
#include "{{ybundle}}.h"
namespace {{ybundle}}
{
int YConfigGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc)
{
std::auto_ptr<YPKBase> pYPK;
std::string strMsgName = pModule->GetTypeName();
{% for mod in ymods %}
{% if loop.first %}
if (strMsgName == "{{ybundle}}PB.{{mod.m_modname|pbname}}.{{mod.m_modtype|pbname}}") {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
}
{% else %}
else if (strMsgName == "{{ybundle}}PB.{{mod.m_modname|pbname}}.{{mod.m_modtype|pbname}}") {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
}
{% endif %}
{% endfor %}
else {
pYPK.reset(new YPKBase());
}
return pYPK->YConfigGen(pModule, yDoc);
}
int YFilterGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc)
{
std::auto_ptr<YPKBase> pYPK;
std::string strMsgName = pModule->GetTypeName();
{% for mod in ymods %}
{% if loop.first %}
if (strMsgName == "{{ybundle}}PB.{{mod.m_modname|pbname}}.{{mod.m_modtype|pbname}}") {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
}
{% else %}
else if (strMsgName == "{{ybundle}}PB.{{mod.m_modname|pbname}}.{{mod.m_modtype|pbname}}") {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
}
{% endif %}
{% endfor %}
else {
pYPK.reset(new YPKBase());
}
return pYPK->YFilterGen(pModule, yDoc);
}
int YRpcGen(YPK::YPBMessagePtr pModule, xmlDocPtr yDoc)
{
std::auto_ptr<YPKBase> pYPK;
std::string strMsgName = pModule->GetTypeName();
do {
{%for mod in ymods%}
{% if mod.m_rpcs %}
if (strMsgName.find("{{ybundle}}PB.{{mod.m_modname|pbname}}") == 0) {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
break;
}
{% endif %}
{% endfor %}
pYPK.reset(new YPKBase());
} while (false);
return pYPK->YRpcGen(pModule, yDoc);
}
int YRpcRetGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule)
{
std::auto_ptr<YPKBase> pYPK;
std::string strMsgName = pModule->GetTypeName();
do {
{%for mod in ymods%}
{% if mod.m_rpcs %}
if (strMsgName.find("{{ybundle}}PB.{{mod.m_modname|pbname}}") == 0) {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
break;
}
{% endif %}
{% endfor %}
pYPK.reset(new YPKBase());
} while (false);
return pYPK->YRpcRetGen(yDoc, pModule);
}
int YPK_API YNotifyGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule)
{
std::auto_ptr<YPKBase> pYPK;
std::string strMsgName = pModule->GetTypeName();
do {
{%for mod in ymods%}
{% if mod.m_notifys %}
if (strMsgName.find("{{ybundle}}PB.{{mod.m_modname|pbname}}") == 0) {
pYPK.reset(new {{mod.m_modname | ypkcppname}}());
break;
}
{% endif %}
{% endfor %}
pYPK.reset(new YPKBase());
} while (false);
return pYPK->YNotifyGen(yDoc, pModule);
}
static int YPBGen(xmlNodePtr xnode, YPBModuleMap &yPBModuleMap)
{
YPK::YPBMessagePtr pModule;
xmlNsPtr pNs = xmlSearchNs(NULL, xnode, NULL);
if (NULL == pNs) {
return -1;
}
std::string strNs((char *) pNs->href);
{%for mod in ymods %}
if (strNs == "{{mod.namespace}}") {
if (yPBModuleMap.find("{{mod.m_modname}}") != yPBModuleMap.end()) {
pModule = yPBModuleMap["{{mod.m_modname}}"];
}
else {
pModule = boost::make_shared<{{ybundle}}PB::{{mod.m_modname | pbname}}::{{mod.m_modname | pbname}}>();
yPBModuleMap["{{mod.m_modname}}"] = pModule;
}
{{mod.m_modname | ypkcppname}} ypk;
ypk.YPBGen(xnode, pModule);
return 0;
}
{%endfor%}
return -1;
}
int YPBGen(xmlDocPtr yDoc, YPBModuleMap &mapModule)
{
int iRet = 0;
xmlNodePtr xRoot = xmlDocGetRootElement(yDoc);
if (NULL == xRoot) {
return 0;
}
xmlNodePtr xnode;
YPK::YPBMessagePtr pModule;
for (xnode = xmlFirstElementChild(xRoot); NULL != xnode; xnode = xmlNextElementSibling(xnode)) {
YPBGen(xnode, mapModule);
}
return iRet;
}
static bool IsBlankNode(xmlNodePtr pNode)
{
if (pNode == NULL)
{
return true;
}
if (pNode->type == XML_ELEMENT_NODE && pNode->children == NULL)
{
return true;
}
else
{
return false;
}
}
static void RecursiveBrotherChild(xmlNodePtr pDstNode)
{
if (!IsBlankNode(pDstNode->children))
{
RecursiveBrotherChild(pDstNode->children);
}
else
{
xmlNodePtr pEarseNode = pDstNode->children;
xmlUnlinkNode(pEarseNode);
xmlFreeNode(pEarseNode);
}
if (!IsBlankNode(pDstNode->next))
{
RecursiveBrotherChild(pDstNode->next);
}
else
{
xmlNodePtr pEarseNode = pDstNode->next;
xmlUnlinkNode(pEarseNode);
xmlFreeNode(pEarseNode);
}
}
xmlDocPtr YPK_API ClearDocBlankNode(xmlDocPtr doc)
{
xmlDocPtr pDstDoc = xmlCopyDoc(doc, 1);
RecursiveBrotherChild(xmlDocGetRootElement(pDstDoc));
return pDstDoc;
}
}
"""
# Static (non-Jinja) C++ source for the shared YPK runtime helpers: lexical
# string<->integer conversions and slash-separated-path XML node selection on
# a libxml2 tree.
# Fixes to the emitted C++:
#  * YUInteger parsed into `unsigned long` while returning
#    `unsigned long long` — silent truncation wherever `long` is 32-bit
#    (Win64/LLP64, 32-bit targets); the local is now `unsigned long long`.
#  * `char * CDefaultValue = "";` — string-literal-to-`char*` conversion is
#    deprecated in C++03 and ill-formed since C++11; now `const char *`
#    (the existing `(xmlChar *)` casts at the return sites still apply).
CPP_YPK_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
#include <sstream>
#include "YPK.h"
namespace YPK
{
long long YInteger(const char* value)
{
long long llvalue;
std::stringstream sStream;
sStream << value;
sStream >> llvalue;
return llvalue;
}
unsigned long long YUInteger(const char* value)
{
unsigned long long ulvalue;
std::stringstream sStream;
sStream << value;
sStream >> ulvalue;
return ulvalue;
}
unsigned short YUShort(const char* value)
{
unsigned short usvalue;
std::stringstream sStream;
sStream << value;
sStream >> usvalue;
return usvalue;
}
std::string YLexicalInteger(long long llValue)
{
std::stringstream sStream;
sStream << llValue;
return sStream.str();
}
std::string YLexicalUInteger(unsigned long long ullValue)
{
std::stringstream sStream;
sStream << ullValue;
return sStream.str();
}
int selectNodes(xmlNodePtr xNode, const char* path, bool fromself, std::vector<xmlNodePtr> &vecNodes)
{
if (NULL == xNode)
{
return 0;
}
vecNodes.clear();
xmlNodePtr xnode = NULL;
xnode = selectNode(xNode, path, fromself);
while (NULL != xnode)
{
vecNodes.push_back(xnode);
xmlNodePtr xnextNode = xmlNextElementSibling(xnode);
if (NULL == xnextNode)
{
break;
}
xnode = selectNode(xnextNode, (char *)xnode->name, true);
}
return 0;
}
xmlNodePtr selectNode(xmlNodePtr xParent, const char* path,bool fromself)
{
if (NULL == xParent)
{
return NULL;
}
/*ex:path=acl/time-range-cfgs/time-range-cfg;strPathFirstPart=acl;strPathSecondPart=time-range-cfgs/time-range-cfg*/
std::string strPathFirstPart;
std::string strPathSecondPart;
if (std::string(path).npos != std::string(path).find("/"))
{
strPathFirstPart = std::string(path).substr(0,std::string(path).find("/"));
strPathSecondPart = std::string(path).substr(std::string(path).find("/")+1,std::string(path).length()-1);
}
else
{
strPathFirstPart = std::string(path);
}
xmlNodePtr xnode = NULL;
xmlNodePtr xnodeSelectRet = NULL;
if (fromself)
{
std::string strSub;
if (std::string((char *)xParent->name) != strPathFirstPart)
{
return NULL;
}
else
{
if (strPathSecondPart.empty())
{
return xParent;
}
else
{
if (std::string(strPathSecondPart).npos != std::string(strPathSecondPart).find("/"))
{
strPathFirstPart = std::string(strPathSecondPart).substr(0,std::string(strPathSecondPart).find("/"));
strPathSecondPart = std::string(strPathSecondPart).substr(std::string(strPathSecondPart).find("/")+1,std::string(strPathSecondPart).length()-1);
}
else
{
strPathFirstPart = std::string(strPathSecondPart);
strPathSecondPart = "";
}
}
}
}
for (xnode = xmlFirstElementChild(xParent); NULL != xnode; xnode = xmlNextElementSibling(xnode))
{
if (std::string((char *)xnode->name) == strPathFirstPart)
{
if (strPathSecondPart.empty())
{
return xnode;
}
else
{
xnodeSelectRet = selectNode(xnode, strPathSecondPart.c_str(), false);
}
}
}
return xnodeSelectRet;
}
xmlChar *getLeafNodeValue(const xmlNode *cur)
{
if ((cur == NULL) || (XML_ELEMENT_NODE != cur->type))
{
return NULL;
}
const char * CDefaultValue = "";
if (cur->children != NULL && XML_TEXT_NODE == cur->children->type)
{
if (NULL != cur->children->content)
{
return cur->children->content;
}
else
{
return (xmlChar *)CDefaultValue;
}
}
return (xmlChar *)CDefaultValue;
}
}
"""
# Static (non-Jinja) C++ header for the shared YPK runtime (YPK.h): protobuf
# message smart-pointer typedefs, the NETCONF data-node operation enum
# (none/remove/create -> "operation" attribute values), the YPKBase virtual
# dispatch interface, the INCRE_OPTION_* bitmask flags consumed by the
# incremental-diff helpers, and declarations for the lexical/XML utilities
# defined in CPP_YPK_TEMPLATE.
# NOTE: emitted verbatim into generated source — do not reformat the string.
H_YPK_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
#pragma once
#include <vector>
#include <boost/shared_ptr.hpp>
#include <libxml/tree.h>
#include <google/protobuf/message.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/stubs/strutil.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/descriptor.pb.h>
#include <google/protobuf/dynamic_message.h>
#include <google/protobuf/message.h>
#include <google/protobuf/compiler/importer.h>
#ifdef WIN32
#ifdef YPK_EXPORTS
#define YPK_API __declspec(dllexport)
#else
#define YPK_API __declspec(dllimport)
#endif
#else
#define YPK_API
#endif
namespace YPK
{
typedef boost::shared_ptr<google::protobuf::Message> YPBMessagePtr;
typedef std::vector<YPBMessagePtr> YPBMessagePtrVec;
typedef std::map<std::string, YPBMessagePtr> YPBModuleMap;
struct EDataNodeOpr
{
enum {
NONE,
REMOVE,
CREATE
};
static const char* name (int value) {
switch (value) {
case NONE:
return "";
case REMOVE:
return "remove";
case CREATE:
return "create";
default:
return "invalid";
}
}
};
static const char NC_OPERATION[] = "operation";
class YPK_API YPKBase
{
public:
virtual int YConfigGen(YPBMessagePtr pModule, xmlDocPtr yDoc) {return 0;};
virtual int YFilterGen(YPBMessagePtr pModule, xmlDocPtr yDoc) {return 0;};
virtual int YRpcGen(YPBMessagePtr pModule, xmlDocPtr yDoc) {return 0;};
virtual int YRpcRetGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule){return 0;};
virtual int YNotifyGen(xmlDocPtr yDoc, YPK::YPBMessagePtr pModule) { return 0; };
virtual int YPBGen(xmlNodePtr yNode, YPBMessagePtr pModule) {return 0;};
};
#define INCRE_OPTION_REMOVE_LEAF 0x01
#define INCRE_OPTION_REMOVE_LIST 0x02
#define INCRE_OPTION_REMOVE_CONTAINER 0x04
#define INCRE_OPTION_CLEAR_NOTCHG_LEAF 0x10
#define INCRE_OPTION_CLEAR_NOTCHG_LIST 0x20
#define INCRE_OPTION_CLEAR_NOTCHG_CONTAINER 0x40
#define INCRE_OPTION_LIST_REMOVE_FIRST 0x8000
#define INCRE_OPTION_NORMAL 0x07
#define INCRE_OPTION_ALL 0x8077
int YPK_API YExtPBMsgIncremental(google::protobuf::Message *pPBMsgPrev,
google::protobuf::Message *pPBMsg,
int option = INCRE_OPTION_NORMAL,
int maxLevel = -1);
int YPK_API YExtPBMsgIncremental_ChkDup(google::protobuf::Message *pPBMsgPrev,
google::protobuf::Message *pPBMsg,
int option,
int maxLevel, bool &bDup);
int YPK_API YExtPBMsgKeySort(google::protobuf::Message *pMsg, int option, int maxLevel = -1);
long long YInteger(const char* value);
unsigned long long YUInteger(const char* value);
std::string YLexicalInteger(long long llValue);
std::string YLexicalUInteger(unsigned long long ullValue);
unsigned short YUShort(const char* value);
int selectNodes(xmlNodePtr xNode,
const char* path,
bool fromself,
std::vector<xmlNodePtr> &vecNodes);
xmlNodePtr selectNode(xmlNodePtr xNode, const char* path, bool fromself);
xmlChar * getLeafNodeValue(const xmlNode *cur);
}
"""
CPP_YPBINCREMENTAL_TEMPLATE = """//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
#include "ypk.pb.h"
#include "YPK.h"
namespace YPK
{
int PBMsgIncremental(google::protobuf::Message *pMsgPrev,
google::protobuf::Message *pMsg,
int option, int level, int maxLevel);
int PBMsgIncremental_ChkDup(google::protobuf::Message *pMsgPrev,
google::protobuf::Message *pMsg,
int option, int level, int maxLevel,
bool &bDup);
bool PBFieldHasNodeoprOption(const google::protobuf::FieldDescriptor* pField)
{
if (pField->options().HasExtension(YPKPB::ynodeopr))
{
return true;
}
else
{
return false;
}
}
int GetListKey(const google::protobuf::Descriptor *pDesc,
std::vector<const google::protobuf::FieldDescriptor*> &vecKeyFields)
{
for (int i = 0; i < pDesc->field_count(); i++)
{
const google::protobuf::FieldDescriptor* pField = pDesc->field(i);
if (!pField->is_required())
{
continue;
}
if (pField->options().HasExtension(YPKPB::ykey))
{
vecKeyFields.push_back(pField);
}
}
assert(!vecKeyFields.empty());
return 0;
}
int RemoveField(google::protobuf::Message *pMsg,
google::protobuf::Message *pMsgPrev,
const google::protobuf::FieldDescriptor *pField,
int option)
{
if (!(option & INCRE_OPTION_REMOVE_CONTAINER) &&
pField->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE)
{
return 0;
}
if (!(option & INCRE_OPTION_REMOVE_LEAF))
{
return 0;
}
const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
const google::protobuf::Descriptor* pDesc = pMsg->GetDescriptor();
const google::protobuf::FieldDescriptor* pFieldOpr = pDesc->FindFieldByName(pField->name() + "_opr");
pReflection->SetInt32(pMsg, pFieldOpr, EDataNodeOpr::REMOVE);
switch(pField->cpp_type())
{
case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
pReflection->SetInt32(pMsg, pField, pReflection->GetInt32(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
pReflection->SetInt64(pMsg, pField, pReflection->GetInt64(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
pReflection->SetUInt32(pMsg, pField, pReflection->GetUInt32(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
pReflection->SetUInt64(pMsg, pField, pReflection->GetUInt64(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT:
pReflection->SetFloat(pMsg, pField, pReflection->GetFloat(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
pReflection->SetDouble(pMsg, pField, pReflection->GetDouble(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
pReflection->SetString(pMsg, pField, pReflection->GetStringReference(*pMsgPrev, pField, NULL));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
pReflection->SetBool(pMsg, pField, pReflection->GetBool(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:
pReflection->SetEnum(pMsg, pField, pReflection->GetEnum(*pMsgPrev, pField));
break;
case google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE:
pReflection->MutableMessage(pMsg, pField, NULL);
break;
default:
assert(false);
break;
}
return 0;
}
int ClearField(google::protobuf::Message *pMsg,
const google::protobuf::FieldDescriptor *pField,
int option)
{
assert(!pField->is_repeated());
//required remained
if (pField->is_required())
{
return 0;
}
switch(pField->cpp_type())
{
case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT:
case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:
if (!(option & INCRE_OPTION_CLEAR_NOTCHG_LEAF))
{
return 0;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE:
if (!(option & INCRE_OPTION_CLEAR_NOTCHG_CONTAINER))
{
return 0;
}
break;
default:
assert(false);
return 0;
}
const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
pReflection->ClearField(pMsg, pField);
return 0;
}
int LeafCompare(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg,
const google::protobuf::FieldDescriptor* pField)
{
const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
switch(pField->cpp_type())
{
case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
if (pReflection->GetInt32(*pMsgPrev, pField) < pReflection->GetInt32(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetInt32(*pMsgPrev, pField) > pReflection->GetInt32(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
if (pReflection->GetInt64(*pMsgPrev, pField) < pReflection->GetInt64(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetInt64(*pMsgPrev, pField) > pReflection->GetInt64(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
if (pReflection->GetUInt32(*pMsgPrev, pField) < pReflection->GetUInt32(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetUInt32(*pMsgPrev, pField) > pReflection->GetUInt32(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
if (pReflection->GetUInt64(*pMsgPrev, pField) < pReflection->GetUInt64(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetUInt64(*pMsgPrev, pField) > pReflection->GetUInt64(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT:
if (pReflection->GetFloat(*pMsgPrev, pField) < pReflection->GetFloat(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetFloat(*pMsgPrev, pField) > pReflection->GetFloat(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
if (pReflection->GetDouble(*pMsgPrev, pField) < pReflection->GetDouble(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetDouble(*pMsgPrev, pField) > pReflection->GetDouble(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
if (pReflection->GetStringReference(*pMsgPrev, pField, NULL) < pReflection->GetStringReference(*pMsg, pField, NULL))
{
return -1;
}
else if (pReflection->GetStringReference(*pMsgPrev, pField, NULL) > pReflection->GetStringReference(*pMsg, pField, NULL))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
if (pReflection->GetBool(*pMsgPrev, pField) < pReflection->GetBool(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetBool(*pMsgPrev, pField) > pReflection->GetBool(*pMsg, pField))
{
return 1;
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:
if (pReflection->GetEnum(*pMsgPrev, pField) < pReflection->GetEnum(*pMsg, pField))
{
return -1;
}
else if (pReflection->GetEnum(*pMsgPrev, pField) > pReflection->GetEnum(*pMsg, pField))
{
return 1;
}
break;
default:
assert(false);
break;
}
return 0;
}
bool ListMessageNotChanged(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg)
{
const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
std::vector<const google::protobuf::FieldDescriptor*> vecFields;
pReflection->ListFields(*pMsg, &vecFields);
for (int i = 0; i < vecFields.size(); i++)
{
const google::protobuf::FieldDescriptor* pField = vecFields[i];
if (pField->is_repeated() || pField->is_optional())
{
return false;
}
if (pField->type() == google::protobuf::FieldDescriptor::TYPE_MESSAGE)
{
return false;
}
if (LeafCompare(pMsgPrev, pMsg, pField))
{
return false;
}
}
return true;
}
int ListCompare(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg)
{
int iRet;
const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
std::vector<const google::protobuf::FieldDescriptor*> vecKeyFields;
GetListKey(pMsg->GetDescriptor(), vecKeyFields);
if (vecKeyFields.empty())
{
return -1;
}
for (int i = 0; i < vecKeyFields.size(); i++)
{
const google::protobuf::FieldDescriptor* pField = vecKeyFields[i];
iRet = LeafCompare(pMsgPrev, pMsg, pField);
if (iRet)
{
return iRet;
}
}
return 0;
}
struct ListMessageLess
{
bool operator()(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg)
{
if (ListCompare(pMsgPrev, pMsg) < 0)
{
return true;
}
else
{
return false;
}
}
};
bool ListMessageLesser(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg)
{
if (ListCompare(pMsgPrev, pMsg) < 0)
{
return true;
}
else
{
return false;
}
}
int ListKeyCompare(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg)
{
int iRet;
const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
std::vector<const google::protobuf::FieldDescriptor*> vecKeyFields;
GetListKey(pMsg->GetDescriptor(), vecKeyFields);
if (vecKeyFields.empty())
{
return -1;
}
for (int i = 0; i < vecKeyFields.size(); i++)
{
const google::protobuf::FieldDescriptor* pField = vecKeyFields[i];
iRet = LeafCompare(pMsgPrev, pMsg, pField);
if (iRet)
{
return iRet;
}
}
return 0;
}
struct ListMsgKey
{
bool operator()(const google::protobuf::Message * const pMsgPrev,
const google::protobuf::Message * const pMsg)
{
if (ListKeyCompare(pMsgPrev, pMsg) < 0)
{
return true;
}
else
{
return false;
}
}
};
int ListIncremental(google::protobuf::Message *pMsgPrev,
google::protobuf::Message *pMsg,
const google::protobuf::FieldDescriptor *pField,
int option, int level, int maxLevel)
{
int iRet = 0;
if (maxLevel >0 && level > maxLevel)
{
return 0;
}
assert(pField->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE);
const google::protobuf::Reflection* pReflection = NULL;
std::set<google::protobuf::Message *, ListMessageLess> setPrev;
std::set<google::protobuf::Message *, ListMessageLess> setCurr;
std::map<google::protobuf::Message*, int> mapIndex;
if (pMsgPrev)
{
pReflection = pMsgPrev->GetReflection();
for (int i = 0; i < pReflection->FieldSize(*pMsgPrev, pField); i++)
{
setPrev.insert(pReflection->MutableRepeatedMessage(pMsgPrev, pField, i));
}
}
if (pMsg)
{
pReflection = pMsg->GetReflection();
for (int i = 0; i < pReflection->FieldSize(*pMsg, pField); i++)
{
google::protobuf::Message* pRepatedMsg =
pReflection->MutableRepeatedMessage(pMsg, pField, i);
setCurr.insert(pRepatedMsg);
mapIndex[pRepatedMsg] = i;
}
}
assert(pReflection);
std::set<google::protobuf::Message *, ListMessageLess>::iterator iterPrev = setPrev.begin();
std::set<google::protobuf::Message *, ListMessageLess>::iterator iterCurr = setCurr.begin();
std::set<int> setNotChangedIndex;
while (iterPrev != setPrev.end())
{
if (iterCurr == setCurr.end() ||
ListCompare(*iterPrev, *iterCurr) < 0)
{
const google::protobuf::Message *pRemove = *iterPrev;
iterPrev++;
if (!(option & INCRE_OPTION_REMOVE_LIST))
{
continue;
}
google::protobuf::Message *pRepeated = pReflection->AddMessage(pMsg, pField);
pRepeated->CopyFrom(*pRemove);
const google::protobuf::Descriptor* pDesc = pRepeated->GetDescriptor();
const google::protobuf::FieldDescriptor* pFieldListopr = pDesc->FindFieldByName("listopr");
pRepeated->GetReflection()->SetInt32(pRepeated, pFieldListopr, EDataNodeOpr::REMOVE);
continue;
}
if (ListCompare(*iterPrev, *iterCurr) > 0)
{
iterCurr++;
continue;
}
google::protobuf::Message *pRepeatedPrev = *iterPrev;
google::protobuf::Message *pRepeatedCurr = *iterCurr;
iterPrev++;
iterCurr++;
iRet = PBMsgIncremental(pRepeatedPrev, pRepeatedCurr, option, level + 1, -1);
if (iRet)
{
return iRet;
}
if ((option & INCRE_OPTION_CLEAR_NOTCHG_LIST) &&
ListMessageNotChanged(pRepeatedPrev, pRepeatedCurr))
{
setNotChangedIndex.insert(mapIndex[pRepeatedCurr]);
}
}
if ((option & INCRE_OPTION_CLEAR_NOTCHG_LIST))
{
for (std::set<int>::reverse_iterator iter = setNotChangedIndex.rbegin(); iter != setNotChangedIndex.rend(); iter++)
{
int idx = *iter;
int idxLast = pReflection->FieldSize(*pMsg, pField) - 1;
if (idx != idxLast)
{
pReflection->SwapElements(pMsg, pField, idx, idxLast);
}
pReflection->RemoveLast(pMsg, pField);
}
}
if ((option & INCRE_OPTION_LIST_REMOVE_FIRST))
{
pReflection = pMsg->GetReflection();
int removeIdx = 0;
for (int i = pReflection->FieldSize(*pMsg, pField) - 1; i > removeIdx; i--)
{
google::protobuf::Message* pRepeated =
pReflection->MutableRepeatedMessage(pMsg, pField, i);
const google::protobuf::Descriptor* pDesc = pRepeated->GetDescriptor();
const google::protobuf::FieldDescriptor* pFieldListopr = pDesc->FindFieldByName("listopr");
if (pRepeated->GetReflection()->HasField(*pRepeated, pFieldListopr) &&
pRepeated->GetReflection()->GetInt32(*pRepeated, pFieldListopr) == EDataNodeOpr::REMOVE)
{
pReflection->SwapElements(pMsg, pField, removeIdx, i);
removeIdx++;
}
}
}
return 0;
}
int ListIncremental_ChkDup(google::protobuf::Message *pMsgPrev,
google::protobuf::Message *pMsg,
const google::protobuf::FieldDescriptor *pField,
int option, int level, int maxLevel, bool &bDup)
{
int iRet = 0;
if (maxLevel > 0 && level > maxLevel)
{
return -1;
}
if (pField->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE)
{
return -1;
}
const google::protobuf::Reflection* pReflection = NULL;
std::set<google::protobuf::Message *, ListMessageLess> setPrev;
std::set<google::protobuf::Message *, ListMessageLess> setCurr;
std::map<google::protobuf::Message*, int> mapIndex;
if (pMsgPrev)
{
pReflection = pMsgPrev->GetReflection();
for (int i = 0; i < pReflection->FieldSize(*pMsgPrev, pField); i++)
{
setPrev.insert(pReflection->MutableRepeatedMessage(pMsgPrev, pField, i));
}
}
if (pMsg)
{
pReflection = pMsg->GetReflection();
for (int i = 0; i < pReflection->FieldSize(*pMsg, pField); i++)
{
google::protobuf::Message* pRepatedMsg =
pReflection->MutableRepeatedMessage(pMsg, pField, i);
setCurr.insert(pRepatedMsg);
mapIndex[pRepatedMsg] = i;
}
}
if (!pReflection)
{
return -1;
}
std::set<google::protobuf::Message *, ListMessageLess>::iterator iterPrev = setPrev.begin();
std::set<google::protobuf::Message *, ListMessageLess>::iterator iterCurr = setCurr.begin();
std::set<int> setNotChangedIndex;
while (iterPrev != setPrev.end())
{
if (iterCurr == setCurr.end() ||
ListCompare(*iterPrev, *iterCurr) < 0)
{
bDup = false;
const google::protobuf::Message *pRemove = *iterPrev;
iterPrev++;
if (!(option & INCRE_OPTION_REMOVE_LIST))
{
continue;
}
google::protobuf::Message *pRepeated = pReflection->AddMessage(pMsg, pField);
pRepeated->CopyFrom(*pRemove);
const google::protobuf::Descriptor* pDesc = pRepeated->GetDescriptor();
const google::protobuf::FieldDescriptor* pFieldListopr = pDesc->FindFieldByName("listopr");
pRepeated->GetReflection()->SetInt32(pRepeated, pFieldListopr, EDataNodeOpr::REMOVE);
continue;
}
if (ListCompare(*iterPrev, *iterCurr) > 0)
{
bDup = false;
iterCurr++;
continue;
}
google::protobuf::Message *pRepeatedPrev = *iterPrev;
google::protobuf::Message *pRepeatedCurr = *iterCurr;
iterPrev++;
iterCurr++;
bool bCurDup = true;
iRet = PBMsgIncremental_ChkDup(pRepeatedPrev, pRepeatedCurr, option, level + 1, -1, bCurDup);
if (iRet)
{
return iRet;
}
bDup &= bCurDup;
if (bCurDup)
{
if (option & INCRE_OPTION_CLEAR_NOTCHG_LIST)
{
setNotChangedIndex.insert(mapIndex[pRepeatedCurr]);
}
}
}
if (iterCurr != setCurr.end())
{
bDup = false;
}
if ((option & INCRE_OPTION_CLEAR_NOTCHG_LIST))
{
for (std::set<int>::reverse_iterator iter = setNotChangedIndex.rbegin(); iter != setNotChangedIndex.rend(); iter++)
{
int idx = *iter;
int idxLast = pReflection->FieldSize(*pMsg, pField) - 1;
if (idx != idxLast)
{
pReflection->SwapElements(pMsg, pField, idx, idxLast);
}
pReflection->RemoveLast(pMsg, pField);
}
}
if ((option & INCRE_OPTION_LIST_REMOVE_FIRST))
{
pReflection = pMsg->GetReflection();
int removeIdx = 0;
for (int i = pReflection->FieldSize(*pMsg, pField) - 1; i > removeIdx; i--)
{
google::protobuf::Message* pRepeated =
pReflection->MutableRepeatedMessage(pMsg, pField, i);
const google::protobuf::Descriptor* pDesc = pRepeated->GetDescriptor();
const google::protobuf::FieldDescriptor* pFieldListopr = pDesc->FindFieldByName("listopr");
if (pRepeated->GetReflection()->HasField(*pRepeated, pFieldListopr) &&
pRepeated->GetReflection()->GetInt32(*pRepeated, pFieldListopr) == EDataNodeOpr::REMOVE)
{
pReflection->SwapElements(pMsg, pField, removeIdx, i);
removeIdx++;
}
}
}
return 0;
}
int PBMsgIncremental(google::protobuf::Message *pMsgPrev,
google::protobuf::Message *pMsg,
int option, int level, int maxLevel)
{
int iRet = 0;
if (maxLevel > 0 && level > maxLevel)
{
return 0;
}
assert(pMsgPrev && pMsg && pMsgPrev->GetTypeName() == pMsg->GetTypeName());
const google::protobuf::Reflection* pReflection = pMsgPrev->GetReflection();
std::vector<const google::protobuf::FieldDescriptor*> vecFieldsPrev;
//the field is ordered by field number
pReflection->ListFields(*pMsgPrev, &vecFieldsPrev);
std::vector<const google::protobuf::FieldDescriptor*> vecFields;
pReflection->ListFields(*pMsg, &vecFields);
for (int iPrev = 0, iCurr = 0; iPrev < vecFieldsPrev.size() || iCurr < vecFields.size();)
{
const google::protobuf::FieldDescriptor* pField = NULL;
if (iPrev < vecFieldsPrev.size())
{
if (iCurr >= vecFields.size() ||
vecFieldsPrev[iPrev]->number() < vecFields[iCurr]->number())
{
pField = vecFieldsPrev[iPrev];
iPrev++;
if (pField->is_repeated())
{
ListIncremental(pMsgPrev, pMsg, pField, option, 1, 1);
continue;
}
if (PBFieldHasNodeoprOption(pField))
{
RemoveField(pMsg, pMsgPrev, pField, option);
}
continue;
}
}
if (iCurr < vecFields.size())
{
if (iPrev >= vecFieldsPrev.size() ||
vecFields[iCurr]->number() < vecFieldsPrev[iPrev]->number())
{
iCurr++;
continue;
}
}
pField = vecFields[iCurr];
iPrev++;
iCurr++;
//value comparing
if (pField->is_repeated())
{
if (pField->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE)
{
//not support
assert(false);
return -1;
}
iRet = ListIncremental(pMsgPrev, pMsg, pField, option, level + 1, maxLevel);
if (0 != iRet)
{
return iRet;
}
}
else
{
switch(pField->cpp_type())
{
case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
if (pReflection->GetInt32(*pMsgPrev, pField) == pReflection->GetInt32(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
if (pReflection->GetInt64(*pMsgPrev, pField) == pReflection->GetInt64(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
if (pReflection->GetUInt32(*pMsgPrev, pField) == pReflection->GetUInt32(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
if (pReflection->GetUInt64(*pMsgPrev, pField) == pReflection->GetUInt64(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT:
if (pReflection->GetFloat(*pMsgPrev, pField) == pReflection->GetFloat(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
if (pReflection->GetDouble(*pMsgPrev, pField) == pReflection->GetDouble(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
if (pReflection->GetStringReference(*pMsgPrev, pField, NULL) == pReflection->GetStringReference(*pMsg, pField, NULL))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
if (pReflection->GetBool(*pMsgPrev, pField) == pReflection->GetBool(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:
if (pReflection->GetEnum(*pMsgPrev, pField) == pReflection->GetEnum(*pMsg, pField))
{
ClearField(pMsg, pField, option);
}
break;
case google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE:
iRet = PBMsgIncremental(pReflection->MutableMessage(pMsgPrev, pField),
pReflection->MutableMessage(pMsg, pField),
option, level + 1, maxLevel);
if (iRet)
{
return iRet;
}
if (pReflection->MutableMessage(pMsg, pField)->ByteSize() == 0)
{
ClearField(pMsg, pField, option);
}
break;
default:
break;
}
}
}
return 0;
}
// Merge-compare pMsg against pMsgPrev (both must be the same message type):
// fields whose values are identical in both messages are cleared from pMsg,
// and bDup is set to false as soon as any difference is found.  bDup is only
// ever cleared here, never set true -- presumably the caller initialises it
// to true before the first call (TODO confirm at call sites).
// Returns 0 on success, -1 on type mismatch, depth overflow (level > maxLevel
// when maxLevel > 0), or an unsupported repeated scalar field.
int PBMsgIncremental_ChkDup(google::protobuf::Message *pMsgPrev,
    google::protobuf::Message *pMsg,
    int option, int level, int maxLevel, bool &bDup)
{
    int iRet = 0;
    if (maxLevel > 0 && level > maxLevel)
    {
        return -1;
    }
    if (!(pMsgPrev && pMsg && pMsgPrev->GetTypeName() == pMsg->GetTypeName()))
    {
        return -1;
    }
    const google::protobuf::Reflection* pReflection = pMsgPrev->GetReflection();
    std::vector<const google::protobuf::FieldDescriptor*> vecFieldsPrev;
    //the field is ordered by field number
    pReflection->ListFields(*pMsgPrev, &vecFieldsPrev);
    std::vector<const google::protobuf::FieldDescriptor*> vecFields;
    pReflection->ListFields(*pMsg, &vecFields);
    // Merge-join over the two field lists (both ordered by field number).
    for (int iPrev = 0, iCurr = 0; iPrev < vecFieldsPrev.size() || iCurr < vecFields.size();)
    {
        const google::protobuf::FieldDescriptor* pField = NULL;
        if (iPrev < vecFieldsPrev.size())
        {
            // Field set only in the previous message.
            if (iCurr >= vecFields.size() ||
                vecFieldsPrev[iPrev]->number() < vecFields[iCurr]->number())
            {
                bDup = false;
                pField = vecFieldsPrev[iPrev];
                iPrev++;
                if (pField->is_repeated())
                {
                    ListIncremental_ChkDup(pMsgPrev, pMsg, pField, option, 1, 1, bDup);
                    continue;
                }
                if (PBFieldHasNodeoprOption(pField))
                {
                    RemoveField(pMsg, pMsgPrev, pField, option);
                }
                continue;
            }
        }
        if (iCurr < vecFields.size())
        {
            // Field set only in the current message: keep it; not a duplicate.
            if (iPrev >= vecFieldsPrev.size() ||
                vecFields[iCurr]->number() < vecFieldsPrev[iPrev]->number())
            {
                bDup = false;
                iCurr++;
                continue;
            }
        }
        // Field present in both messages: compare values per cpp type.
        pField = vecFields[iCurr];
        iPrev++;
        iCurr++;
        //value comparing
        if (pField->is_repeated())
        {
            if (pField->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE)
            {
                //not support
                return -1;
            }
            iRet = ListIncremental_ChkDup(pMsgPrev, pMsg, pField,
                option, level + 1, maxLevel, bDup);
            if (0 != iRet)
            {
                return iRet;
            }
        }
        else
        {
            // Equal scalar values are cleared from pMsg (incremental diff);
            // differing values clear the duplicate flag.
            switch(pField->cpp_type())
            {
            case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
                if (pReflection->GetInt32(*pMsgPrev, pField)
                    == pReflection->GetInt32(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
                if (pReflection->GetInt64(*pMsgPrev, pField)
                    == pReflection->GetInt64(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
                if (pReflection->GetUInt32(*pMsgPrev, pField)
                    == pReflection->GetUInt32(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
                if (pReflection->GetUInt64(*pMsgPrev, pField)
                    == pReflection->GetUInt64(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT:
                if (pReflection->GetFloat(*pMsgPrev, pField)
                    == pReflection->GetFloat(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
                if (pReflection->GetDouble(*pMsgPrev, pField)
                    == pReflection->GetDouble(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
                if (pReflection->GetStringReference(*pMsgPrev, pField, NULL)
                    == pReflection->GetStringReference(*pMsg, pField, NULL))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
                if (pReflection->GetBool(*pMsgPrev, pField)
                    == pReflection->GetBool(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:
                if (pReflection->GetEnum(*pMsgPrev, pField)
                    == pReflection->GetEnum(*pMsg, pField))
                {
                    ClearField(pMsg, pField, option);
                }
                else
                {
                    bDup = false;
                }
                break;
            case google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE:
                // Recurse into the submessage; clear it when the diff left
                // it empty.
                iRet = PBMsgIncremental_ChkDup(pReflection->MutableMessage(pMsgPrev, pField),
                    pReflection->MutableMessage(pMsg, pField),
                    option, level + 1, maxLevel, bDup);
                if (iRet)
                {
                    return iRet;
                }
                if (pReflection->MutableMessage(pMsg, pField)->ByteSize() == 0)
                {
                    ClearField(pMsg, pField, option);
                }
                break;
            default:
                break;
            }
        }
    }
    return 0;
}
// Public entry point: compute the incremental diff of pMsg against pMsgPrev,
// starting the recursion at level 1.
int YExtPBMsgIncremental(google::protobuf::Message *pMsgPrev,
    google::protobuf::Message *pMsg,
    int option, int maxLevel)
{
    return PBMsgIncremental(pMsgPrev, pMsg, option, 1, maxLevel);
}
// Public entry point: incremental diff plus duplicate detection via bDup,
// starting the recursion at level 1.
int YExtPBMsgIncremental_ChkDup(google::protobuf::Message *pPBMsgPrev,
    google::protobuf::Message *pPBMsg,
    int option, int maxLevel, bool &bDup)
{
    return PBMsgIncremental_ChkDup(pPBMsgPrev, pPBMsg, option, 1, maxLevel, bDup);
}
// Recursively reorder the repeated message fields of pMsg according to the
// ListMsgKey comparator (declared elsewhere in this file), reordering in
// place via Reflection::SwapElements.  Non-message fields are left alone.
// Returns 0 always (also when maxLevel limits the depth).
//
// CLEANUP: ~140 lines of commented-out legacy code (an older key-sort draft
// duplicating PBMsgIncremental's switch) were removed; executable logic is
// unchanged.
int PBMsgKeySort(google::protobuf::Message *pMsg, int option, int level, int maxLevel)
{
    if (maxLevel > 0 && level > maxLevel)
    {
        return 0;
    }
    assert(pMsg);
    const google::protobuf::Reflection* pReflection = pMsg->GetReflection();
    //the field is ordered by field number
    std::vector<const google::protobuf::FieldDescriptor*> vecFields;
    pReflection->ListFields(*pMsg, &vecFields);
    for (int iCurr = 0; iCurr < vecFields.size(); iCurr++)
    {
        const google::protobuf::FieldDescriptor* pField = vecFields[iCurr];
        if (pField->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE)
        {
            // Only message fields can carry sortable children.
            continue;
        }
        if (pField->is_repeated())
        {
            // Determine the target order by inserting the element pointers
            // into a set ordered by ListMsgKey, then move each element into
            // its slot with SwapElements.
            std::set<google::protobuf::Message *, ListMsgKey> setCurr;
            const google::protobuf::Reflection* pRefle = pMsg->GetReflection();
            int iSize = pRefle->FieldSize(*pMsg, pField);
            for (int i = 0; i < iSize; i++)
            {
                google::protobuf::Message *pItem = pRefle->MutableRepeatedMessage(pMsg, pField, i);
                setCurr.insert(pItem);
                // NOTE(review): recursion restarts at level 1 rather than
                // level + 1, so maxLevel never bounds the depth along this
                // path -- confirm whether that is intentional.
                PBMsgKeySort(pItem, option, 1, maxLevel);
            }
            int iPos = 0;
            std::set<google::protobuf::Message *, ListMsgKey>::iterator it = setCurr.begin();
            for (int i = iPos; i < iSize; i++)
            {
                google::protobuf::Message *pItem = pRefle->MutableRepeatedMessage(pMsg, pField, i);
                if (pItem == *it)
                {
                    if (iPos != i)
                    {
                        pRefle->SwapElements(pMsg, pField, iPos, i);
                    }
                    // Restart the scan just past the newly placed element so
                    // the element swapped out of slot iPos is reconsidered.
                    i = iPos;
                    iPos++;
                    it++;
                }
            }
        }
        else
        {
            // Singular submessage: recurse (same level-1 caveat as above).
            google::protobuf::Message *ppt = pReflection->MutableMessage(pMsg, pField);
            PBMsgKeySort(ppt, option, 1, maxLevel);
        }
    }
    return 0;
}
// Public entry point: key-sort repeated message fields of pMsg, starting the
// recursion at level 1.
int YExtPBMsgKeySort(google::protobuf::Message *pMsg, int option, int maxLevel)
{
    return PBMsgKeySort(pMsg, option, 1, maxLevel);
}
}
"""
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,369
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/cmd_ypk_proto.py
|
#! /usr/bin/python
#
from xgen.util import *
from xgen.yxsd import *
import glob
import os
from jinja2 import Environment, DictLoader
# 2.7
# reload(sys)
#import sys
# sys.setdefaultencoding('utf-8')
import logging
logger = logging.getLogger(__name__)
class YPKGenProto(object):
    """Generate .proto files from the yxsd models found in a directory.

    Renders the shared ``ypk.proto`` plus one ``<module>.proto`` per *.xsd
    file, using the Jinja2 templates defined at module level.
    """

    def __init__(self, xsdDir, inBundle):
        self.mMods = {}           # parsed modules (not populated here)
        self.mXsdDir = xsdDir     # directory scanned for *.xsd inputs
        self.m_bundle = inBundle  # yang bundle name injected into templates
        self.initenv()

    def initenv(self):
        """Build the Jinja2 environment and register the pbname filter."""
        mytemplates = {
            "proto": PROTO_TEMPLATE,
            "ypk": PROTO_YPKTEMPLATE,
        }
        self.mEnv = Environment(loader=DictLoader(mytemplates),
                                trim_blocks=True, lstrip_blocks=True)
        self.mEnv.filters["pbname"] = pbname

    def run(self, inDir):
        """Render ypk.proto plus one .proto per *.xsd module into inDir."""
        # BUG FIX: os.mkdir raises when intermediate directories are missing
        # and races if the directory appears between check and creation;
        # makedirs(..., exist_ok=True) handles both.
        os.makedirs(inDir, exist_ok=True)
        with open(inDir + "/" + "ypk.proto", 'w') as f:
            ypk_output = self.mEnv.get_template('ypk').render()
            f.write(ypk_output)
        path = self.mXsdDir + '/*.xsd'
        for xsd in glob.glob(path):
            logger.debug("begin process[proto] %s", xsd)
            mod = YModule(xsd)
            mod.parse()
            modname = mod.m_modname
            outfile = inDir + "/" + pbname(modname) + ".proto"
            logger.info("generate %s", outfile)
            with open(outfile, 'w') as f:
                output = self.mEnv.get_template('proto').render(
                    ymod=mod, ybundle=self.m_bundle)
                f.write(output)
cmddescription = 'generate proto from dev yxsd model'


def makeoptions(optparser):
    """Register the command-line options of the ypk-proto command."""
    bundle_help = "specify yang bundle"
    optparser.add_argument("--bundle", type=str, help=bundle_help)
def run(options):
    """Command entry point: generate protos from options.input into options.output."""
    cmd = YPKGenProto(options.input, options.bundle)
    cmd.run(options.output)
PROTO_TEMPLATE = """
{% set modpbname = ymod.m_modname | pbname %}
{% set modpbtype = ymod.m_modtype | pbname %}
//auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com
syntax = "proto2";
import "ypk.proto";
package {{ybundle}}PB.{{modpbname}};
{% for name,complextype in ymod.m_complex_type_dict|dictsort %}
{% set ns = namespace(index = 2048) %}
message {{complextype.m_name_pb}}
{
{% for field in complextype.m_fields %}
{% if field.m_key != -1 %}
{{field.m_pboption}} {{field.m_pbtype}} {{field.m_pbname}} = {{field.m_field_index}}[(YPKPB.ykey)=1];
{% elif field.m_nodeopr%}
{{field.m_pboption}} {{field.m_pbtype}} {{field.m_pbname}} = {{field.m_field_index}}[(YPKPB.ynodeopr)=true];
{% else %}
{{field.m_pboption}} {{field.m_pbtype}} {{field.m_pbname}} = {{field.m_field_index}};
{% endif %}
{% endfor %}
{% if complextype.m_fields_key %}
{% set ns.index = ns.index + 1 %}
optional int32 listopr = {{ns.index}} [default = 0];
{% endif %}
{% for field in complextype.m_fields %}
{% if field.m_nodeopr%}
{% set ns.index = ns.index + 1 %}
optional int32 {{field.m_nodeopr}} = {{ns.index}} [default = 0];
{% endif %}
{% endfor %}
}
{% endfor %}
{%for name, rpc in ymod.m_rpcs | dictsort%}
{% if not rpc.m_input %}
message {{rpc.m_name|pbname}}
{
}
{% endif %}
{%endfor%}
"""
PROTO_YPKTEMPLATE = """
syntax = "proto2";
import "google/protobuf/descriptor.proto";
package YPKPB;
extend google.protobuf.FieldOptions {
optional int32 ykey = 10001[default=0];
optional bool ynodeopr = 10002[default=false];
}
"""
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,370
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/cmd_xsdcompare.py
|
#! /usr/bin/python
#
"""
Auto compare xsd file to auto merge xsd
"""
import pdb
import concurrent.futures
import logging
import re
import glob
import os
import shutil
from xml.dom.minidom import parse
import xml.dom.minidom
import xml.etree.ElementTree as ElementTree
logger = logging.getLogger(__name__)
# Namespace prefix map used by every XPath query (iterfind) in this module.
XSDNS = {'xsd': 'http://www.w3.org/2001/XMLSchema',
         'w': 'http://www.fiberhome.com.cn/board/control',
         'y': 'http://www.fiberhome.com.cn/ns/yang',
         'xdo': 'urn:pxp',
         'ms': 'urn:schemas-microsoft-com:xslt',
         'stack': 'urn:anything',
         'xdb': 'http://xmlns.oracle.com/xdb'}
# Register the prefixes globally so ElementTree.write() re-emits the same
# prefixes instead of generated ns0/ns1 names.
ElementTree.register_namespace('xsd', 'http://www.w3.org/2001/XMLSchema')
ElementTree.register_namespace(
    'w', 'http://www.fiberhome.com.cn/board/control')
ElementTree.register_namespace('y', 'http://www.fiberhome.com.cn/ns/yang')
ElementTree.register_namespace('xdo', 'urn:pxp')
ElementTree.register_namespace('ms', 'urn:schemas-microsoft-com:xslt')
ElementTree.register_namespace('stack', 'urn:anything')
ElementTree.register_namespace('xdb', 'http://xmlns.oracle.com/xdb')
class CmdXsdAutoCompare(object):
    """Merge a new set of xsd files against an original (historical) set.

    For each new file with a same-named original, complexType fields and
    simpleType enumerations missing from the original are appended to it and
    assigned stable ``field_index`` attributes; the merged result is written
    into ``out_new_dir``.  New files without an original are copied verbatim
    and indexed from scratch.
    """

    def __init__(self, in_orig_dir, in_new_dir, out_new_dir):
        # Directory layout: originals, incoming files, merged output.
        self.in_orig_dir = in_orig_dir
        self.in_new_dir = in_new_dir
        self.out_new_dir = out_new_dir
        if os.path.exists(self.out_new_dir) is False:
            os.mkdir(self.out_new_dir)
        self.in_orig_files = [filename for filename in glob.glob(
            self.in_orig_dir + '/*.xsd')]
        self.in_new_files = [filename for filename in glob.glob(
            self.in_new_dir + '/*.xsd')]

    @staticmethod
    def merge(create_element, new_element):
        """Append fields of new_element that are absent from create_element.

        Existing fields keep (or are assigned) their field_index; appended
        fields continue numbering from the current maximum.
        """
        new_fields = list(new_element.iterfind(
            ".//xsd:sequence/xsd:element", XSDNS))
        create_fields = list(create_element.iterfind(
            ".//xsd:sequence/xsd:element", XSDNS))
        # NOTE(review): list() never returns None -- an empty-list check
        # ('if not create_fields or not new_fields') was probably intended,
        # as in merge_enumeration below.
        if create_fields is None or new_fields is None:
            return
        create_seq = list(create_element.iterfind(".//xsd:sequence", XSDNS))
        # NOTE(review): when there is not exactly one sequence, create_seq
        # stays a plain list and the append below modifies that throwaway
        # list, not the XML tree -- confirm intended behaviour.
        if len(create_seq) == 1:
            create_seq = create_seq[0]
        # Assign indexes to unindexed existing fields, tracking the maximum.
        create_max_num = 1
        has_attr_field_index = False
        for create_field in create_fields:
            if create_field.get('field_index') is None:
                create_field.set('field_index', str(create_max_num))
                create_max_num = create_max_num + 1
            else:
                create_max_num = max(create_max_num, int(
                    create_field.get('field_index')))
                has_attr_field_index = True
        if has_attr_field_index is True:
            create_max_num = create_max_num + 1
        # Append fields (matched by 'name') not present in the original.
        for new_field in new_fields:
            is_new_field = True
            for create_field in create_fields:
                if create_field.attrib['name'] == new_field.attrib['name']:
                    is_new_field = False
                    break
            if is_new_field is True:
                new_field.set('field_index', str(create_max_num))
                create_seq.append(new_field)
                create_max_num = create_max_num + 1

    @staticmethod
    def merge_enumeration(create_element, new_element):
        """Merge enumeration literals of a simpleType, matched by the w:en /
        w:cn attributes; matched literals adopt the new 'value', new literals
        are appended with fresh field_index numbers."""
        new_fields = list(new_element.iterfind(
            ".//xsd:restriction/xsd:enumeration", XSDNS))
        create_fields = list(create_element.iterfind(
            ".//xsd:restriction/xsd:enumeration", XSDNS))
        if not new_fields or not create_fields:
            return
        create_seq = list(create_element.iterfind(".//xsd:restriction", XSDNS))
        # NOTE(review): same single-restriction caveat as in merge().
        if len(create_seq) == 1:
            create_seq = create_seq[0]
        # Fully-qualified attribute names for the w: namespace.
        enum_en_attr = "{%s}en" % (XSDNS['w'])
        enum_cn_attr = "{%s}cn" % (XSDNS['w'])
        create_max_num = 1
        has_attr_field_index = False
        for create_field in create_fields:
            if create_field.get('field_index') is None:
                create_field.set('field_index', str(create_max_num))
                create_max_num = create_max_num + 1
            else:
                create_max_num = max(create_max_num, int(
                    create_field.get('field_index')))
                has_attr_field_index = True
        if has_attr_field_index is True:
            create_max_num = create_max_num + 1
        for new_field in new_fields:
            is_new_field = True
            for create_field in create_fields:
                if create_field.attrib[enum_en_attr] == new_field.attrib[enum_en_attr] \
                        and create_field.attrib[enum_cn_attr] == new_field.attrib[enum_cn_attr]:
                    # Same literal: adopt the (possibly changed) value.
                    if create_field.attrib['value'] != new_field.attrib['value']:
                        create_field.set('value', new_field.attrib['value'])
                    is_new_field = False
                    break
            if is_new_field is True:
                new_field.set('field_index', str(create_max_num))
                create_seq.append(new_field)
                create_max_num = create_max_num + 1

    @staticmethod
    def setfieldindex(dst_file):
        """Assign field_index attributes throughout dst_file in place:
        sequential for complexType fields, derived from the (hex) 'value'
        for enumeration literals."""
        create_tree = ElementTree.parse(dst_file)
        create_root = create_tree.getroot()
        create_elements = list(
            create_root.iterfind(".//xsd:complexType", XSDNS))
        for create_element in create_elements:
            field_index = 1
            create_fields = list(create_element.iterfind(
                ".//xsd:sequence/xsd:element", XSDNS))
            for create_field in create_fields:
                create_field.set('field_index', str(field_index))
                field_index = field_index + 1
        create_elements = list(
            create_root.iterfind(".//xsd:simpleType", XSDNS))
        for create_element in create_elements:
            field_index = 1
            create_fields = list(create_element.iterfind(
                ".//xsd:restriction/xsd:enumeration", XSDNS))
            for create_field in create_fields:
                # Enumeration indexes come from the hex 'value' attribute,
                # not from position (field_index counter is unused here).
                create_field.set('field_index', str(
                    int(create_field.get('value'), base=16)))
        create_tree.write(dst_file, encoding='utf-8', xml_declaration=True)

    @staticmethod
    def setenumerationfieldindex(new_element):
        """Index each enumeration literal of new_element by its hex 'value'."""
        new_fields = list(new_element.iterfind(
            ".//xsd:restriction/xsd:enumeration", XSDNS))
        for new_field in new_fields:
            new_field.set('field_index', str(
                int(new_field.get('value'), base=16)))

    @staticmethod
    def setcomplexfieldindex(new_element):
        """Index the sequence elements of new_element sequentially from 1."""
        field_index = 1
        new_fields = list(new_element.iterfind(
            ".//xsd:sequence/xsd:element", XSDNS))
        for new_field in new_fields:
            new_field.set('field_index', str(field_index))
            field_index = field_index + 1

    def compare(self, new_file, orig_file):
        """Merge new_file into orig_file (or copy it when orig_file is None)
        and write the result into self.out_new_dir."""
        logger.info("compare (%s)<-(%s)", orig_file, new_file)
        if orig_file is None:
            logger.info("orig file not found, copy new file:(%s)", new_file)
            dst_file = shutil.copy(new_file, self.out_new_dir)
            CmdXsdAutoCompare.setfieldindex(dst_file)
            return
        # Start from the original so existing field_index values survive.
        dst_file = shutil.copy(orig_file, self.out_new_dir)
        create_tree = ElementTree.parse(dst_file)
        create_root = create_tree.getroot()
        new_dom_tree = ElementTree.parse(new_file)
        new_root = new_dom_tree.getroot()
        # Merge complex types by name; unknown types are appended whole.
        create_elements = list(
            create_root.iterfind(".//xsd:complexType", XSDNS))
        new_elements = list(new_root.iterfind(".//xsd:complexType", XSDNS))
        for new_element in new_elements:
            new_type = True
            for create_element in create_elements:
                if new_element.attrib['name'] == create_element.attrib['name']:
                    CmdXsdAutoCompare.merge(create_element, new_element)
                    new_type = False
                    break
            if new_type is True:
                CmdXsdAutoCompare.setcomplexfieldindex(new_element)
                create_root.append(new_element)
        # Same pass for simple types / enumerations.
        create_elements = list(
            create_root.iterfind(".//xsd:simpleType", XSDNS))
        new_elements = list(new_root.iterfind(".//xsd:simpleType", XSDNS))
        for new_element in new_elements:
            new_type = True
            for create_element in create_elements:
                if new_element.attrib['name'] == create_element.attrib['name']:
                    CmdXsdAutoCompare.merge_enumeration(
                        create_element, new_element)
                    new_type = False
                    break
            if new_type is True:
                CmdXsdAutoCompare.setenumerationfieldindex(new_element)
                create_root.append(new_element)
        create_tree.write(dst_file, encoding='utf-8', xml_declaration=True)

    def run(self):
        """Pair each new file with its same-named original and merge them."""
        new_to_orig_file = {}
        for filename in self.in_new_files:
            base_filename = os.path.basename(filename)
            new_to_orig_file[filename] = None
            for orig_filename in self.in_orig_files:
                if base_filename == os.path.basename(orig_filename):
                    new_to_orig_file[filename] = orig_filename
                    break
        logger.info(new_to_orig_file)
        for key, value in new_to_orig_file.items():
            self.compare(key, value)
cmddescription = 'Auto compare xsd file to auto merge xsd'


def makeoptions(optpartser):
    """Add the --path option pointing at the historical xsd directory."""
    path_help = "specify a old xsd file/directory"
    optpartser.add_argument("--path", type=str, default='./xsd_his',
                            help=path_help)
def run(options):
    """Command entry point: merge options.input against the historical xsds
    in options.path and write the result to options.output."""
    logger.info("xsd_compare param:(input:%s),(output:%s),(his_xsd:%s)",
                options.input, options.output, options.path)
    cmd = CmdXsdAutoCompare(options.path, options.input, options.output)
    cmd.run()
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,371
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/util.py
|
import re
import logging
import shutil
import os
logger = logging.getLogger(__name__)
# C++ reserved words: identifiers matching one of these get a trailing '_'
# appended by pbname()/cppnormalize() so generated code stays compilable.
cppkeyword = ('alignas', 'continue', 'friend', 'register', 'true',
              # BUG FIX: a missing comma used to fuse 'decltype' and 'goto'
              # into the single string 'decltypegoto', so neither keyword was
              # actually escaped.
              'alignof', 'decltype', 'goto', 'reinterpret_cast', 'try',
              'asm', 'default', 'if', 'return', 'typedef',
              'auto', 'delete', 'inline', 'short', 'typeid',
              'bool', 'do', 'int', 'signed', 'typename',
              'break', 'double', 'long', 'sizeof', 'union',
              'case', 'dynamic_cast', 'mutable', 'static', 'unsigned',
              'catch', 'else', 'namespace', 'static_assert', 'using',
              'char', 'enum', 'new', 'static_cast', 'virtual',
              'char16_t', 'explicit', 'noexcept', 'struct', 'void',
              'char32_t', 'export', 'nullptr', 'switch', 'volatile',
              'class', 'extern', 'operator', 'template', 'wchar_t',
              'const', 'false', 'private', 'this', 'while',
              'constexpr', 'float', 'protected', 'thread_local',
              'const_cast', 'for', 'public', 'throw')
# Common constants and application-level names that also need escaping.
cppconst = ('NULL', 'TRUE', 'FALSE', 'True', 'False')
appconst = ('IN', 'OUT', 'interface')
def camel_name(name):
    """Convert a dash/underscore separated name to CamelCase."""
    normalized = name.lower().replace('-', '_')
    return ''.join(part.capitalize() for part in normalized.split('_'))
def lower_name(name):
    """Normalize a name to lower-case with underscores instead of dashes."""
    return name.replace('-', '_').lower()
def pbname(name):
    """Lower-case protobuf identifier; C++ keywords get a '_' suffix."""
    ident = name.replace('-', '_').lower()
    return ident + '_' if ident in cppkeyword else ident
def cppname(name):
    """CamelCase C++ class/member name from a dash/underscore separated name."""
    words = name.lower().replace('-', '_').split('_')
    return ''.join(word.capitalize() for word in words)
def cppnormalize(name):
    """Turn an arbitrary display name into a valid C++ identifier.

    Comparison operators become mnemonic tokens (_LT_, _LE_, _GT_, _GE_,
    _EQ_, _PS_), remaining whitespace and illegal characters become '_',
    a leading digit gets a '_' prefix, and reserved words get a '_' suffix.
    """
    # BUG FIX: '<=' and '>=' must be substituted before the bare '<' and '>'
    # patterns.  The old order rewrote 'a <= b' as 'a_LT__EQ_b' because the
    # '<' pattern consumed the '<' first; same for '>='.
    name = re.sub(r'\s*<=\s*', '_LE_', name)
    name = re.sub(r'\s*<\s*', '_LT_', name)
    name = re.sub(r'\s*>=\s*', '_GE_', name)
    name = re.sub(r'\s*>\s*', '_GT_', name)
    name = re.sub(r'\s*=\s*', '_EQ_', name)
    name = re.sub(r'\s*\+\s*', '_PS_', name)
    name = re.sub(r'\s+', '_', name)
    # Guarded for the empty string (the old name[0] access raised IndexError).
    if name and name[0] in '0123456789':
        name = '_' + name
    name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
    if name in cppkeyword:
        name += '_'
    elif name in cppconst:
        name += '_'
    elif name in appconst:
        name += '_'
    return name
def allzero(default):
    """Return True when every character of *default* is the digit '0'."""
    return all(ch == '0' for ch in default)
def ipv4str(i):
    """Format a 32-bit integer as a dotted-quad string, low byte first.

    Byte 0 (the least-significant byte of *i*) becomes the first octet.
    """
    # BUG FIX: the fourth octet used 'i >> 32', which is 0 for every 32-bit
    # value; the high byte lives at 'i >> 24'.
    ipstr = "%d.%d.%d.%d" % (i & 0xFF, i >> 8 & 0xFF,
                             i >> 16 & 0xFF, i >> 24 & 0xFF)
    return ipstr
def mkdir(inDir, inRemove=False):
    """Create the parent directory of *inDir* (with intermediates) if missing.

    When *inRemove* is set and the parent exists, a tree is removed first.
    A bare filename (no directory component) is a no-op.
    """
    dir = os.path.dirname(inDir)
    if dir == '':
        return
    # NOTE(review): the existence check uses the parent 'dir' but rmtree
    # removes the full 'inDir' -- confirm which path is intended.
    if inRemove and os.path.exists(dir):
        shutil.rmtree(inDir)
    if os.path.exists(dir):
        logger.debug('%s already exist', dir)
    else:
        os.makedirs(dir)
        logger.debug('makedirs %s', dir)
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
# Printable ASCII (32..126) is rendered via '%2s' (right-aligned, width 2);
# all other byte values become a bare two-digit hex code.
_cescape_byte_to_str = ([r'%02X' % i for i in range(0, 32)] +
                        [r'%2s' % chr(i) for i in range(32, 127)] +
                        [r'%02X' % i for i in range(127, 256)])
# Variant table that hex-escapes every byte as '\XX'.
_cescape_byte_to_str_hex = ([r'\%02X' % i for i in range(0, 256)])


def cescape(text, hexed=False):
    """Escape *text* character-by-character via the lookup tables above.

    With hexed=True every character becomes a backslash-hex escape; otherwise
    only non-printable characters are hex-encoded.  Expects a str whose code
    points are all < 256 (indexes the 256-entry tables by ord()).
    """
    if hexed:
        return ''.join(_cescape_byte_to_str_hex[ord(c)] for c in text)
    else:
        return ''.join(_cescape_byte_to_str[ord(c)] for c in text)
class EncodeUtil(object):
    """Helpers converting between 32-bit integers and 4-char byte strings."""

    @staticmethod
    def DecodeBigEndian32(buffer):
        """Read a big-endian 32-bit integer from the first 4 characters."""
        value = 0
        for ch in buffer[:4]:
            value = value * 256 + ord(ch)
        return value

    @staticmethod
    def DecodeLittleEndian32(buffer):
        """Read a little-endian 32-bit integer from the first 4 characters."""
        value = 0
        for ch in reversed(buffer[:4]):
            value = value * 256 + ord(ch)
        return value

    @staticmethod
    def EncodeLittleEndian32(i):
        """Pack *i* into a 4-byte little-endian bytearray."""
        out = bytearray(4)
        for pos in range(4):
            out[pos] = (i >> (8 * pos)) & 0xFF
        return out
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,372
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/cmd_protoc.py
|
# -*- coding:utf-8 -*-
import os
import glob
import concurrent.futures
import sys
import os
import logging
logger = logging.getLogger(__name__)
cmddescription = 'compile all *.proto file under input directory into cpp/python... parallel'


def makeoptions(optparser):
    """Declare the command-line options of the protoc batch compiler."""
    optparser.add_argument("--protoc", type=str, default='protoc.exe',
                           help="protoc.exe")
    optparser.add_argument("-p", "--path", dest="path", default=[],
                           action="append", help="proto include path")
    optparser.add_argument("--excludefile", default=[], action="append",
                           help="proto exclude file list")
    optparser.add_argument("--format", type=str, default='cpp',
                           choices=['cpp', 'python'], help="compiled format")
def run(options):
    """Batch-compile every .proto under options.input into options.output.

    Shell commands are accumulated in ~1 KB chunks of file names and then
    executed in parallel via a process pool running os.system.
    """
    if os.path.exists(options.output) is False:
        os.mkdir(options.output)
    pfiles = ''
    ipath = ''
    # Build the -I include-path flags once.
    for p in options.path:
        ipath += '-I' + p + ' '
    path = options.input + '/*.proto'
    logger.info('protoc for %s start' % (path))
    # options.format is restricted to these two values by argparse choices.
    if options.format == 'cpp':
        commandprefix = options.protoc + ' --cpp_out=' + options.output + ' ' + ipath
    elif options.format == 'python':
        commandprefix = options.protoc + ' --python_out=' + options.output + ' ' + ipath
    commands = []
    for proto in glob.glob(path):
        if os.path.basename(proto) in options.excludefile:
            continue
        pfiles += proto + ' '
        # Flush a command once the accumulated file list exceeds 1 KB.
        if len(pfiles) > 0x400:
            command = commandprefix + pfiles
            logger.info(command)
            commands.append(command)
            pfiles = ''
    if pfiles != '':
        command = commandprefix + pfiles
        logger.info(command)
        commands.append(command)
    with concurrent.futures.ProcessPoolExecutor() as executor:
        executor.map(os.system, commands)
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,373
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/xgen.py
|
import logging
import concurrent.futures
from jinja2 import Environment, DictLoader
from .xtype import *
logger = logging.getLogger(__name__)
class XGenUtil(object):
    """Naming helpers mapping XElement/XType model objects to generated code.

    NOTE: the methods are deliberately written without *self* and are invoked
    as plain functions through the class, e.g. ``XGenUtil.capital(s)``.
    """

    def capital(inStr):
        # Upper-case only the first character, preserving the rest.
        return inStr[:1].upper() + inStr[1:]

    def classname(inXType):
        """C++ class name ('C' + CapitalizedName) for a complex type."""
        if not isinstance(inXType, XComplexType):
            raise XGenException("")
        return 'C' + XGenUtil.capital(inXType.m_name)

    def memberclass(inObj, inOrig=False):
        """C++ member type for a field: BYTE/WORD/DWORD (or BYTE array) for
        simple types by byte length; class name (or vector<class> for
        repeated fields unless inOrig) for complex types."""
        if not isinstance(inObj, XElement):
            raise XGenException("")
        name = ''
        if isinstance(inObj.m_type_obj, XSimpleType):
            if inObj.m_type_obj.m_len == 1:
                name = 'BYTE'
            elif inObj.m_type_obj.m_len == 2:
                name = 'WORD'
            elif inObj.m_type_obj.m_len == 4:
                name = 'DWORD'
            elif inObj.m_type_obj.m_len > 0:
                # Longer simple types become BYTE arrays (see membersuffix).
                name = 'BYTE'
            else:
                raise XGenException('unkown simple type len')
        elif isinstance(inObj.m_type_obj, XComplexType):
            if hasattr(inObj, 'm_while') and inObj.m_while and not inOrig:
                name = 'vector<' + XGenUtil.classname(inObj.m_type_obj) + '> '
            else:
                name = XGenUtil.classname(inObj.m_type_obj)
        else:
            raise XGenException('unkown inObj.m_type_obj %s for field %s' % (
                inObj.m_type_obj, inObj.m_name))
        return name

    def memberclasspb(inObj):
        """Protobuf field type: int32 for 1/2/4-byte simple types, bytes for
        longer ones, lower-cased type name for complex types."""
        if not isinstance(inObj, XElement):
            raise XGenException("")
        name = ''
        if isinstance(inObj.m_type_obj, XSimpleType):
            if inObj.m_type_obj.m_len in (1, 2, 4):
                name = 'int32'
            elif inObj.m_type_obj.m_len > 0:
                name = 'bytes'
            else:
                raise XGenException('unkown simple type len')
        elif isinstance(inObj.m_type_obj, XComplexType):
            name = inObj.m_type_obj.m_name.lower()
        else:
            raise XGenException('unkown ftype %s', inObj.m_type_obj)
        return name

    def membername(inObj):
        """Hungarian-style C++ member name: m_by/m_w/m_dw for 1/2/4-byte
        scalars, m_ar for arrays and repeated complex fields, m_o for
        singular complex fields."""
        if not isinstance(inObj, XElement):
            raise XGenException("")
        name = ''
        if isinstance(inObj.m_type_obj, XSimpleType):
            if inObj.m_type_obj.m_len == 1:
                name = 'm_by' + XGenUtil.capital(inObj.m_name)
            elif inObj.m_type_obj.m_len == 2:
                name = 'm_w' + XGenUtil.capital(inObj.m_name)
            elif inObj.m_type_obj.m_len == 4:
                name = 'm_dw' + XGenUtil.capital(inObj.m_name)
            elif inObj.m_type_obj.m_len > 0:
                name = 'm_ar' + XGenUtil.capital(inObj.m_name)
            else:
                raise XGenException('unkown simple type len')
        elif isinstance(inObj.m_type_obj, XComplexType):
            if hasattr(inObj, 'm_while') and inObj.m_while:
                name = 'm_ar' + XGenUtil.capital(inObj.m_name)
            else:
                name = 'm_o' + XGenUtil.capital(inObj.m_name)
        else:
            raise XGenException('unkown ftype %s', inObj.m_type_obj)
        return name

    def membernamepb(inObj):
        """Protobuf field name: always the lower-cased element name."""
        if not isinstance(inObj, XElement):
            raise XGenException("")
        name = ''
        if isinstance(inObj.m_type_obj, XSimpleType):
            name = inObj.m_name.lower()
        elif isinstance(inObj.m_type_obj, XComplexType):
            name = inObj.m_name.lower()
        else:
            raise XGenException('unkown ftype %s', inObj.m_type_obj)
        return name

    def memberprefixpb(inObj):
        """Protobuf field label: 'repeated' when the element has m_while set,
        'optional' otherwise."""
        if not isinstance(inObj, XElement):
            raise XGenException("")
        name = ''
        if hasattr(inObj, 'm_while') and inObj.m_while:
            name = 'repeated'
        else:
            name = 'optional'
        return name

    def membersuffix(inObj):
        """C++ array suffix '[len]' for simple types longer than 4 bytes."""
        name = ''
        if not isinstance(inObj, XElement):
            raise XGenException("")
        if isinstance(inObj.m_type_obj, XSimpleType):
            if inObj.m_type_obj.m_len not in (1, 2, 4) and inObj.m_type_obj.m_len > 0:
                name = '[' + str(inObj.m_type_obj.m_len) + ']'
        return name

    def defaultallzero(inObj):
        """True when the element has no default or its default is all '0's."""
        if not isinstance(inObj, XElement):
            raise XGenException("")
        if not hasattr(inObj, 'm_default') or not inObj.m_default or len(inObj.m_default) <= 0:
            return True
        for ch in inObj.m_default:
            if ch != '0':
                logger.debug('notallzero default:%s' % (inObj.m_default))
                return False
        return True

    def defaulthex(inStr):
        """Render a hex-digit string as C escapes: 'A1B2' -> '\\xA1\\xB2'.

        Assumes an even-length string of hex digits -- TODO confirm callers.
        """
        hexs = ''
        idx = 0
        while idx < len(inStr):
            if idx % 2:
                hexs += inStr[idx]
            else:
                hexs += r'\x' + inStr[idx]
            idx += 1
        logger.debug('default %s hexed %s' % (inStr, hexs))
        return hexs
class XGenException(Exception):
    """Error raised by the xgen code-generation pipeline."""
    pass
class XGenField(object):
    """Wrapper around an XElement that precomputes every name/type variant
    (C++ and protobuf) consumed by the templates."""

    def __init__(self, inField):
        self.m_field = inField  # the underlying XElement
        pass

    def parse(self):
        """Derive the generated-code attributes from the wrapped element."""
        self.x_member_class = XGenUtil.memberclass(self.m_field)
        self.x_member_class_orig = XGenUtil.memberclass(self.m_field, True)
        self.x_member_class_pb = XGenUtil.memberclasspb(self.m_field)
        self.x_member_name = XGenUtil.membername(self.m_field)
        self.x_member_name_pb = XGenUtil.membernamepb(self.m_field)
        self.x_member_suffix = XGenUtil.membersuffix(self.m_field)
        self.x_member_prefix_pb = XGenUtil.memberprefixpb(self.m_field)
        if isinstance(self.m_field.m_type_obj, XSimpleType):
            self.x_member_default_allzero = XGenUtil.defaultallzero(
                self.m_field)
            # Non-scalar simple types with a default get a hex-escaped form.
            if hasattr(self.m_field, 'm_default') and self.m_field.m_default and self.m_field.m_type_obj.m_len not in (1, 2, 4):
                self.x_member_default_hexed = XGenUtil.defaulthex(
                    self.m_field.m_default)
class XGenComplex(object):
    """
    wrapper for XComplexType: precomputes class names and wraps each field
    in an XGenField
    """

    def __init__(self, inComplex):
        self.m_complex = inComplex
        self.x_class = XGenUtil.classname(self.m_complex)     # C++ class name
        self.x_class_pb = self.m_complex.m_name.lower()       # protobuf name
        self.x_fields = []                                    # XGenField list

    def parse(self):
        """Wrap and parse every element of the complex type."""
        for field in self.m_complex.m_elements:
            xfield = XGenField(field)
            xfield.parse()
            self.x_fields.append(xfield)
class XGen(object):
    """
    XGen: one generation unit driven by an XGenG group.  Subclasses are
    expected to override render(); generate() is the entry point the group
    submits to its executor.
    """

    def __init__(self, inName):
        self.m_name = inName    # unique name within the owning XGenG
        self.m_result = ''      # rendered output collected by the group
        self.m_env = None
        self.m_xgeng = None     # back-reference set by XGenG.xgen_add
        self.m_xtypes = []      # raw input types
        self.m_xcomplexs = []   # XGenComplex wrappers built by parse()
        self.m_xsimples = []    # XSimpleType inputs kept as-is

    def xtype_add(self, inXType):
        self.m_xtypes.append(inXType)

    def parse(self):
        # Split inputs: simple types pass through, complex types get wrapped.
        for type in self.m_xtypes:
            if isinstance(type, XSimpleType):
                self.m_xsimples.append(type)
            else:
                complex = XGenComplex(type)
                complex.parse()
                self.m_xcomplexs.append(complex)

    def render(self):
        # Subclasses render their template into self.m_result here.
        # self.m_result = xgenenv.render(inTemplate, kwargs)
        pass

    def generate(self):
        self.parse()
        self.render()
def fn_gen(inXGen):
    """Executor task: run one XGen unit's generate()."""
    return inXGen.generate()
def fn_output_file(inFile, inStr):
    """Default outputer: append the rendered text to an open file object."""
    inFile.write(inStr)
class XGenG(object):
    """
    XGen group: runs its XGen units on a thread pool and emits each result,
    via the configured outputer, in the same order the units were added
    (relies on dict preserving insertion order).
    """

    def __init__(self, maxworkers=None, outputer=None, outputargs=()):
        self.m_name = 'XGenG'
        self.m_xgens = {}            # name -> XGen, in insertion order
        self.m_xgen_to_future = {}   # XGen -> submitted future
        self.m_executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=maxworkers)
        if outputer and not callable(outputer):
            raise XGenException("outputer must be callable")
        self.m_outputer = outputer
        self.m_outputargs = outputargs
        self.m_parent = None

    def xgen_add(self, inXGen):
        """Register a generation unit; names must be unique in the group."""
        if inXGen.m_name in self.m_xgens:
            raise XGenException("XGen name must be unique")
        inXGen.m_xgeng = self
        self.m_xgens[inXGen.m_name] = inXGen

    def xtype_add(self, inXGenName, inXTypes):
        """Feed input types to an already-registered XGen unit."""
        if inXGenName not in self.m_xgens:
            raise XGenException("XGen %s not exist" % (inXGenName))
        for xtype in inXTypes:
            self.m_xgens[inXGenName].xtype_add(xtype)

    def generate(self):
        """Submit all units, then collect and output results in add order."""
        for (name, xgen) in self.m_xgens.items():
            future = self.m_executor.submit(fn_gen, xgen)
            self.m_xgen_to_future[xgen] = future
        for (xgen, future) in self.m_xgen_to_future.items():
            concurrent.futures.wait([future])
            try:
                data = future.result()
            except Exception as exc:
                raise XGenException('%s %s occur an exception: %s' %
                                    (type(self), xgen.m_name, exc))
            else:
                logger.debug('%s:%s generated: %s' %
                             (self.m_name, xgen.m_name, ''))
                # The result text lives in xgen.m_result, not in 'data'.
                self.m_outputer(self.m_outputargs, xgen.m_result)
class XGenFile(object):
    """Map/reduce style generator for a single output file.

    map() registers pre, parallel and post XGens on an XGenG group;
    reduce() runs the group, appending each XGen's rendered text to the
    output file through fn_output_file.  Subclasses must implement
    xgenp() and may override mappre()/mappost()/paralit()/output().
    """
    def __init__(self, inXTree, inDir, inFile):
        self.m_xtree = inXTree
        self.m_dir = inDir
        self.m_filename = inFile
        self.m_file = open(self.m_dir + '/' + self.m_filename, 'w')
        self.m_xgeng = XGenG(
            maxworkers=None, outputer=fn_output_file, outputargs=self.m_file)
        self.m_xgeng.m_parent = self
    def mappre(self):
        """Build the pre (header) XGen and add it to the group; no-op hook."""
        pass
    def xgenp(self, inGenName):
        """Build one parallel XGen; must be implemented by subclasses."""
        raise XGenException('you should implement %s' % ('xgenp'))
    def paralit(self, inXType):
        """Return True when inXType should be handled by a parallel XGen."""
        if inXType.m_refcnt < 1:
            return False
        return True
    def paralmap(self):
        """Return referenced complex types destined for parallel XGens,
        sorted case-insensitively by name."""
        complexs = []
        for complex in sorted(self.m_xtree.m_complex_types, key=lambda d: d.m_name.upper()):
            if complex.m_refcnt < 1:
                continue
            if self.paralit(complex):
                complexs.append(complex)
        return complexs
    def mapparallel(self):
        """Split paralmap()'s types into up to 4 chunks and register one
        parallel XGen per chunk (fewer chunks for small inputs)."""
        xtypes = self.paralmap()
        logger.debug("%s will parallel process %s" %
                     (type(self).__name__, [d.m_name for d in xtypes]))
        total = len(xtypes)
        if total < 1:
            return
        # degree of parallelism grows with the number of types
        paranum = 4
        if total < 50:
            paranum = 1
        elif total < 200:
            paranum = 2
        elif total < 500:
            paranum = 3
        step = int(total / paranum)
        if step < 1:
            raise XGenException(
                "too much parallel, total[%d], paranum[%d]" % (total, paranum))
        i = 0
        while i < total:
            genname = 'xgenparallel' + str(int(i/step))
            xgen = self.xgenp(genname)
            self.m_xgeng.xgen_add(xgen)
            end = i + step
            self.m_xgeng.xtype_add(genname, xtypes[i:end])
            logger.debug("%s %s process %s" % (type(self).__name__,
                                               genname, [d.m_name for d in xtypes[i:end]]))
            i += step
    def mappost(self):
        """Build the post (footer) XGen and add it to the group; no-op hook."""
        pass
    def map(self):
        """Register all XGens (pre, parallel, post) on the group."""
        self.mappre()
        self.mapparallel()
        self.mappost()
    def reduce(self):
        """Run the group: generates and writes every XGen's result."""
        self.m_xgeng.generate()
    def generate(self):
        """Full pipeline: map, reduce, then the output() hook."""
        self.map()
        self.reduce()
        self.output()
    def output(self):
        """Post-write hook; no-op by default."""
        pass
    def __del__(self):
        # BUG FIX: if open() in __init__ raised, m_file was never set and
        # this destructor itself raised AttributeError; guard for that.
        f = getattr(self, 'm_file', None)
        if f is not None and not f.closed:
            f.close()
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,374
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgenc.py
|
# -*- coding:utf-8 -*-
#! /usr/bin/python
#
import sys
from argparse import ArgumentParser
import os
import logging
import itertools
import glob
import re
import xgen
import json
# Root logger: capture everything at DEBUG, but show only INFO and above on
# the console; a per-command file handler is attached later by config_logger().
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
streamHandler.setLevel(logging.INFO)
logger.addHandler(streamHandler)
def load_config(filepath='config.json'):
    """Load the command-option list from a JSON config file.

    :param filepath: path to the JSON file (a list of option dicts).
    :return: the parsed JSON value.
    :raises OSError: if the file cannot be opened.
    :raises json.JSONDecodeError: if the file is not valid JSON.
    """
    # BUG FIX: removed an unreachable `return []` that followed the
    # with-block return (and was of a different type anyway).
    with open(filepath, 'r') as f:
        return json.load(f)
class Option(object):
    """Attribute bag that round-trips to and from a JSON object string."""
    def to_json_string(self):
        """Serialize all instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda obj: obj.__dict__)
    def from_json_string(self, json_string):
        """Populate instance attributes from a JSON object string."""
        for name, value in json.loads(json_string).items():
            setattr(self, name, value)
def load_options(filepath):
    """Read the config file and index its entries as Option objects keyed
    by their 'command' attribute."""
    cmd_options = {}
    for entry in load_config(filepath):
        opt = Option()
        opt.from_json_string(json.dumps(entry))
        cmd_options[opt.command] = opt
    return cmd_options
def config_logger(options):
    """Attach a file handler to the root logger using the path and level
    configured on *options* (options.logfile / options.loglevel)."""
    handler = logging.FileHandler(filename=options.logfile)
    handler.setLevel(options.loglevel)
    handler.setFormatter(logging.Formatter(
        fmt='[%(asctime)s][%(levelname)s][%(threadName)s][%(filename)s:%(funcName)s:%(lineno)s]%(message)s'))
    logger.addHandler(handler)
def main(argv):
    """Load per-command options and run every configured xgen subcommand.

    :param argv: sys.argv; argv[1] may override the config file path.
    """
    filepath = "config.json"
    if len(argv) == 2:
        filepath = argv[1]
    cmd_options = load_options(filepath)
    cmdmodules = {}
    for options in cmd_options.values():
        if options.logfile:
            config_logger(options)
        try:
            module = __import__('xgen.cmd_' + options.command, globals(), locals(), ['*'], 0)
        except Exception as e:
            logger.error('can not import subcommand (%s)' % options.command)
            logger.error('exception: (%s)' % e)
        else:
            # only accept modules exposing the expected interface
            if hasattr(module, 'makeoptions') and hasattr(module, 'run'):
                cmdmodules[options.command] = module
        # BUG FIX: previously the command was invoked unconditionally and
        # raised KeyError when the import failed or the module lacked the
        # makeoptions/run interface; skip such commands instead.
        if options.command not in cmdmodules:
            continue
        logger.info('xgenc command %s start for %s' % (options.command, options.input))
        cmdmodules[options.command].run(options)
        logger.info('xgenc command %s end for %s' % (options.command, options.input))
if __name__ == '__main__':
main(sys.argv)
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,375
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/cmd_ypk_xsd.py
|
# -*- coding:utf-8 -*-
"""
generate fiberhome ne dev xsd from yang
"""
import pdb
import concurrent.futures
import logging
import re
import glob
import os
import pyang
from pyang.types import EnumerationTypeSpec, IntTypeSpec, StringTypeSpec, EnumTypeSpec, RangeTypeSpec, BooleanTypeSpec
from xgen import util
logger = logging.getLogger(__name__)
class YException(Exception):
    """Raised for errors during yang-to-xsd generation."""
def camelname(name):
    """Convert a yang identifier to CamelCase.

    Lowercases the input, treats '-' like '_', and capitalizes each
    resulting piece: 'foo-bar' -> 'FooBar'.
    """
    pieces = name.lower().replace('-', '_').split('_')
    return ''.join(piece.capitalize() for piece in pieces)
class YWrapper:
    """Static helpers over pyang statements: type resolution, grouping
    reference counting, node renaming and list-key ordering."""
    @staticmethod
    def type_spec(stmt):
        # Resolve the effective pyang type spec, following typedef chains.
        if stmt.keyword == 'type':
            type_stmt = stmt
        else:
            type_stmt = stmt.search_one('type')
        if hasattr(type_stmt, 'i_typedef') and type_stmt.i_typedef is not None:
            typedef_stmt = type_stmt.i_typedef
            return YWrapper.type_spec(typedef_stmt)
        elif hasattr(type_stmt, 'i_type_spec'):
            return type_stmt.i_type_spec
        else:
            return None
    @staticmethod
    def count_grouping_uses(stmt):
        # Recursively tag each grouping with x_refcount = number of `uses`
        # statements referring to it anywhere beneath stmt.
        for stmt_uses in stmt.search('uses'):
            # if stmt_uses.i_module != stmt_uses.i_grouping.i_module:
            #     continue
            if hasattr(stmt_uses.i_grouping, 'x_refcount'):
                stmt_uses.i_grouping.x_refcount += 1
            else:
                stmt_uses.i_grouping.x_refcount = 1
        if hasattr(stmt, 'i_children'):
            for ch in stmt.i_children:
                YWrapper.count_grouping_uses(ch)
    @staticmethod
    def nodename(stmt):
        # Node name, honoring an optional (yymapping, rename) override.
        fname = stmt.arg
        renamestmt = stmt.search_one(('yymapping', 'rename'))
        if renamestmt:
            fname = renamestmt.arg
        return fname
    @staticmethod
    def keyseq(stmt):
        # Return stmt's 1-based position within its parent list's key
        # statement, or 0 when stmt is not a list key.
        def attrsearch(tag, attr, list):
            for x in list:
                if getattr(x, attr) == tag:
                    return x
            return None
        keyi = 0
        if hasattr(stmt, 'parent') and stmt.parent.keyword == 'list':
            key = stmt.parent.search_one('key')
            if not key:
                return 0
            for x in key.arg.split():
                if x == '':
                    continue
                if x.find(":") == -1:
                    name = x
                else:
                    # key entries may be prefix-qualified
                    [prefix, name] = x.split(':', 1)
                keyi += 1
                ptr = attrsearch(name, 'arg', stmt.parent.i_children)
                if ptr is stmt:
                    return keyi
        return 0
class YMGen(object):
    """Generates one fiberhome dev-ne XSD file from a single yang module."""
    def __init__(self, inModule, inXsdFile, inExceptionOnDuplicate):
        # inModule: validated pyang module statement
        # inXsdFile: output .xsd path, opened immediately
        # inExceptionOnDuplicate: raise instead of log on duplicate complexType
        self.m_module = inModule
        self.m_xsdfile = inXsdFile
        self.m_file = open(inXsdFile, "w+", encoding="utf-8")
        self.m_enums = {}  # generated enum type name -> pyang type spec
        self.m_groupings = []  # groupings emitted as standalone complexTypes
        self.m_rpcs = {}  # NOTE(review): never read in this file -- confirm use
        self.m_complex_types = {}  # complexType name -> generated xsd text
        self.m_exception_on_duplicate = inExceptionOnDuplicate
        # module-level (yymapping, expanddefault) controls whether containers
        # are inlined by default (see containergen)
        self.m_expand_default = False
        stmt = self.m_module.search_one(('yymapping', 'expanddefault'))
        if stmt:
            if stmt.arg == 'expand':
                self.m_expand_default = True
            elif stmt.arg == 'noexpand':
                self.m_expand_default = False
    def __del__(self):
        # NOTE(review): if open() in __init__ raised, m_file is unset and
        # this destructor would raise AttributeError -- confirm/guard.
        self.m_file.close()
    def gen(self):
        """Write the full XSD document.

        Order matters: topgen/rpcgen/notificationgen populate
        self.m_complex_types and self.m_enums as side effects, so they run
        before the collected complexTypes (sorted case-insensitively) and
        the enum definitions are flushed.
        """
        self.m_file.write(self.header())
        self.m_file.write(self.topgen())
        self.m_file.write(self.rpcgen())
        self.m_file.write(self.notificationgen())
        for name in sorted(self.m_complex_types.keys(), key=lambda d: d.upper()):
            self.m_file.write(self.m_complex_types[name])
        self.m_file.write(self.enumgen())
        self.m_file.write(self.footer())
    def header(self):
        """Return the XSD prolog: the schema element plus the YANGModules,
        YANGRpcs and YANGNotifys index complexTypes for this module."""
        text = """<?xml version="1.0" encoding="utf-8"?>
<!--auto generated by xgen toolkit, bug mail to zengmao@fiberhome.com -->
<xsd:schema xmlns:xdo="urn:pxp" xmlns:ms="urn:schemas-microsoft-com:xslt" xmlns:stack="urn:anything" xmlns:xdb="http://xmlns.oracle.com/xdb" xmlns:w="http://www.fiberhome.com.cn/board/control" xmlns:y="http://www.fiberhome.com.cn/ns/yang" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
	<xsd:complexType name="YANGModules">
		<xsd:sequence>
"""
        text += '\t\t\t<xsd:element name="%s" type="%s"/>' % (
            self.m_module.arg, self.m_module.arg)
        text += """
		</xsd:sequence>
	</xsd:complexType>
"""
        # one index entry per rpc, annotated with its input/output types
        rpcs = [ch for ch in self.m_module.i_children if ch.keyword == "rpc"]
        if len(rpcs):
            text += """
	<xsd:complexType name="YANGRpcs">
		<xsd:sequence>
"""
            for rpc in rpcs:
                text += '\t\t\t<xsd:element name="%s" y:ns="%s" ' % (
                    rpc.arg, self.m_module.search_one('namespace').arg)
                if rpc.search_one('input'):
                    text += 'y:input="%s-input" ' % (rpc.arg)
                if rpc.search_one('output'):
                    text += 'y:output="%s-output"' % (rpc.arg)
                text += '/>\n'
            text += """
		</xsd:sequence>
	</xsd:complexType>
"""
        # one index entry per notification
        notifys = [
            ch for ch in self.m_module.i_children if ch.keyword == "notification"]
        if len(notifys):
            text += """
	<xsd:complexType name="YANGNotifys">
		<xsd:sequence>
"""
            for notify in notifys:
                text += '\t\t\t<xsd:element name="%s" type="%s" y:ns="%s" ' % (
                    notify.arg, notify.arg, self.m_module.search_one('namespace').arg)
                text += '/>\n'
            text += """
		</xsd:sequence>
	</xsd:complexType>
"""
        return text
    def footer(self):
        """Return the trailing built-in simpleTypes (string, fixed-length
        hexBinary integers, boolean) and the closing </xsd:schema> tag."""
        text = """
	<xsd:simpleType name="string">
		<xsd:restriction base="xsd:string">
		</xsd:restriction>
	</xsd:simpleType>
	<xsd:simpleType name="int32">
		<xsd:restriction base="xsd:hexBinary">
			<xsd:length value="4"/>
		</xsd:restriction>
	</xsd:simpleType>
	<xsd:simpleType name="uint32">
		<xsd:restriction base="xsd:hexBinary">
			<xsd:length value="4"/>
		</xsd:restriction>
	</xsd:simpleType>
	<xsd:simpleType name="int64">
		<xsd:restriction base="xsd:hexBinary">
			<xsd:length value="8"/>
		</xsd:restriction>
	</xsd:simpleType>
	<xsd:simpleType name="uint64">
		<xsd:restriction base="xsd:hexBinary">
			<xsd:length value="8"/>
		</xsd:restriction>
	</xsd:simpleType>
	<xsd:simpleType name="boolean">
		<xsd:restriction base="xsd:hexBinary">
			<xsd:length value="1"/>
			<xsd:enumeration value="00" w:en="false" w:cn="false"/>
			<xsd:enumeration value="01" w:en="true" w:cn="false"/>
		</xsd:restriction>
	</xsd:simpleType>
</xsd:schema>
"""
        return text
    def topgen(self):
        """Emit the module's top-level complexType; side effect: fieldgen
        registers child complexTypes/enums as it walks the tree."""
        text = '\t<xsd:complexType name="%s" y:ns="%s">\n' % (
            self.m_module.arg, self.m_module.search_one('namespace').arg)
        text += '\t\t<xsd:sequence>\n'
        for ch in self.children(self.m_module):
            # "/" marks top-level paths
            text += self.fieldgen(ch, "/")
        text += '\t\t</xsd:sequence>\n'
        text += '\t</xsd:complexType>\n\n'
        return text
    def rpcgen(self):
        """Emit <name>-input / <name>-output complexTypes for every rpc."""
        text = ''
        rpcs = [ch for ch in self.m_module.i_children if ch.keyword == "rpc"]
        for rpc in rpcs:
            rpc_input = rpc.search_one('input')
            if rpc_input:
                text += '\t<xsd:complexType name="%s-input">\n' % (rpc.arg)
                text += '\t\t<xsd:sequence>\n'
                for ch in rpc_input.i_children:
                    if ch.keyword in pyang.statements.data_definition_keywords:
                        # NOTE(review): input uses ppath '' while output uses
                        # '/' -- confirm the asymmetry is intentional
                        text += self.fieldgen(ch, '')
                text += '\t\t</xsd:sequence>\n'
                text += '\t</xsd:complexType>\n\n'
            rpc_output = rpc.search_one('output')
            if rpc_output:
                text += '\t<xsd:complexType name="%s-output" y:ns="%s">\n' % (
                    rpc.arg, self.m_module.search_one('namespace').arg)
                text += '\t\t<xsd:sequence>\n'
                for ch in rpc_output.i_children:
                    if ch.keyword in pyang.statements.data_definition_keywords:
                        text += self.fieldgen(ch, '/')
                text += '\t\t</xsd:sequence>\n'
                text += '\t</xsd:complexType>\n\n'
        return text
    def enumgen(self):
        """Emit a one-byte hexBinary simpleType for every enum collected by
        leafgen/leaflistgen, sorted case-insensitively by type name."""
        text = ''
        for (enumname, enumtype) in sorted(self.m_enums.items(), key=lambda e: e[0].upper()):
            text += '\t<xsd:simpleType name="%s">\n' % enumname
            text += '\t\t<xsd:restriction base="xsd:hexBinary">\n'
            text += '\t\t\t<xsd:length value="1"/>\n'
            for enum in enumtype.enums:
                # enum is (name, value); the value is rendered as bare hex
                hval = hex(int(enum[1])).replace("0x", "")
                hval = hval.upper()
                text += '\t\t\t\t<xsd:enumeration value="%s" w:en="%s" w:cn="%s"/>\n' % (
                    hval, enum[0], enum[0])
            text += '\t\t</xsd:restriction>\n'
        text += '\t</xsd:simpleType>\n\n'
        return text
    def nodegen(self, stmt):
        """Generate a complexType for *stmt* and cache it in
        self.m_complex_types; duplicates either raise or are logged
        depending on m_exception_on_duplicate."""
        nname = YWrapper.nodename(stmt)
        text = '\t<xsd:complexType name="%s">\n' % (nname)
        text += '\t\t<xsd:sequence>\n'
        for ch in self.children(stmt):
            text += self.fieldgen(ch, "")
        text += '\t\t</xsd:sequence>\n'
        text += '\t</xsd:complexType>\n\n'
        # a (yymapping, override) annotation allows silent replacement
        if nname in self.m_complex_types and not stmt.search_one(('yymapping', 'override')):
            if self.m_exception_on_duplicate:
                raise YException("%s duplicate@ %s" %
                                 (nname, stmt.i_module.arg))
            else:
                logger.error("%s duplicate", nname)
        self.m_complex_types[nname] = text
    def fieldgen(self, stmt, ppath):
        """Dispatch one child statement to the matching generator; returns
        '' for keywords with no handler."""
        text = ''
        if stmt.keyword == 'uses':
            text = self.usesgen(stmt)
        elif stmt.keyword == 'leaf':
            text = self.leafgen(stmt, ppath)
        elif stmt.keyword == 'container':
            text = self.containergen(stmt, ppath)
        elif stmt.keyword == 'list':
            text = self.listgen(stmt, ppath)
        elif stmt.keyword == 'leaf-list':
            text = self.leaflistgen(stmt, ppath)
        elif stmt.keyword == 'notification':
            # NOTE(review): notificationgen() takes no (stmt, ppath) args, so
            # this call would raise TypeError if ever reached; callers filter
            # children to data_definition_keywords, so the branch looks dead
            # -- confirm and remove or fix.
            text = self.notificationgen(stmt, ppath)
        return text
def usesgen(self, stmt):
gname = YWrapper.nodename(stmt.i_grouping)
renamestmt = stmt.search_one(('yymapping', 'rename'))
if renamestmt:
fname = renamestmt.arg
else:
fname = gname
text = '\t\t\t<xsd:element name="%s" type="%s"/>\n' % (fname, gname)
return text
def leafgen(self, stmt, ppath):
text = '\t\t\t<xsd:element name="%s" ' % (YWrapper.nodename(stmt))
if hasattr(stmt, 'i_augment'):
b = stmt.i_module.search_one('belongs-to')
if b is not None:
ns = stmt.i_module.i_ctx.get_module(
b.arg).search_one('namespace')
else:
ns = stmt.i_module.search_one('namespace')
text += 'y:ns="' + ns.arg + '" '
typename = 'string'
typespec = YWrapper.type_spec(stmt)
# if stmt.arg == 'prefix-ipv4':
# pdb.set_trace()
if typespec is not None:
if isinstance(typespec, IntTypeSpec) or isinstance(typespec, RangeTypeSpec):
if typespec.name in ('int8', 'int16', 'int32', 'uint8', 'uint16'):
typename = 'int32'
elif typespec.name in ('uint32'):
typename = 'uint32'
elif typespec.name in ('int64'):
typename = 'int64'
elif typespec.name in ('uint64'):
typename = 'uint64'
else:
typename = 'string'
elif isinstance(typespec, StringTypeSpec):
typename = 'string'
elif isinstance(typespec, BooleanTypeSpec):
typename = 'boolean'
elif isinstance(typespec, EnumTypeSpec):
typestmt = stmt.search_one('type')
typename = typestmt.arg
renamestmt = typestmt.search_one(('yymapping', 'rename'))
if renamestmt:
typename = renamestmt.arg
elif typename == 'enumeration':
typename = YWrapper.nodename(stmt)
typename = 'E' + typename
elif typename.find(":") == -1:
prefix = None
if stmt.i_module is self.m_module:
typename = 'E' + camelname(typename)
else:
typename = 'E' + \
camelname(stmt.i_module.i_prefix) + \
'_' + camelname(typename)
else:
[prefix, typename] = typename.split(':', 1)
typename = 'E' + \
camelname(prefix) + '_' + camelname(typename)
if typename in self.m_enums:
if typespec is self.m_enums[typename]:
logger.debug('enum %s refed multi time' % (typename))
else:
raise YException('enum %s refined' % (typename))
else:
self.m_enums[typename] = typespec
text += 'type="%s" ' % (typename)
# if hasattr(stmt, 'i_is_key'):
# keyi = 1
# for ptr in stmt.parent.i_key:
# if ptr is stmt:
# break
# keyi += 1
keyi = YWrapper.keyseq(stmt)
if keyi > 0:
text += 'y:key="%s" ' % keyi
elif stmt.search_one('mandatory'):
if stmt.search_one('mandatory').arg != 'false':
text += 'y:leafmand="" '
if YWrapper.nodename(stmt) != stmt.arg:
text += 'y:leafname="%s " ' % (stmt.arg)
if ppath != "":
text += 'y:path="' + ppath + '" '
if not hasattr(stmt, 'i_is_key'):
if stmt.search_one(('yymapping', 'nodeopr')) or stmt.search_one('must'):
text += 'y:nodeopr="" '
text += '/>\n'
return text
def leaflistgen(self, stmt, ppath):
# self.nodegen(stmt)
nname = YWrapper.nodename(stmt)
typespec = YWrapper.type_spec(stmt)
typename = "string"
if typespec is not None:
if isinstance(typespec, IntTypeSpec) or isinstance(typespec, RangeTypeSpec):
if typespec.name in ('int8', 'int16', 'int32', 'uint8', 'uint16'):
typename = 'int32'
elif typespec.name in ('uint32'):
typename = 'uint32'
elif typespec.name in ('int64'):
typename = 'int64'
elif typespec.name in ('uint64'):
typename = 'uint64'
else:
typename = 'string'
elif isinstance(typespec, StringTypeSpec):
typename = 'string'
elif isinstance(typespec, BooleanTypeSpec):
typename = 'boolean'
elif isinstance(typespec, EnumTypeSpec):
typestmt = stmt.search_one('type')
typename = typestmt.arg
renamestmt = typestmt.search_one(('yymapping', 'rename'))
if renamestmt:
typename = renamestmt.arg
elif typename == 'enumeration':
typename = YWrapper.nodename(stmt)
typename = 'E' + typename
elif typename.find(":") == -1:
prefix = None
if stmt.i_module is self.m_module:
typename = 'E' + camelname(typename)
else:
typename = 'E' + \
camelname(stmt.i_module.i_prefix) + \
'_' + camelname(typename)
else:
[prefix, typename] = typename.split(':', 1)
typename = 'E' + \
camelname(prefix) + '_' + camelname(typename)
if typename in self.m_enums:
if typespec is self.m_enums[typename]:
logger.debug('enum %s refed multi time' % (typename))
else:
raise YException('enum %s refined' % (typename))
else:
self.m_enums[typename] = typespec
text = '\t\t\t<xsd:element name="%s" type="%s" y:list="" y:path="%s"' % (
nname, typename, ppath + stmt.arg)
if hasattr(stmt, 'i_augment'):
b = stmt.i_module.search_one('belongs-to')
if b is not None:
ns = stmt.i_module.i_ctx.get_module(
b.arg).search_one('namespace')
else:
ns = stmt.i_module.search_one('namespace')
text += ' y:ns="' + ns.arg + '" '
text += "/>\n"
return text
    def notificationgen(self):
        """Emit one complexType per notification statement of the module."""
        text = ''
        notifications = [
            ch for ch in self.m_module.i_children if ch.keyword == "notification"]
        for notification in notifications:
            text += '\t<xsd:complexType name="%s">\n' % (notification.arg)
            text += '\t\t<xsd:sequence>\n'
            for ch in notification.i_children:
                if ch.keyword in pyang.statements.data_definition_keywords:
                    text += self.fieldgen(ch, '')
            text += '\t\t</xsd:sequence>\n'
            text += '\t</xsd:complexType>\n\n'
        return text
    def listgen(self, stmt, ppath):
        """Emit a y:list element for a yang list; the list's own complexType
        is generated (and cached) by nodegen()."""
        self.nodegen(stmt)
        nname = YWrapper.nodename(stmt)
        text = '\t\t\t<xsd:element name="%s" type="%s" y:list="" y:path="%s"' % (
            nname, nname, ppath + stmt.arg)
        if hasattr(stmt, 'i_augment'):
            # augmented-in node: tag it with its source module's namespace
            b = stmt.i_module.search_one('belongs-to')
            if b is not None:
                ns = stmt.i_module.i_ctx.get_module(
                    b.arg).search_one('namespace')
            else:
                ns = stmt.i_module.search_one('namespace')
            text += ' y:ns="' + ns.arg + '" '
        text += "/>\n"
        return text
    def containergen(self, stmt, ppath):
        """Emit a container either as an element referencing its own
        complexType or, when expansion applies, inline its children at the
        current level (dropping the wrapper element)."""
        nname = YWrapper.nodename(stmt)
        text = '\t\t\t<xsd:element name="%s" type="%s" ' % (nname, nname)
        expand = self.m_expand_default
        if hasattr(stmt, 'i_augment'):
            # augmented containers are never inlined
            b = stmt.i_module.search_one('belongs-to')
            if b is not None:
                ns = stmt.i_module.i_ctx.get_module(
                    b.arg).search_one('namespace')
            else:
                ns = stmt.i_module.search_one('namespace')
            text += 'y:ns="' + ns.arg + '" '
            expand = False
        # explicit annotations / structural conditions override the default
        if stmt.search_one(('yymapping', 'nodeopr')) or stmt.search_one('must'):
            text += ' y:nodeopr="" '
            expand = False
        elif stmt.search_one(('yymapping', 'noexpand')):
            expand = False
        elif stmt.search_one(('yymapping', 'expand')):
            expand = True
        elif ppath == '/':
            expand = False
        elif stmt.search_one('when'):
            expand = False
        else:
            # never inline a container holding a mandatory leaf
            for c in stmt.i_children:
                if c.keyword == 'leaf':
                    m = c.search_one('mandatory')
                    if m and m.arg != 'false':
                        expand = False
                        break
        if expand:
            # inline: discard the wrapper element, emit children directly
            text = ''
            for ch in stmt.i_children:
                text += self.fieldgen(ch, ppath + stmt.arg + '/')
        else:
            text += 'y:path="' + ppath + stmt.arg + '"/>\n'
            self.nodegen(stmt)
        return text
    def children(self, stmt):
        """
        Return the child statements to emit for *stmt*, in yang-file order.

        Large, multiply-used groupings (x_refcount >= 2 and >= 10 children,
        without noexpand/expand overrides) are bundled: the first child that
        belongs to such a grouping is replaced by the `uses` statement
        itself and the grouping's remaining children are suppressed; the
        grouping gets its own complexType via nodegen().
        """
        chs = []
        if hasattr(stmt, 'i_children'):
            usess = []
            if stmt.keyword != 'module':
                # collect the groupings worth bundling
                for uses in stmt.search('uses'):
                    if not uses.i_grouping.search_one(('yymapping', 'noexpand')):
                        if not hasattr(uses.i_grouping, 'x_refcount'):
                            continue
                        if uses.i_grouping.x_refcount < 2 or len(uses.i_grouping.i_children) < 10:
                            continue
                        if uses.search_one(('yymapping', 'expand')):
                            continue
                    usess.append(uses)
                    logger.debug('uses %s as bundle @%s',
                                 uses.i_grouping.arg, uses.pos)
                    if uses.i_grouping not in self.m_groupings:
                        self.m_groupings.append(uses.i_grouping)
                        self.nodegen(uses.i_grouping)
            # names of all children contributed by bundled groupings
            gchs = []
            for uses in usess:
                gchs.extend([s.arg for s in uses.i_grouping.i_children])
            if len(usess):
                logger.debug("%s grouping children %s", stmt.arg, gchs)
            gchsshot = []  # grouping children already accounted for
            for ch in stmt.i_children:
                if ch.keyword not in pyang.statements.data_definition_keywords:
                    continue
                if ch.arg in gchs:
                    # grouping child: emit the `uses` once, on first hit
                    if ch.arg not in gchsshot:
                        for uses in usess:
                            if ch.arg in [s.arg for s in uses.i_grouping.i_children]:
                                gchsshot.extend(
                                    [s.arg for s in uses.i_grouping.i_children])
                                chs.append(uses)
                                logger.debug("%s shot grouping %s",
                                             stmt.arg, uses.i_grouping.arg)
                                break
                            else:
                                logger.debug("%s not in grouping %s %s", ch.arg, uses.i_grouping.arg, [
                                             s.arg for s in uses.i_grouping.i_children])
                    else:
                        # this child's grouping was already emitted
                        pass
                else:
                    chs.append(ch)
        return chs
class CmdYXsd(object):
    """Load, validate and index all *.yang files of a directory; run()
    then emits one xsd per top-level module."""
    def __init__(self, inDir, inPath, inExceptionOnDuplicate, inWithWarning):
        # inDir: directory scanned for *.yang files
        # inPath: os.pathsep-separated pyang module search path
        repos = pyang.FileRepository(inPath, no_path_recurse=True)
        self.m_ctx = pyang.Context(repos)
        self.m_modules = []  # top-level modules with data/rpc/notification children
        self.m_filenames = []
        self.m_modulenames = []
        self.m_expanddefault = False
        self.m_exception_on_duplicate = inExceptionOnDuplicate
        self.m_with_warning = inWithWarning
        path = inDir + '/*.yang'
        # matches optional @YYYY-MM-DD revision in yang/yin file names
        r = re.compile(r"^(.*?)(\@(\d{4}-\d{2}-\d{2}))?\.(yang|yin)$")
        for filename in glob.glob(path):
            fd = open(filename, "r", encoding="utf-8")
            text = fd.read()
            m = r.search(filename)
            self.m_ctx.yin_module_map = {}
            if m is not None:
                (name, _dummy, rev, format) = m.groups()
                name = os.path.basename(name)
                module = self.m_ctx.add_module(filename, text, format, name, rev,
                                               expect_failure_error=False)
            else:
                module = self.m_ctx.add_module(filename, text)
            # keep only real (non-submodule) modules that define something
            if module and not module.search_one('belongs-to'):
                chs = [ch for ch in module.i_children
                       if ch.keyword in pyang.statements.data_definition_keywords or ch.keyword in ('rpc', 'notification')]
                if len(chs):
                    self.m_modules.append(module)
                    self.m_modulenames.append(module.arg)
                    self.m_filenames.append(filename)
        self.m_ctx.validate()
        def keyfun(e):
            # sort errors of the first input file before all others
            if e[0].ref == self.m_filenames[0]:
                return 0
            else:
                return 1
        self.m_ctx.errors.sort(key=lambda e: (e[0].ref, e[0].line))
        if len(self.m_filenames) > 0:
            # first print error for the first filename given
            self.m_ctx.errors.sort(key=keyfun)
        haserror = False
        for (epos, etag, eargs) in self.m_ctx.errors:
            if (self.m_ctx.implicit_errors == False and
                hasattr(epos.top, 'i_modulename') and
                epos.top.arg not in self.m_modulenames and
                epos.top.i_modulename not in self.m_modulenames and
                    epos.ref not in self.m_filenames):
                # this module was added implicitly (by import); skip this error
                # the code includes submodules
                continue
            elevel = pyang.error.err_level(etag)
            if pyang.error.is_warning(elevel):
                kind = "warning"
                if self.m_with_warning:
                    logger.error(str(epos) + ': %s: ' % kind +
                                 pyang.error.err_to_str(etag, eargs) + '\n')
            else:
                kind = "error"
                haserror = True
                logger.error(str(epos) + ': %s: ' % kind +
                             pyang.error.err_to_str(etag, eargs) + '\n')
        if haserror:
            raise YException(
                'some errors occur in yang modules, error details refer to log please')
        # tag groupings with their use counts for the bundling heuristic
        for module in self.m_modules:
            YWrapper.count_grouping_uses(module)
def run(self, inDir):
if os.path.exists(inDir) is False:
os.mkdir(inDir)
self.emit(self.m_modules, inDir)
def emit(self, modules, xsddir):
for module in modules:
# yxsdfile = xsddir + module.arg + ".xsd"
yxsdfile = os.path.join(xsddir, module.arg + ".xsd")
fd = open(yxsdfile, "w+", encoding="utf-8")
logger.info('generate %s', yxsdfile)
yGen = YMGen(module, yxsdfile, self.m_exception_on_duplicate)
yGen.gen()
cmddescription = 'generate fiberhome dev ne xsd model from yang'
def makeoptions(optpartser):
    """Register this subcommand's CLI options on the given argparse parser."""
    optpartser.add_argument(
        "-p", "--path",
        dest="path",
        action="append",
        default=[],
        help=os.pathsep + "-separated search path for yang modules")
    optpartser.add_argument(
        "--with-warning",
        default=False,
        action='store_true',
        help="log warning info for yang invalidation")
    optpartser.add_argument(
        "--exception-on-duplicate",
        default=False,
        action='store_true',
        help="raise exception when complexType duplicate")
def run(options):
    """Subcommand entry point: build the yang search path and generate.

    options.input is both the module directory and the first search-path
    entry; options.output receives the generated xsd files.
    """
    joined = os.pathsep.join(options.path)
    # "." is always the final search-path entry
    options.path = "." if len(joined) == 0 else joined + os.pathsep + "."
    cmd = CmdYXsd(options.input, options.input + os.pathsep + options.path,
                  options.exception_on_duplicate, options.with_warning)
    cmd.run(options.output)
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,376
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/xtype.py
|
#! /usr/bin/python
#
"""
fiberhome xsd file parser which parse:
w:pbout mark a complexType as a ne config block type
w:pbinc mark a complexType as a ne config block list item type
w:pbcommon mark a complexType as common type
w:en field english name
w:cn field chinese name
w:index field index which equal with protobuf message field no
w:if mark field type is a block complexType, and its id
w:nopb mark field donnot seriealized to protobuf message
w:while mark field as list
w:key mark field as list key
w:range mark field value range
w:show mark field show pattern
w:hide mark field not show
w:bittype mark field bittype
"""
from xml.etree import ElementTree
import re
import logging
import traceback
from . import util
# XML namespaces used by fiberhome xsd files
XSDNS = {'xsd': 'http://www.w3.org/2001/XMLSchema',
         'w': 'http://www.fiberhome.com.cn/board/control',
         'y': 'http://www.fiberhome.com.cn/ns/yang'}
# Fully-qualified ElementTree attribute names for the w:* annotations
# documented in the module docstring above.
W_PBOUT = '{%s}pbout' % XSDNS['w']
W_PBINC = '{%s}pbinc' % XSDNS['w']
W_PBCOMMON = '{%s}pbcommon' % XSDNS['w']
W_EN = '{%s}en' % XSDNS['w']
W_CN = '{%s}cn' % XSDNS['w']
W_INDEX = '{%s}index' % XSDNS['w']
W_NOPB = '{%s}nopb' % XSDNS['w']
W_WHILE = '{%s}while' % XSDNS['w']
W_KEY = '{%s}key' % XSDNS['w']
W_RANGE = '{%s}range' % XSDNS['w']
W_SHOW = '{%s}show' % XSDNS['w']
W_HIDE = '{%s}hide' % XSDNS['w']
W_IF = '{%s}if' % XSDNS['w']
W_BITTYPE = '{%s}bittype' % XSDNS['w']
logger = logging.getLogger(__name__)
class XTypeException(Exception):
    """Raised for invalid simpleType/complexType definitions."""
class XEnum(object):
    """One xsd simpleType/restriction/enumeration entry: numeric value
    plus its w:en (English) and w:cn (Chinese) labels."""
    def __init__(self, inValue, inEn=None, inCn=None):
        self.m_value = inValue
        self.m_en = inEn
        self.m_cn = inCn
class XElement(object):
    """Base wrapper for an xsd complexType/sequence/element node."""
    def __init__(self):
        self.m_name = ''
        self.m_type = ''
        self.m_en = ''
        self.m_cn = ''
        self.m_nopb = False
        self.m_type_obj = None  # resolved XType, filled in after parsing
        self.m_parent = None    # owning XComplexType
    def parse(self, inXmlElement):
        """Read name/type plus the optional w:en / w:cn / w:nopb attributes."""
        attrs = inXmlElement.attrib
        self.m_name = attrs['name']
        self.m_type = attrs['type']
        if W_EN in attrs:
            self.m_en = attrs[W_EN]
        if W_CN in attrs:
            self.m_cn = attrs[W_CN]
        if W_NOPB in attrs:
            self.m_nopb = True
class XElementBlock(XElement):
    """
    xsd complexType[@name = DataBlockConfig]/sequence/element: a config
    block entry whose numeric id is embedded in a w:if expression.
    """
    def __init__(self):
        super().__init__()
        self.m_no = None  # block id parsed from the w:if attribute
    def parse(self, inXmlElement):
        super().parse(inXmlElement)
        self.parse_block_id(inXmlElement)
    def parse_block_id(self, inXmlElement):
        # w:if looks like "...Index...'<hex>'..."; the quoted hex literal
        # is the block id.  NOTE(review): re.search returns None for a
        # malformed expression and .group(1) would then raise
        # AttributeError -- confirm input is always well-formed.
        if W_IF in inXmlElement.attrib:
            strid = inXmlElement.attrib[W_IF]
            hexid = re.search(".*Index.*\'([a-zA-Z0-9]*)\'.*", strid).group(1)
            self.m_no = int(hexid, base=16)
class XElementField(XElement):
    """
    xsd complexType[@pbcommon or @pbinc or @pbout]/sequence/element: a
    protobuf-mapped field with the full set of w:* annotations.
    """
    def __init__(self):
        super().__init__()
        self.m_default = None   # xsd 'default' attribute, verbatim
        self.m_range = None     # w:range expression
        self.m_show = None      # w:show pattern
        self.m_key = 0          # 1-based list-key position, 0 = not a key
        self.m_hide = False     # w:hide present
        self.m_index = None     # w:index = protobuf field number
        self.m_while = None     # w:while = list marker ('../field' or count)
        self.m_bittype = None   # w:bittype annotation
        self.m_while_for = None
        self.m_child_has_key = False
    def parse(self, inXmlElement):
        super().parse(inXmlElement)
        if 'default' in inXmlElement.attrib:
            self.m_default = inXmlElement.attrib['default']
        if W_RANGE in inXmlElement.attrib:
            self.m_range = inXmlElement.attrib[W_RANGE]
        if W_SHOW in inXmlElement.attrib:
            self.m_show = inXmlElement.attrib[W_SHOW]
        if W_KEY in inXmlElement.attrib:
            # a non-numeric w:key value silently degrades to position 1
            try:
                self.m_key = int(inXmlElement.attrib[W_KEY])
            except Exception as exc:
                self.m_key = 1
            if self.m_key < 1:
                raise XTypeException(
                    "field %s's w:key attribute must > 0" % (self.m_name))
        if W_HIDE in inXmlElement.attrib:
            self.m_hide = True
        if W_INDEX in inXmlElement.attrib:
            self.m_index = int(inXmlElement.attrib[W_INDEX])
        if W_WHILE in inXmlElement.attrib:
            # w:while must be either a '../fieldname' reference or a
            # positive integer literal
            self.m_while = inXmlElement.attrib[W_WHILE]
            if re.match('^\.\./[a-zA-Z_][a-zA-Z0-9_]*$', self.m_while) is None and re.match('^[1-9][0-9]*$', self.m_while) is None:
                raise XTypeException(
                    "complexType %s field %s attribute w:while invalid, must be '../fieldname'" % (self.m_parent.m_name, self.m_name))
            # a field cannot be both a list (w:while) and a key (w:key)
            if self.m_while and self.m_key:
                raise XTypeException("complexType %s field %s with w:key should be normal field(without w:while)" % (
                    self.m_parent.m_name, self.m_name))
        if W_BITTYPE in inXmlElement.attrib:
            self.m_bittype = inXmlElement.attrib[W_BITTYPE]
class XType(object):
    """
    Base class of simpleType/complexType: name validation plus reference
    counting and the invalidate() lifecycle hooks.
    """
    def __init__(self, inName):
        # type names are restricted to [a-zA-Z0-9_]
        if not re.match('^[a-zA-Z0-9_]*$', inName):
            raise XTypeException(
                'simpleType/complexType name "%s" invalid' % (inName))
        self.m_name = inName
        self.m_refcnt = 0    # how many fields reference this type
        self.m_root = None   # owning tree, set by the parser
        self.m_refedby = []  # referencing XTypes (excluding self-references)
    def ref(self, refedby):
        """Record one reference to this type from *refedby*."""
        self.m_refcnt += 1
        if refedby is not self:
            self.m_refedby.append(refedby)
        logger.debug("%s refed by %s" % (self.m_name, refedby.m_name))
    def refed(self):
        """True when at least one field references this type."""
        return self.m_refcnt > 0
    def refcnt(self):
        """Current reference count."""
        return self.m_refcnt
    def parse(self, inXmlElement):
        """Parse this type's own xml element; extended by subclasses."""
        logger.debug("%s parse" % (self.m_name))
        pass
    def invalidate(self):
        """
        Validate this type's own elements; extended by subclasses.
        """
        logger.debug("%s invalidate" % (self.m_name))
    def invalidate_post_1(self):
        """
        First post pass: build cross-type object links.
        """
        logger.debug("%s invlidate post 1" % (self.m_name))
        pass
    def invalidate_post_2(self):
        """
        Second post pass: validation that needs the cross-type links.
        """
        logger.debug("%s invlidate post 2" % (self.m_name))
        pass
    def invalidate_post(self):
        """Run both post passes in order."""
        logger.debug("%s invalidate post" % (self.m_name))
        # logger.debug("".join(traceback.format_stack()))
        self.invalidate_post_1()
        self.invalidate_post_2()
class XSimpleType(XType):
    """A fixed-length xsd simpleType backed by xsd:hexBinary."""
    def __init__(self, inName):
        super().__init__(inName)
        self.m_len = 0  # byte length; 0 until parsed (or for non-hexBinary)
    def parse(self, inXmlElement):
        super().parse(inXmlElement)
        restriction = inXmlElement.find(
            ".//xsd:restriction[@base='xsd:hexBinary']", XSDNS)
        if restriction is None:
            # only hexBinary-based simpleTypes carry a byte length
            return
        length = restriction.find(".//xsd:length", XSDNS)
        if length is None:
            # raise XTypeException("simpleType must has length info except enumeration, error simpleType %s" % name)
            return
        self.m_len = int(length.attrib['value'])
class XSimpleTypeEnum(XSimpleType):
    """A simpleType carrying xsd:enumeration entries (value -> XEnum)."""
    def __init__(self, inName):
        super().__init__(inName)
        self.m_enums = {}  # numeric value -> XEnum
    def parse(self, inXmlElement):
        super().parse(inXmlElement)
        enums = list(inXmlElement.iterfind(
            ".//xsd:restriction/xsd:enumeration", XSDNS))
        for enum in enums:
            # enumeration values are hex-encoded; w:en is mandatory,
            # w:cn optional (warned about when missing)
            value = int(enum.attrib["value"], base=16)
            en = enum.attrib[W_EN]
            cn = None
            if W_CN in enum.attrib:
                cn = enum.attrib[W_CN]
            else:
                logger.warning(
                    "enumeration simpleType %s:%d has no w:cn attribute", self.m_name, value)
            enumElement = XEnum(value, inEn=en, inCn=cn)
            self.enum_add(enumElement)
    def enum_add(self, inEnum):
        """Add one XEnum; value, w:en and w:cn must each be unique."""
        if inEnum.m_value in self.m_enums:
            raise XTypeException("enumeration simpleType %s value %d duplicate" % (
                self.m_name, inEnum.m_value))
        if inEnum.m_en in [x.m_en for x in self.m_enums.values()]:
            raise XTypeException(
                "enumeration simpleType %s w:en %s duplicate" % (self.m_name, inEnum.m_en))
        if inEnum.m_cn in [x.m_cn for x in self.m_enums.values()]:
            raise XTypeException(
                "enumeration simpleType %s w:cn %s duplicate" % (self.m_name, inEnum.m_cn))
        self.m_enums[inEnum.m_value] = inEnum
class XComplexType(XType):
    """An xsd:complexType: an ordered list of element fields."""

    def __init__(self, inName):
        super().__init__(inName)
        self.m_elements = []

    def element_add(self, inElement):
        """Append a field and set its back-reference to this type."""
        self.m_elements.append(inElement)
        inElement.m_parent = self

    def invalidate(self):
        """Validate field names: non-empty set, identifier-safe, case-insensitively unique."""
        super().invalidate()
        if len(self.m_elements) == 0:
            # BUG FIX: the name was passed as a second positional argument
            # instead of being %-formatted into the message.
            raise XTypeException("complexType %s has no field" % (self.m_name))
        fnames = []
        for field in self.m_elements:
            if not re.match('^[a-zA-Z0-9_]*$', field.m_name):
                # BUG FIX: "complexTpe" typo in the error message.
                raise XTypeException(
                    'complexType %s field "%s" name invalid' % (self.m_name, field.m_name))
            fname = field.m_name.upper()
            if fname in fnames:
                raise XTypeException(
                    "complexType %s field %s name duplicate" % (self.m_name, field.m_name))
            fnames.append(fname)

    def invalidate_post_1(self):
        """Resolve each field's type name to its type object and record the reference."""
        super().invalidate_post_1()
        for field in self.m_elements:
            ftype = None
            if field.m_type in self.m_root.m_simple_type_dict:
                ftype = self.m_root.m_simple_type_dict[field.m_type]
            elif field.m_type in self.m_root.m_complex_type_dict:
                ftype = self.m_root.m_complex_type_dict[field.m_type]
                # Post-validate a complex dependency the first time it is referenced.
                if not ftype.refed():
                    ftype.invalidate_post()
            else:
                raise XTypeException("complexType %s field %s's type %s not defined" % (
                    self.m_name, field.m_name, field.m_type))
            ftype.ref(self)
            field.m_type_obj = ftype

    def dependency(self, depends):
        """Accumulate this type's transitive type dependencies into *depends*."""
        for field in self.m_elements:
            if field.m_type_obj in depends:
                continue
            depends.append(field.m_type_obj)
            if isinstance(field.m_type_obj, XComplexType):
                field.m_type_obj.dependency(depends)
class XComplexTypePB(XComplexType):
    """complexType mapped to protobuf; adds w:index / w:key / w:while handling."""

    def __init__(self, inName):
        super().__init__(inName)
        # True when any (non-nopb) field, directly or via a child, is a key.
        self.m_has_key_field = False

    def parse(self, inXmlElement):
        """Parse every xsd:sequence/xsd:element into an XElementField."""
        elements = list(inXmlElement.iterfind(
            ".//xsd:sequence/xsd:element", XSDNS))
        for element in elements:
            xelement = XElementField()
            self.element_add(xelement)
            xelement.parse(element)

    def invalidate(self):
        """Check w:index consistency (all-or-none, unique) and assign 1-based
        indices when the schema supplied none."""
        super().invalidate()
        indexFirst = self.m_elements[0].m_index
        indexs = {}
        for field in self.m_elements:
            # Either every field has w:index, or none does.
            if (indexFirst is not None and field.m_index is None) or (indexFirst is None and field.m_index is not None):
                raise XTypeException("complexType %s field must all have w:index attribute, or no one field has w:index attribute, please check field %s first!" % (
                    self.m_name, field.m_name))
            if field.m_index:
                if field.m_index in indexs:
                    raise XTypeException("complexType %s field %s w:index duplicate with field %s!" % (
                        self.m_name, field.m_name, indexs[field.m_index].m_name))
                indexs[field.m_index] = field
            if field.m_key and not field.m_nopb:
                self.m_has_key_field = True
        # No explicit indices at all: number the fields sequentially from 1.
        index = 1
        if indexFirst is None:
            for field in self.m_elements:
                field.m_index = index
                index += 1

    def invalidate_post_1(self):
        """Resolve w:bittype references, wire up w:while counter fields, and
        propagate key information from child types."""
        super().invalidate_post_1()
        for field in self.m_elements:
            if field.m_bittype:
                if field.m_bittype in self.m_root.m_simple_type_dict:
                    ftype = self.m_root.m_simple_type_dict[field.m_bittype]
                elif field.m_bittype in self.m_root.m_complex_type_dict:
                    ftype = self.m_root.m_complex_type_dict[field.m_bittype]
                else:
                    raise XTypeException("complexType %s field %s's bittype %s not defined" % (
                        self.m_name, field.m_name, field.m_bittype))
                ftype.ref(self)
            # Resolve w:while ("../<name>") to the sibling counter field and
            # determine whether a keyed child makes this type keyed too.
            if field.m_while:
                match = re.search(
                    "^../([a-zA-Z_][a-zA-Z0-9_]*)$", field.m_while)
                if match is None:
                    # Non-sibling while expressions are skipped here.
                    # NOTE(review): this also skips the complexType check
                    # below for such fields — confirm intended.
                    continue
                gotit = False
                for wfield in self.m_elements:
                    if wfield.m_name == match.group(1):
                        wfield.m_while_for = field
                        field.m_while_field = wfield
                        gotit = True
                        break
                if not gotit:
                    raise XTypeException("complexType %s while field %s not exist" % (
                        self.m_name, match.group(1)))
                if not isinstance(field.m_type_obj, XComplexType):
                    raise XTypeException("complexType %s w:while field %s's type %s must be complexType" % (
                        self.m_name, field.m_name, field.m_type))
                elif isinstance(field.m_type_obj, XComplexType) and field.m_type_obj.m_has_key_field:
                    field.m_child_has_key = True
                    self.m_has_key_field = True
            # Validate w:default: only simpleType fields, value must fit the
            # declared length, and enum defaults must name a defined value.
            if field.m_default:
                if not isinstance(field.m_type_obj, XSimpleType):
                    raise XTypeException("complexType %s field %s's type %s is not simpleType, only simpleType field has default attribute" % (
                        self.m_name, field.m_name, field.m_type))
                if len(field.m_default)/2 > field.m_type_obj.m_len:
                    raise XTypeException("complexType %s field %s's default value length %d > %s's len %d" % (
                        self.m_name, field.m_name, len(field.m_default)/2, field.m_type_obj.m_name, field.m_type_obj.m_len))
                if field.m_type_obj.m_len in (1, 2, 4):
                    field.m_default_value = int(field.m_default, base=16)
                    if isinstance(field.m_type_obj, XSimpleTypeEnum) and field.m_default_value not in field.m_type_obj.m_enums:
                        raise XTypeException("complexType %s field %s's default value %d is not in enum %s" % (
                            self.m_name, field.m_name, field.m_default_value, field.m_type_obj.m_enums.keys()))

    def invalidate_post_2(self):
        """A field used as a w:while counter must be a 1/2/4-byte simpleType."""
        super().invalidate_post_2()
        for field in self.m_elements:
            if not field.m_while_for:
                continue
            if isinstance(field.m_type_obj, XSimpleType) and field.m_type_obj.m_len in (1, 2, 4):
                continue
            raise XTypeException("complexType %s field %s's type must be BYTE,WORD,DWORD which w:whiled by element %s" % (
                self.m_name, field.m_name, field.m_while_for.m_name))
class XComplexTypeCommon(XComplexTypePB):
    """complexType carrying the w:pbcommon attribute: may only embed other
    w:pbcommon complexTypes."""

    def __init__(self, inName):
        super().__init__(inName)

    def invalidate_post_1(self):
        # BUG FIX: this hook was misspelled "invlidate_post_1", so it was
        # never invoked by XType.invalidate_post(), and its super() call
        # targeted a non-existent method.
        super().invalidate_post_1()
        for field in self.m_elements:
            if isinstance(field.m_type_obj, XComplexType) and not isinstance(field.m_type_obj, XComplexTypeCommon):
                # BUG FIX: the message was passed as a second positional
                # argument instead of being %-formatted.
                raise XTypeException("complexType %s field %s's type %s must has w:pbcommon attribute" % (
                    self.m_name, field.m_name, field.m_type))
class XComplexTypeInc(XComplexTypePB):
    """complexType carrying the w:pbinc attribute: may only embed
    w:pbcommon complexTypes."""

    def __init__(self, inName):
        super().__init__(inName)

    def invalidate_post_1(self):
        # BUG FIX: this hook was misspelled "invlidate_post_1", so it was
        # never invoked by XType.invalidate_post(), and its super() call
        # targeted a non-existent method.
        super().invalidate_post_1()
        for field in self.m_elements:
            if isinstance(field.m_type_obj, XComplexType) and not isinstance(field.m_type_obj, XComplexTypeCommon):
                # BUG FIX: the message was passed as a second positional
                # argument instead of being %-formatted.
                raise XTypeException("complexType %s field %s's type %s must has w:pbcommon attribute" % (
                    self.m_name, field.m_name, field.m_type))
class XComplexTypeOut(XComplexTypePB):
    """complexType carrying the w:pbout attribute: exactly one w:while item
    field, whose type (the "item type") must be w:pbinc."""

    def __init__(self, inName):
        super().__init__(inName)
        # Item type name during invalidate(); replaced by the resolved
        # type object in invalidate_post_1().
        self.m_item_type = None

    def invalidate(self):
        """Require exactly one w:while field and record its type name."""
        super().invalidate()
        fieldw = None
        for field in self.m_elements:
            if field.m_while:
                if fieldw is not None:
                    raise XTypeException(
                        "w:pbout complexType %s must have only one element with w:while attribute" % (self.m_name))
                fieldw = field
        if fieldw is None:
            raise XTypeException(
                "w:pbout complexType %s must have one element with w:while attribute" % (self.m_name))
        # Naming convention only; not an error.
        if not fieldw.m_type.endswith("_Item") and not fieldw.m_type.endswith("_item"):
            logger.warning("w:pbout complexType %s's item type %s suggest endswith '_Item' or '_item'" % (
                self.m_name, fieldw.m_type))
        self.m_item_type = fieldw.m_type

    def invalidate_post_1(self):
        """Swap m_item_type from name to object and require it be w:pbinc."""
        super().invalidate_post_1()
        for field in self.m_elements:
            if field.m_while:
                self.m_item_type = self.m_root.m_complex_type_dict[self.m_item_type]
                if not isinstance(self.m_item_type, XComplexTypeInc):
                    raise XTypeException("w:pbout complexType %s's item type %s has no attribute w:pbinc" % (
                        self.m_name, self.m_item_type.m_name))
                break
class XComplexTypeRoot(XComplexType):
    """The root complexType: its elements are the block-data entries, each of
    whose types must carry w:pbout."""

    def __init__(self, inName):
        super().__init__(inName)

    def parse(self, inXmlElement):
        """Parse each block element, rejecting duplicate block numbers."""
        elements = list(inXmlElement.iterfind(
            ".//xsd:sequence/xsd:element", XSDNS))
        for element in elements:
            xelement = XElementBlock()
            xelement.parse(element)
            for block in self.m_elements:
                # NOTE(review): if two blocks both lack a number (m_no is
                # None) this also matches and %X would fail — confirm every
                # block carries a number.
                if xelement.m_no == block.m_no:
                    raise XTypeException("w:pbout complexType %s's no %X duplicate with %s" % (
                        xelement.m_type, xelement.m_no, block.m_type))
            self.element_add(xelement)
            if xelement.m_no:
                logger.debug('blockdataconfig element item %s(%X)' %
                             (xelement.m_name, xelement.m_no))
            else:
                logger.debug('blockdataconfig element item %s' %
                             (xelement.m_name, ))

    def invalidate(self):
        super().invalidate()

    def invalidate_post_1(self):
        """Require every block's type to be w:pbout; mark the root as refed
        so it never appears in the unreferenced report."""
        super().invalidate_post_1()
        for field in self.m_elements:
            if isinstance(field.m_type_obj, XComplexType) and not isinstance(field.m_type_obj, XComplexTypeOut):
                raise XTypeException("block %s's type %s must has w:pbout attribute!!" % (
                    field.m_name, field.m_type))
        self.ref(self)

    def invalidate_post_2(self):
        # Deliberately skips XComplexType's pass-2 checks for the root.
        pass
class XXsdTree(object):
    """Parses one xsd file into simple/complex type objects, validates them,
    and topologically sorts the complexTypes by reference order."""

    def __init__(self, inXsdFile):
        self.m_xsd_file = inXsdFile
        # NOTE(review): splits on backslash, so the file name is only
        # extracted for Windows-style paths — confirm vs os.path.basename.
        self.m_xsd_name = inXsdFile.split('\\')[-1]
        self.m_xsd_root = ElementTree.parse(self.m_xsd_file).getroot()
        self.m_complex_root = None    # XComplexTypeRoot, set by load_complex_root
        self.m_simple_types = []      # simpleTypes in declaration order
        self.m_simple_type_dict = {}  # name -> simpleType
        self.m_complex_types = []     # complexTypes (DAG-sorted after parse)
        self.m_complex_type_dict = {}  # name -> complexType
        # Bookkeeping complexType names skipped by load_complex_types.
        self.m_meta_complex_types = ['BlockTableItem', 'AllConfig']

    def meta_complex_add(self, inName):
        """Mark *inName* as a meta complexType to be skipped during loading."""
        self.m_meta_complex_types.append(inName)

    def simple_types_unrefed(self):
        """Names of simpleTypes never referenced by any other type."""
        return [type.m_name for type in self.m_simple_types if not type.refed()]

    def complex_types_unrefed(self):
        """Names of complexTypes never referenced by any other type."""
        return [type.m_name for type in self.m_complex_types if not type.refed()]

    def type_add(self, inType):
        """Register a parsed type in the matching list/dict and set its
        back-reference; duplicate names are errors."""
        if isinstance(inType, XSimpleType):
            if inType.m_name in self.m_simple_type_dict:
                raise XTypeException(
                    "simpleType %s duplicate!!" % inType.m_name)
            self.m_simple_types.append(inType)
            self.m_simple_type_dict[inType.m_name] = inType
        elif isinstance(inType, XComplexType):
            if inType.m_name in self.m_complex_type_dict:
                raise XTypeException(
                    "complexType %s duplicate!!" % inType.m_name)
            self.m_complex_types.append(inType)
            self.m_complex_type_dict[inType.m_name] = inType
        else:
            # BUG FIX: the name was passed as a second positional argument
            # instead of being %-formatted into the message.
            raise XTypeException(
                'unkown simpleType/complexType %s' % inType.m_name)
        inType.m_root = self

    def parse(self):
        """Full pipeline: load root, simple and complex types, run the
        post-validation passes from the root, then DAG-sort."""
        self.load_complex_root()
        self.load_simple_types()
        self.load_complex_types()
        self.m_complex_root.invalidate_post()
        self.dagsort()
        # Debug-only sanity walk; the list/dict identity assertion is disabled.
        for complex in self.m_complex_types:
            if complex is not self.m_complex_type_dict[complex.m_name]:
                pass
            logger.debug("%s id(list) %d, id(dict) %d", complex.m_name, id(
                complex), id(self.m_complex_type_dict[complex.m_name]))

    def load_complex_root(self):
        """Locate the root xsd:element, parse its complexType as the root,
        and register it as a meta type."""
        element = self.m_xsd_root.find("xsd:element", XSDNS)
        self.m_complex_root = XComplexTypeRoot(element.attrib['type'])
        logger.debug("root complexType %s" % (self.m_complex_root.m_name))
        self.meta_complex_add(self.m_complex_root.m_name)
        element = self.m_xsd_root.find(
            ".//xsd:complexType[@name='" + self.m_complex_root.m_name + "']", XSDNS)
        self.m_complex_root.parse(element)
        self.type_add(self.m_complex_root)
        self.m_complex_root.invalidate()

    def load_simple_types(self):
        """Parse every xsd:simpleType (enum when it has enumerations)."""
        for simple in list(self.m_xsd_root.iterfind(".//xsd:simpleType", XSDNS)):
            name = simple.attrib['name']
            simpleType = None
            if simple.findall(".//xsd:restriction/xsd:enumeration", XSDNS):
                simpleType = XSimpleTypeEnum(name)
            else:
                simpleType = XSimpleType(name)
            simpleType.parse(simple)
            self.type_add(simpleType)
            simpleType.invalidate()

    def load_complex_types(self):
        """Parse every xsd:complexType, choosing the subclass from its
        w:pbout / w:pbinc / w:pbcommon attribute; meta types are skipped."""
        for complex in list(self.m_xsd_root.iterfind(".//xsd:complexType", XSDNS)):
            name = complex.attrib['name']
            complexType = None
            if W_PBOUT in complex.attrib:
                complexType = XComplexTypeOut(name)
            elif W_PBINC in complex.attrib:
                complexType = XComplexTypeInc(name)
            elif W_PBCOMMON in complex.attrib:
                complexType = XComplexTypeCommon(name)
            else:
                if name not in self.m_meta_complex_types:
                    complexType = XComplexTypePB(name)
                else:
                    continue
            complexType.parse(complex)
            self.type_add(complexType)
            complexType.invalidate()

    def dagsort(self):
        """
        Directed Acyclic Graph, topologic sorting (Kahn's algorithm over the
        "referenced by" edges); raises when a reference cycle exists.
        """
        indegrees = dict((u, 0) for u in self.m_complex_types)
        vertexnum = len(indegrees)
        for u in self.m_complex_types:
            for v in self.m_complex_type_dict[u.m_name].m_refedby:
                indegrees[v] += 1
        seq = []
        # Seed with dependency-free types, in case-insensitive name order to
        # keep the output deterministic.
        Q = [u for u in sorted(
            self.m_complex_types, key=lambda d: d.m_name.upper()) if indegrees[u] == 0]
        logger.debug("no dependency complexType : %s" %
                     ([d.m_name for d in Q]))
        while Q:
            u = Q.pop(0)
            seq.append(u)
            tmpQ = []
            for v in self.m_complex_type_dict[u.m_name].m_refedby:
                indegrees[v] -= 1
                if indegrees[v] == 0:
                    tmpQ.append(v)
                elif indegrees[v] < 0:
                    raise XTypeException("DAG sort BUG!!")
            tmpQ = sorted(tmpQ, key=lambda d: d.m_name.upper())
            logger.debug("pop %s, push %s" %
                         (u.m_name, [d.m_name for d in tmpQ]))
            Q.extend(tmpQ)
            logger.debug("new Q %s " % ([d.m_name for d in Q]))
        if len(seq) == vertexnum:
            logger.debug("DAG sorting before:%s" %
                         [d.m_name for d in self.m_complex_types])
            self.m_complex_types = seq
            logger.debug("DAG sorting after:%s" %
                         [d.m_name for d in self.m_complex_types])
        else:
            raise XTypeException("there is a circle")

    def iterblocks(self):
        """Yield, for each root block field, a list holding the block's type
        object followed by its transitive type dependencies."""
        for field in self.m_complex_root.m_elements:
            depends = [field.m_type_obj]
            field.m_type_obj.dependency(depends)
            yield depends
def isXSimpleType(inSimpleType):
    """Return True when *inSimpleType* is an XSimpleType instance."""
    return isinstance(inSimpleType, XSimpleType)
def isXSimpleTypeEnum(inSimpleType):
    """Return True when *inSimpleType* is an XSimpleTypeEnum instance."""
    return isinstance(inSimpleType, XSimpleTypeEnum)
def isXComplexType(inComplexType):
    """Return True when *inComplexType* is an XComplexType instance."""
    return isinstance(inComplexType, XComplexType)
def isXComplexTypeCommon(inComplexType):
    """Return True when *inComplexType* is an XComplexTypeCommon instance."""
    return isinstance(inComplexType, XComplexTypeCommon)
def isXComplexTypeInc(inComplexType):
    """Return True when *inComplexType* is an XComplexTypeInc instance."""
    return isinstance(inComplexType, XComplexTypeInc)
def isXComplexTypeOut(inComplexType):
    """Return True when *inComplexType* is an XComplexTypeOut instance."""
    return isinstance(inComplexType, XComplexTypeOut)
def isXComplexTypeRoot(inComplexType):
    """Return True when *inComplexType* is an XComplexTypeRoot instance."""
    return isinstance(inComplexType, XComplexTypeRoot)
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,377
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/__init__.py
|
# Package metadata for the xgen distribution.
__author__ = 'zengmao'
__version__ = '0.1'
__date__ = '2018-08-28'
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,378
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/setup.py
|
import os  # NOTE(review): unused in this script — confirm before removing.
from setuptools import setup, find_packages

# Packaging metadata for the xgen distribution.
# NOTE(review): license="BSD" disagrees with the "MIT License" classifier
# below — confirm which license is intended.
setup(
    name = "xgen",
    version = "0.1.1",
    author = "yczhang",
    author_email = "yczhang@fiberhome.com",
    description = ("Generate xsd/proto/cpp file from yang model."),
    license = "BSD",
    keywords = "xgen",
    url = "http://www.fiberhome.com",
    packages=find_packages(),
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Topic :: Utilities",
        "Programming Language :: Python :: 3.6",
    ],
    install_requires=['pyang==1.7.3', 'jinja2==2.10'],
    scripts=['xgenc.py'],
)
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,379
|
ZhangYongChang/docker_env
|
refs/heads/master
|
/xgen/xgen/yxsd.py
|
# -*- coding:utf-8 -*-
"""
fiberhome yang xsd parser which parse:
y:ns yang module namespace, mark module namespace at xsd:complexType, mark container/list/leaf at xsd:element
y:parent yang leaf/container/list parent path, container/list y:parent contains itself
y:nodeopr yang leaf/container has operation field which name postfix _opr, list always has field listopr
y:nomap donnot generate edit-config config for this field
y:leafprefix yang leaf namespace prefix
y:leafname yang leafname
y:leafmand yang mandatory leaf
y:list yang list
y:key yang list element key
y:input rpc input type
y:output rpc output type
"""
import logging
import os
from xml.etree import ElementTree

from xgen.util import *
logger = logging.getLogger(__name__)
# Namespace map used for all ElementTree xsd queries; 'w' and 'y' are the
# vendor extension namespaces (board-control and yang annotations).
XSDNS = {'xsd': 'http://www.w3.org/2001/XMLSchema',
         'w': 'http://www.fiberhome.com.cn/board/control',
         'y': 'http://www.fiberhome.com.cn/ns/yang'}
# Fully-qualified attribute names in ElementTree's '{namespace}local' form.
Y_NS = '{%s}ns' % XSDNS['y']
Y_LEAFNAME = '{%s}leafname' % XSDNS['y']
Y_LEAFPREFIX = '{%s}leafprefix' % XSDNS['y']
Y_LEAFMAND = '{%s}leafmand' % XSDNS['y']
Y_PATH = '{%s}path' % XSDNS['y']
Y_LIST = '{%s}list' % XSDNS['y']
Y_KEY = '{%s}key' % XSDNS['y']
Y_NOMAP = '{%s}nomap' % XSDNS['y']
Y_NODEOPR = '{%s}nodeopr' % XSDNS['y']
Y_INPUT = '{%s}input' % XSDNS['y']
Y_OUTPUT = '{%s}output' % XSDNS['y']
W_EN = '{%s}en' % XSDNS['w']
W_CN = '{%s}cn' % XSDNS['w']
class XException(Exception):
    """Error raised for yang-xsd parsing and validation failures."""
    pass
def nsparse(yns):
    """Parse a whitespace-separated list of "namespace|prefix" tokens.

    Returns a {prefix: namespace} dict (prefix is '' when omitted), or
    None when *yns* contains no tokens.
    """
    nsDict = None
    for token in yns.split():
        parts = token.split('|')
        prefix = parts[1] if len(parts) == 2 else ''
        if nsDict is None:
            nsDict = {}
        nsDict[prefix] = parts[0]
    return nsDict
def pathlexical(path):
    """Drop a single trailing '/' from a non-empty *path*, if present."""
    return path[0:-1] if path[-1] == '/' else path
def rsubpath(path, count):
    """Strip the last *count* '/'-separated components from *path*.

    Returns None when fewer than *count* components can be removed or when
    the trim would land on a leading '/'; count == 0 returns *path* as-is.
    """
    end = len(path)
    for _ in range(count):
        end = path.rfind('/', 0, end)
        # rfind miss (-1) or a leading slash (0) means no usable ancestor.
        if end <= 0:
            return None
    return path[0:end]
def pathsplit(path):
    """Split *path* on '/' into components.

    A leading '/' is folded into the first component ('/a/b' -> ['/a', 'b']);
    a trailing '/' produces no empty tail; '' yields [].
    """
    if path == '':
        return []
    parts = path.split('/')
    if parts[0] == '':
        parts = ['/' + parts[1]] + parts[2:]
    if parts and parts[-1] == '':
        parts = parts[:-1]
    return parts
class XEnum(object):
    """
    xsd simpleType/restriction/enumeration entry
    """

    def __init__(self, inValue, inEn=None, inCn=None, inFieldIndex=None):
        # Numeric enumeration value (parsed from hex in the xsd).
        self.m_value = inValue
        # English / Chinese display names (w:en / w:cn attributes).
        self.m_en = inEn
        self.m_cn = inCn
        # Value of the xsd field_index attribute — presumably the protobuf
        # field index; TODO confirm against the generator templates.
        self.m_field_index = inFieldIndex
class XType(object):
    """
    base class of simpleType/complexType
    """

    def __init__(self, inName):
        self.m_name = inName
        self.m_root = None    # owning YModule, set by YModule.type_add
        self.m_refed = False  # True once some field references this type

    def parse(self, inXmlElement):
        """Parse the type's xsd element; base implementation is a no-op."""
        pass

    def invalidate(self):
        """
        Validate this type's own elements (first validation pass).
        """
        logger.debug("%s invalidate" % (self.m_name))

    def invalidate_post_1(self):
        """
        Second pass: build cross-type object links.
        """
        logger.debug("%s invlidate post 1" % (self.m_name))
        pass

    def invalidate_post_2(self):
        """
        Third pass: validation that depends on the cross-type links.
        """
        logger.debug("%s invlidate post 2" % (self.m_name))
        pass

    def invalidate_post(self):
        """Run both post-validation passes in order."""
        logger.debug("%s invalidate post" % (self.m_name))
        self.invalidate_post_1()
        self.invalidate_post_2()
class XSimpleType(XType):
    """Base for xsd simpleTypes; m_len is the hexBinary byte length (0 if none)."""

    def __init__(self, inName):
        super().__init__(inName)
        self.m_len = 0

    def parse(self, inXmlElement):
        """Pick up the xsd:length of a hexBinary restriction into m_len."""
        super().parse(inXmlElement)
        restriction = inXmlElement.find(
            ".//xsd:restriction[@base='xsd:hexBinary']", XSDNS)
        if restriction is not None:
            length = restriction.find(".//xsd:length", XSDNS)
            if length is not None:
                self.m_len = int(length.attrib['value'])
class XSimpleTypeInt(XSimpleType):
    """simpleType mapped straight through as an integer scalar
    (created for the names int32/uint32/int64/uint64)."""

    def __init__(self, inName):
        super().__init__(inName)
class XSimpleTypeString(XSimpleType):
    """simpleType mapped to protobuf 'bytes' / typename 'string'."""

    def __init__(self, inName):
        super().__init__(inName)
class XSimpleTypeEnum(XSimpleType):
    """Enumeration simpleType: m_enums maps numeric value -> XEnum."""

    def __init__(self, inName):
        super().__init__(inName)
        self.m_enums = {}

    def parse(self, inXmlElement):
        """Collect every xsd:enumeration (hex value; w:en and field_index
        required, w:cn optional)."""
        super().parse(inXmlElement)
        for enum in inXmlElement.iterfind(".//xsd:restriction/xsd:enumeration", XSDNS):
            value = int(enum.attrib["value"], base=16)
            en = enum.attrib[W_EN]
            cn = None
            if W_CN in enum.attrib:
                cn = enum.attrib[W_CN]
            else:
                logger.warning(
                    "enumeration simpleType %s:%d has no w:cn attribute", self.m_name, value)
            xenum = XEnum(value, inEn=en, inCn=cn,
                          inFieldIndex=enum.attrib["field_index"])
            self.enum_add(xenum)

    def enum_add(self, inEnum):
        """Register one entry; duplicate value or w:en is an error.
        The w:cn duplicate check is deliberately disabled below."""
        if inEnum.m_value in self.m_enums:
            raise XException("enumeration simpleType %s value %d duplicate" % (
                self.m_name, inEnum.m_value))
        if inEnum.m_en in [x.m_en for x in self.m_enums.values()]:
            raise XException("enumeration simpleType %s w:en %s duplicate" % (
                self.m_name, inEnum.m_en))
        # if inEnum.m_cn in [x.m_cn for x in self.m_enums.values()]:
        #     raise XException("enumeration simpleType %s w:cn %s duplicate" % (self.m_name, inEnum.m_cn))
        self.m_enums[inEnum.m_value] = inEnum
class ElementField(object):
    """One xsd:element inside a complexType, plus its protobuf/yang mapping."""

    def __init__(self, inName, inType, inLeafName=None, inLeafPrefix=None, inFieldNum=None):
        self.m_name = inName
        self.m_type = inType              # xsd type name; resolved to m_type_obj later
        self.m_field_index = inFieldNum   # xsd field_index attribute value
        self.m_leaf = False               # True when m_type is a simpleType
        self.m_list = False               # y:list present
        self.m_nomap = False              # y:nomap: skip edit-config mapping
        self.m_nodeopr = None             # pb name of the "<name>_opr" companion
        self.m_path = None                # y:path with trailing '/' stripped
        self.m_key = -1                   # y:key order; -1 when not a key
        self.m_mandatory = False          # y:leafmand present
        self.m_namespaces = {}            # parsed y:ns map for this field
        # Leaf name defaults to the element name; y:leafname overrides it and
        # y:leafprefix prepends "prefix:".
        if inLeafName is None:
            self.m_leafname = inName
        else:
            self.m_leafname = inLeafName
        if inLeafPrefix is not None:
            self.m_leafname = inLeafPrefix + ':' + self.m_leafname
        self.m_pbname = pbname(inName)
        self.m_pbtype = pbname(inType)
        self.m_pboption = 'optional'      # optional / required / repeated
        self.m_typename = 'int32'         # scalar kind: int type name / 'string' / 'enum'
        self.m_path_shared = []           # path components shared with sibling fields
        self.m_path_priv = []             # remaining private path components
        self.m_path_priv_list = ''        # last private component, for y:list fields
        self.m_type_obj = None            # resolved XType, set in invalidate_post_1
class ElementRpc(object):
    """One yang rpc: holds its input/output type names and namespace."""

    def __init__(self, inName):
        self.m_name = inName
        self.m_input = None
        self.m_output = None
        self.m_namespace = None

    def parse(self, inXmlElement):
        """Pick up the optional y:input / y:output / y:ns attributes."""
        attrs = inXmlElement.attrib
        self.m_input = attrs.get(Y_INPUT, self.m_input)
        self.m_output = attrs.get(Y_OUTPUT, self.m_output)
        self.m_namespace = attrs.get(Y_NS, self.m_namespace)
class ElementNotify(object):
    """One yang notification: its payload type and namespace."""

    def __init__(self, inName):
        self.m_name = inName
        self.m_type = None
        self.m_namespace = None

    def parse(self, inXmlElement):
        """Read the mandatory 'type' and y:ns attributes (KeyError if absent)."""
        self.m_type = inXmlElement.attrib['type']
        self.m_namespace = inXmlElement.attrib[Y_NS]
class XNode(object):
    """Node of the shared-path trie built by XTree."""

    def __init__(self, inXName=''):
        self.m_xname = inXName
        self.m_xname_parent = ''  # parent component name ('yNode' at the roots)
        self.m_xnodes = {}        # child component name -> XNode
        self.m_fields = []        # ElementFields terminating at this node
class XTree(object):
    """Trie of XNodes keyed by the shared yang-path components of fields."""

    def __init__(self):
        self.m_xnodes = {}

    def build(self, element):
        """Insert *element* along its m_path_shared component list, creating
        intermediate XNodes as needed; the field is attached to the deepest
        node of its path."""
        # BUG FIX: the original compared the list m_path_shared against ''
        # (always False, so the guard was dead); test for an empty component
        # list instead.
        if not element.m_path_shared:
            return
        xnode = XNode()
        pathlist = element.m_path_shared
        parent = ''
        i = 0
        while i < len(pathlist):
            path = pathlist[i]
            if i == 0:
                # Top-level components live in the tree's own node map.
                if path in self.m_xnodes:
                    xnode = self.m_xnodes[path]
                else:
                    self.m_xnodes[path] = XNode(path)
                    xnode = self.m_xnodes[path]
                    xnode.m_xname_parent = 'yNode'
            else:
                if path in xnode.m_xnodes:
                    xnode = xnode.m_xnodes[path]
                else:
                    xnode.m_xnodes[path] = XNode(path)
                    xnode = xnode.m_xnodes[path]
                    xnode.m_xname_parent = parent
            parent = path
            i = i + 1
            if i == len(pathlist):
                xnode.m_fields.append(element)
                return
class XComplexType(XType):
    """An xsd:complexType derived from a yang container/list/rpc body.

    Holds the parsed fields plus the derived groupings used by the code
    generator: key fields, mandatory fields, fields with no shared path,
    and the shared-path trie (m_xtree)."""

    def __init__(self, inName):
        super().__init__(inName)
        self.m_name_pb = pbname(inName)
        self.m_name_cpp = cppname(inName)
        self.m_modname = None          # set only for the module's own type
        self.m_fields_key = {}         # y:key order -> field
        self.m_fields_mandatory = []   # y:leafmand fields
        self.m_fields_noshared = []    # mapped fields without a shared path
        self.m_xtree = XTree()
        self.m_namespaces = {}
        self.m_fields = []

    def parse(self, inXmlElement):
        """Parse every xsd:sequence/xsd:element into an ElementField with its
        yang annotations (y:path, y:list, y:key, y:leafmand, y:nomap, ...)."""
        super().parse(inXmlElement)
        if self.m_name == self.m_root.m_modtype:
            self.m_modname = self.m_root.m_modname
        if Y_NS in inXmlElement.attrib:
            self.m_namespaces = nsparse(inXmlElement.attrib[Y_NS])
        for element in inXmlElement.iterfind(".//xsd:sequence/xsd:element", XSDNS):
            leafname = None
            leafprefix = None
            if Y_LEAFNAME in element.attrib:
                leafname = element.attrib[Y_LEAFNAME]
            if Y_LEAFPREFIX in element.attrib:
                leafprefix = element.attrib[Y_LEAFPREFIX]
            field = ElementField(
                element.attrib['name'], element.attrib['type'], leafname, leafprefix, element.attrib['field_index'])
            if Y_PATH in element.attrib:
                field.m_path = pathlexical(element.attrib[Y_PATH])
            # simpleType -> leaf with a scalar mapping; otherwise a nested
            # complexType reference.
            ftype = element.attrib['type']
            if ftype in self.m_root.m_simple_type_dict:
                field.m_leaf = True
                stype = self.m_root.m_simple_type_dict[ftype]
                if isinstance(stype, XSimpleTypeInt):
                    field.m_pbtype = ftype
                    field.m_typename = ftype
                elif isinstance(stype, XSimpleTypeString):
                    field.m_pbtype = 'bytes'
                    field.m_typename = 'string'
                elif isinstance(stype, XSimpleTypeEnum):
                    field.m_pbtype = 'int32'
                    field.m_typename = 'enum'
                else:
                    raise XException('unsupport element type %s ' %
                                     element.attrib['type'])
            else:
                field.m_leaf = False
                field.m_pbtype = pbname(element.attrib['type'])
                if leafname or leafprefix:
                    # BUG FIX: the values were passed as a second positional
                    # argument instead of being %-formatted.
                    raise XException('%s:%s is not leaf, only leaf has y:leafname, y:leafprefix attrib' % (
                        self.m_name, element.attrib['name']))
            if Y_LIST in element.attrib:
                field.m_list = True
                field.m_pboption = 'repeated'
            elif Y_KEY in element.attrib:
                ystr = element.attrib[Y_KEY]
                field.m_key = int(ystr)
                field.m_pboption = 'required'
                field.m_list = False
                if not field.m_leaf:
                    raise XException('only leaf type has y:key attribute')
            elif Y_LEAFMAND in element.attrib:
                field.m_mandatory = True
                field.m_pboption = 'required'
                field.m_list = False
                if not field.m_leaf:
                    raise XException('only leaf type has y:leafmand attribute')
            if Y_NOMAP in element.attrib:
                field.m_nomap = True
            else:
                field.m_nomap = False
            if Y_NODEOPR in element.attrib:
                field.m_nodeopr = pbname(field.m_name + "_opr")
            if Y_NS in element.attrib:
                ystr = element.attrib[Y_NS]
                # BUG FIX: was "self.m_modtype", an attribute that only
                # exists on the root YModule — it raised AttributeError
                # whenever a field-level y:ns started with '/'.
                if ystr.startswith('/') and self.m_name != self.m_root.m_modtype:
                    raise XException(
                        'only module complex type has y:ns attrib which start with "/"')
                field.m_namespaces = nsparse(ystr)
            self.m_fields.append(field)
        logger.debug("parsed complexType %s", self.m_name)

    def parentpathsplit(self):
        """For each mapped field with a y:path, find the longest ancestor
        path shared with at least one other mapped field and split the path
        into shared and private component lists."""
        for element in self.m_fields:
            if element.m_nomap:
                continue
            shared = element.m_path
            if shared is None:
                continue
            # Try progressively shorter ancestors (slashn components removed)
            # until one is shared by >= 2 fields or no ancestor remains.
            slashn = 0
            while True:
                shared = rsubpath(element.m_path, slashn)
                slashn = slashn + 1
                if shared is None:
                    break
                matched = 0
                for element2 in self.m_fields:
                    if element2.m_nomap:
                        continue
                    if element2.m_path is None:
                        continue
                    path = element2.m_path + '/'
                    if path.startswith(shared + '/'):
                        # Note: the element itself also matches, so a prefix
                        # is "shared" when matched >= 2.
                        matched = matched + 1
                        logger.debug("complexType %s field %s & field %s shared part with %s",
                                     self.m_name, element.m_name, element2.m_name, shared)
                if matched >= 2:
                    break
            logger.debug("complexType %s field %s parent %s shared part %s",
                         self.m_name, element.m_name, element.m_path, shared)
            if shared is not None:
                element.m_path_shared = pathsplit(shared)
                element.m_path_priv = pathsplit(element.m_path[len(shared)+1:])
            else:
                element.m_path_priv = pathsplit(element.m_path)
            if element.m_list:
                # Lists keep their last private component separately.
                # NOTE(review): pops from an empty list if a y:list field's
                # whole path is shared — confirm the schemas prevent this.
                element.m_path_priv_list = element.m_path_priv.pop()

    def build(self):
        """Split the paths, then route each mapped field into the key /
        mandatory / no-shared-path groups or the shared-path trie."""
        self.parentpathsplit()
        for element in self.m_fields:
            if element.m_nomap:
                continue
            if element.m_key > 0:
                self.m_fields_key[element.m_key] = element
                continue
            elif element.m_mandatory:
                self.m_fields_mandatory.append(element)
                continue
            if not element.m_path_shared:
                self.m_fields_noshared.append(element)
                continue
            self.m_xtree.build(element)

    def invalidate_post_1(self):
        """Resolve each field's type name to its type object, marking the
        referenced types and post-validating complex ones on first use."""
        super().invalidate_post_1()
        for field in self.m_fields:
            ftype = None
            if field.m_type in self.m_root.m_simple_type_dict:
                ftype = self.m_root.m_simple_type_dict[field.m_type]
                ftype.m_refed = True
            elif field.m_type in self.m_root.m_complex_type_dict:
                ftype = self.m_root.m_complex_type_dict[field.m_type]
                if not ftype.m_refed:
                    ftype.invalidate_post()
                ftype.m_refed = True
            field.m_type_obj = ftype
class YModule(object):
    """Loads one yang-derived xsd: module info, rpcs, notifications, and the
    simple/complex type tables."""

    def __init__(self, inXsdName):
        self.m_xsd_name = os.path.basename(inXsdName)
        self.m_xsd_tree = ElementTree.parse(inXsdName)
        self.m_xsd_root = self.m_xsd_tree.getroot()
        self.m_modname = ''            # module element name (from YANGModules)
        self.m_modtype = ''            # module complexType name
        self.m_namespaces = {}         # module y:ns map, lifted after loading
        self.m_simple_types = []
        self.m_simple_type_dict = {}
        self.m_complex_types = []
        self.m_complex_type_dict = {}
        self.m_rpcs = {}               # rpc name -> ElementRpc
        self.m_notifys = {}            # notification name -> ElementNotify

    @property
    def namespace(self):
        """The module's default (unprefixed) namespace."""
        if '' in self.m_namespaces:
            return self.m_namespaces['']
        raise XException('there is no module namespace')

    def parse(self):
        """Run the full load pipeline for this xsd."""
        logger.debug("begin parse %s", self.m_xsd_name)
        self.load_root()
        self.load_simple_types()
        self.load_complex_types()
        self.load_complex_types_post()

    def load_root(self):
        """Read the YANGModules/YANGRpcs/YANGNotifys bookkeeping types."""
        modules = self.m_xsd_root.findall(
            ".//xsd:complexType[@name='YANGModules']/xsd:sequence/xsd:element", XSDNS)
        if len(modules) != 1:
            raise XException('only support one xsd one yang module')
        self.m_modname = modules[0].attrib['name']
        self.m_modtype = modules[0].attrib['type']
        for element in self.m_xsd_root.iterfind(".//xsd:complexType[@name='YANGRpcs']/xsd:sequence/xsd:element", XSDNS):
            name = element.attrib['name']
            rpc = ElementRpc(name)
            rpc.parse(element)
            self.m_rpcs[name] = rpc
        for element in self.m_xsd_root.iterfind(".//xsd:complexType[@name='YANGNotifys']/xsd:sequence/xsd:element", XSDNS):
            name = element.attrib['name']
            notify = ElementNotify(name)
            notify.parse(element)
            self.m_notifys[name] = notify

    def load_simple_types(self):
        """Classify each xsd:simpleType as enum, int, or string and register it."""
        for simple in self.m_xsd_root.iterfind(".//xsd:simpleType", XSDNS):
            name = simple.attrib['name']
            simpleType = None
            if simple.findall(".//xsd:restriction/xsd:enumeration", XSDNS):
                simpleType = XSimpleTypeEnum(name)
            elif name in ('int32', 'uint32', 'int64', 'uint64'):
                simpleType = XSimpleTypeInt(name)
            else:
                simpleType = XSimpleTypeString(name)
            simpleType.parse(simple)
            self.type_add(simpleType)

    def load_complex_types(self):
        """Register every non-bookkeeping xsd:complexType; type_add runs
        before parse so m_root is available during parsing."""
        for complex in self.m_xsd_root.iterfind(".//xsd:complexType", XSDNS):
            name = complex.attrib['name']
            if name in ('YANGModules', 'YANGRpcs', 'YANGNotifys'):
                continue
            complexType = XComplexType(name)
            self.type_add(complexType)
            complexType.parse(complex)

    def load_complex_types_post(self):
        """Build path trees, resolve field types, and lift the module
        complexType's namespaces onto the module."""
        for complexType in self.m_complex_types:
            complexType.build()
            complexType.invalidate_post()
        self.m_namespaces = self.m_complex_type_dict[self.m_modtype].m_namespaces

    def type_add(self, inType):
        """Register a type and set its back-reference; duplicates are errors."""
        if isinstance(inType, XSimpleType):
            if inType.m_name in self.m_simple_type_dict:
                raise XException("simpleType %s duplicate!!" % inType.m_name)
            self.m_simple_types.append(inType)
            self.m_simple_type_dict[inType.m_name] = inType
        elif isinstance(inType, XComplexType):
            if inType.m_name in self.m_complex_type_dict:
                # BUG FIX: raised XTypeException, which is not defined in
                # this module; use the module's own XException.
                raise XException(
                    "complexType %s duplicate!!" % inType.m_name)
            self.m_complex_types.append(inType)
            self.m_complex_type_dict[inType.m_name] = inType
        else:
            # BUG FIX: was XTypeException (undefined here) and the name was
            # passed positionally instead of being %-formatted.
            raise XException(
                'unkown simpleType/complexType %s' % inType.m_name)
        inType.m_root = self
def isXSimpleTypeEnum(inSimpleType):
    """Return True when *inSimpleType* is an XSimpleTypeEnum instance."""
    return isinstance(inSimpleType, XSimpleTypeEnum)
|
{"/xgen/xgen/xgen.py": ["/xgen/xgen/xtype.py"], "/xgen/xgen/xtype.py": ["/xgen/xgen/__init__.py"]}
|
41,386
|
irmakcakmak/great-circle-distance
|
refs/heads/master
|
/test/test_user.py
|
import unittest
from user import User
class UserTest(unittest.TestCase):
    """Unit tests for the User record type."""

    def test_less_than_comparison(self):
        # user1 must compare less than user2 — ordering key presumably the
        # id (1 < 2); TODO confirm whether id or name drives __lt__.
        user1 = User(1, "user1")
        user2 = User(2, "user2")
        self.assertLess(user1, user2)

    def test_malformed_data(self):
        # from_record must reject a record whose expected key is missing
        # ("names" instead of the expected field) with ValueError.
        with self.assertRaises(ValueError):
            User.from_record({"id": 1, "names": "malformed"})
|
{"/test/test_user.py": ["/user.py"], "/main.py": ["/point.py", "/user.py", "/reader.py"], "/test/test_point.py": ["/point.py"], "/test/test_reader.py": ["/reader.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.