file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
views.py | from rest_framework import generics, status, views
from .serializers import (RegisterSerializer, EmailVerificationSerializer, LoginSerializer,
CustomerSerializer, CustomerSerializerDetail, LogoutSerializer, ResetPasswordSerializer,
SetNewPasswordSerializer, PhoneNumberSerializer, OtpSerializer)
from rest_framework.response import Response
from .models import User, Customer, Admin
from django.db import transaction
from django.contrib.auth import logout
from rest_framework_simplejwt.tokens import RefreshToken
from .utils import email_template, generate_otp
from django.contrib.sites.shortcuts import get_current_site
import jwt
from django.contrib.auth import get_user_model
from django.conf import settings
from django.urls import reverse # takes url name and gives us the path
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, force_str, smart_bytes, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from twilio.rest import Client
from datetime import timedelta
from django.utils import timezone
class RegisterView(generics.GenericAPIView):
"""View for registering new users"""
serializer_class = RegisterSerializer
@transaction.atomic()
def post(self, request):
'''Sends a post request to create a new user'''
user = request.data # gets details passed from the request and assigns it to user
full_name = request.data.get('full_name')
serializer = RegisterSerializer(
data=user) # serializes and validates the data sent in request by passing it to register serializer
serializer.is_valid(raise_exception=True) # confirms that the data in serializer is indeed valid
serializer.save() # creates and saves this data which is user to db
user_data = serializer.data # user data is the data that the serializer has saved
user = User.objects.get(
email=user_data['email']) # initializes a user by fetching it from the db using the users email
token = RefreshToken.for_user(user).access_token # generates and ties a token to the users email passed to it
customer = {"user_id": str(user.id), # creates customer object by accessing User id
"full_name": full_name}
customer_serializer = CustomerSerializer(data=customer) # serializes customer data
customer_serializer.is_valid(raise_exception=True)
customer_serializer.save() # creates and save customer to db
customer_instance = Customer.objects.get(full_name=customer_serializer.data["full_name"])
customer_data = CustomerSerializerDetail(
customer_instance) # uses the customer instance to access all the users attributes
current_site = get_current_site(
request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('verify-email') # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink + "?token=" + str(
token) # this is the link that will be sent to new user to click on
email_subject = 'Welcome To OgaTailor'
email_body = f'''
Hello {user.username}, Welcome to OgaTailor, we are delighted to have you on board!
<br><br><b>Note: <i>Please click the link below to verify your account.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>It expires in 10 minutes.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response(customer_data.data, status=status.HTTP_201_CREATED)
class VerifyEmailView(views.APIView):
serializer_class = EmailVerificationSerializer
token_param_config = openapi.Parameter('token', in_=openapi.IN_QUERY, description='Description',
type=openapi.TYPE_STRING)
@swagger_auto_schema(manual_parameters=[token_param_config])
def get(self, request):
token = request.GET.get('token') # get the token from the user when they hit our view
try:
payload = jwt.decode(token,
settings.SECRET_KEY) # here we are truing to access the informattion encoded in to the link. Functionality comes with jwt
user = User.objects.get(id=payload['user_id']) # here we extract the user from the payload
if not user.is_verified: # check that the user is not already verified so as to reduce the db queries
user.is_verified = True
user.email_verified = True
user.save()
return Response({'email': 'Successfully activated'}, status=status.HTTP_200_OK)
except jwt.ExpiredSignatureError:
return Response({'error': 'This activation link has expired. Please request for a new one.'},
status=status.HTTP_400_BAD_REQUEST)
except jwt.exceptions.DecodeError:
return Response({'error': 'Invalid token, request a new one.'}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request):
user = request.data
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status.HTTP_200_OK)
class LogoutView(generics.GenericAPIView):
serializer_class = LogoutSerializer
def post(self, request):
logout(request)
data = {'Success': 'Logout Successful'}
return Response(data=data, status=status.HTTP_200_OK)
class RequestPasswordEmailView(generics.GenericAPIView):
serializer_class = ResetPasswordSerializer
def post(self, request):
email = request.data['email']
if User.objects.filter(email=email).exists():
user = User.objects.get(email=email)
uidb64 = urlsafe_base64_encode(smart_bytes(user.id))
token = PasswordResetTokenGenerator().make_token(user)
current_site = get_current_site(
request=request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('password-reset-confirm', kwargs={'uidb64': uidb64,
'token': token}) # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink # this is the link that will be sent to user to click on
email_subject = 'Password Reset'
email_body = f'''
Hello, \n You have requested a password reset!
<br><br><b>Note: <i>Please click the link below to reset your password.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>If you did not request this change, disregard this email.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response({'success': "We have sent you a link to reset your password"}, status=status.HTTP_200_OK)
class PasswordTokenCheckView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def get(self, request, uidb64, token):
# redirect_url = request.GET.get('redirect_url')
try:
id = smart_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
if not PasswordResetTokenGenerator().check_token(user, token):
return Response({'error': 'Token is not valid, please request a new one'})
return Response({'success': True, 'message': 'Credentials valid', 'uidb64': uidb64, 'token': token})
except DjangoUnicodeDecodeError as identifier:
if not PasswordResetTokenGenerator().check_token(user):
return Response({'error': 'Token is not valid, please request for a ew one'})
class SetNewPasswordAPIView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def patch(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response({'success': True, 'message': 'Password reset successful'}, status=status.HTTP_200_OK)
class SendSmsView(generics.GenericAPIView):
|
class VerifyOtpView(generics.GenericAPIView):
serializer_class = OtpSerializer
def post(self, request):
data = request.data
user = get_user_model().objects.filter(email=data['email'])
if not user.exists():
return Response({'errors': 'You are not registered'})
user = user[0]
if user.otp_code != data['otp_code']:
return Response({'errors': 'Please provide a valid OTP'})
otp_expired = OtpSerializer(data=data)
if not otp_expired:
return Response({'errors': 'OTP provided has expired'})
user.phone_verified = True
user.save()
return Response({'message': 'Phone Verified!'})
| serializer_class = PhoneNumberSerializer
otp = None
@transaction.atomic()
def post(self, request, otp=None):
data = request.data
email = data['email']
user = User.objects.get(email=email)
phone_number_valid = PhoneNumberSerializer(data=data)
if not phone_number_valid.is_valid():
return Response({'errors': 'Invalid phone number'})
phone_number = data['phone_number']
otp = self.otp
if otp is None:
otp = generate_otp()
user.otp_code = otp
user.phone_number = phone_number
expiry = timezone.now() + timedelta(minutes=30)
user.otp_code_expiry = expiry
user.save()
try:
client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
message_to_broadcast = f'Your OgaTailor Verification code is {otp}'
client.messages.create(to=phone_number, from_=settings.TWILIO_NUMBER, body=message_to_broadcast)
return Response({'message': 'OTP Sent!', 'otp': otp })
except:
return Response({'errors': 'Having problems sending code'}) | identifier_body |
views.py | from rest_framework import generics, status, views
from .serializers import (RegisterSerializer, EmailVerificationSerializer, LoginSerializer,
CustomerSerializer, CustomerSerializerDetail, LogoutSerializer, ResetPasswordSerializer,
SetNewPasswordSerializer, PhoneNumberSerializer, OtpSerializer)
from rest_framework.response import Response
from .models import User, Customer, Admin
from django.db import transaction
from django.contrib.auth import logout
from rest_framework_simplejwt.tokens import RefreshToken
from .utils import email_template, generate_otp
from django.contrib.sites.shortcuts import get_current_site
import jwt
from django.contrib.auth import get_user_model
from django.conf import settings
from django.urls import reverse # takes url name and gives us the path
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, force_str, smart_bytes, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from twilio.rest import Client
from datetime import timedelta
from django.utils import timezone
class RegisterView(generics.GenericAPIView):
"""View for registering new users"""
serializer_class = RegisterSerializer
@transaction.atomic()
def post(self, request):
'''Sends a post request to create a new user'''
user = request.data # gets details passed from the request and assigns it to user
full_name = request.data.get('full_name')
serializer = RegisterSerializer(
data=user) # serializes and validates the data sent in request by passing it to register serializer
serializer.is_valid(raise_exception=True) # confirms that the data in serializer is indeed valid
serializer.save() # creates and saves this data which is user to db
user_data = serializer.data # user data is the data that the serializer has saved
user = User.objects.get(
email=user_data['email']) # initializes a user by fetching it from the db using the users email
token = RefreshToken.for_user(user).access_token # generates and ties a token to the users email passed to it
customer = {"user_id": str(user.id), # creates customer object by accessing User id
"full_name": full_name}
customer_serializer = CustomerSerializer(data=customer) # serializes customer data
customer_serializer.is_valid(raise_exception=True)
customer_serializer.save() # creates and save customer to db
customer_instance = Customer.objects.get(full_name=customer_serializer.data["full_name"])
customer_data = CustomerSerializerDetail(
customer_instance) # uses the customer instance to access all the users attributes
current_site = get_current_site(
request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('verify-email') # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink + "?token=" + str(
token) # this is the link that will be sent to new user to click on
email_subject = 'Welcome To OgaTailor'
email_body = f'''
Hello {user.username}, Welcome to OgaTailor, we are delighted to have you on board!
<br><br><b>Note: <i>Please click the link below to verify your account.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>It expires in 10 minutes.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response(customer_data.data, status=status.HTTP_201_CREATED)
class VerifyEmailView(views.APIView):
serializer_class = EmailVerificationSerializer
token_param_config = openapi.Parameter('token', in_=openapi.IN_QUERY, description='Description',
type=openapi.TYPE_STRING)
@swagger_auto_schema(manual_parameters=[token_param_config])
def get(self, request):
token = request.GET.get('token') # get the token from the user when they hit our view
try:
payload = jwt.decode(token,
settings.SECRET_KEY) # here we are truing to access the informattion encoded in to the link. Functionality comes with jwt
user = User.objects.get(id=payload['user_id']) # here we extract the user from the payload
if not user.is_verified: # check that the user is not already verified so as to reduce the db queries
user.is_verified = True
user.email_verified = True
user.save()
return Response({'email': 'Successfully activated'}, status=status.HTTP_200_OK)
except jwt.ExpiredSignatureError:
return Response({'error': 'This activation link has expired. Please request for a new one.'},
status=status.HTTP_400_BAD_REQUEST)
except jwt.exceptions.DecodeError:
return Response({'error': 'Invalid token, request a new one.'}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(generics.GenericAPIView):
serializer_class = LoginSerializer
def | (self, request):
user = request.data
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status.HTTP_200_OK)
class LogoutView(generics.GenericAPIView):
serializer_class = LogoutSerializer
def post(self, request):
logout(request)
data = {'Success': 'Logout Successful'}
return Response(data=data, status=status.HTTP_200_OK)
class RequestPasswordEmailView(generics.GenericAPIView):
serializer_class = ResetPasswordSerializer
def post(self, request):
email = request.data['email']
if User.objects.filter(email=email).exists():
user = User.objects.get(email=email)
uidb64 = urlsafe_base64_encode(smart_bytes(user.id))
token = PasswordResetTokenGenerator().make_token(user)
current_site = get_current_site(
request=request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('password-reset-confirm', kwargs={'uidb64': uidb64,
'token': token}) # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink # this is the link that will be sent to user to click on
email_subject = 'Password Reset'
email_body = f'''
Hello, \n You have requested a password reset!
<br><br><b>Note: <i>Please click the link below to reset your password.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>If you did not request this change, disregard this email.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response({'success': "We have sent you a link to reset your password"}, status=status.HTTP_200_OK)
class PasswordTokenCheckView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def get(self, request, uidb64, token):
# redirect_url = request.GET.get('redirect_url')
try:
id = smart_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
if not PasswordResetTokenGenerator().check_token(user, token):
return Response({'error': 'Token is not valid, please request a new one'})
return Response({'success': True, 'message': 'Credentials valid', 'uidb64': uidb64, 'token': token})
except DjangoUnicodeDecodeError as identifier:
if not PasswordResetTokenGenerator().check_token(user):
return Response({'error': 'Token is not valid, please request for a ew one'})
class SetNewPasswordAPIView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def patch(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response({'success': True, 'message': 'Password reset successful'}, status=status.HTTP_200_OK)
class SendSmsView(generics.GenericAPIView):
serializer_class = PhoneNumberSerializer
otp = None
@transaction.atomic()
def post(self, request, otp=None):
data = request.data
email = data['email']
user = User.objects.get(email=email)
phone_number_valid = PhoneNumberSerializer(data=data)
if not phone_number_valid.is_valid():
return Response({'errors': 'Invalid phone number'})
phone_number = data['phone_number']
otp = self.otp
if otp is None:
otp = generate_otp()
user.otp_code = otp
user.phone_number = phone_number
expiry = timezone.now() + timedelta(minutes=30)
user.otp_code_expiry = expiry
user.save()
try:
client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
message_to_broadcast = f'Your OgaTailor Verification code is {otp}'
client.messages.create(to=phone_number, from_=settings.TWILIO_NUMBER, body=message_to_broadcast)
return Response({'message': 'OTP Sent!', 'otp': otp })
except:
return Response({'errors': 'Having problems sending code'})
class VerifyOtpView(generics.GenericAPIView):
serializer_class = OtpSerializer
def post(self, request):
data = request.data
user = get_user_model().objects.filter(email=data['email'])
if not user.exists():
return Response({'errors': 'You are not registered'})
user = user[0]
if user.otp_code != data['otp_code']:
return Response({'errors': 'Please provide a valid OTP'})
otp_expired = OtpSerializer(data=data)
if not otp_expired:
return Response({'errors': 'OTP provided has expired'})
user.phone_verified = True
user.save()
return Response({'message': 'Phone Verified!'})
| post | identifier_name |
entryfile.go | package datatree
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/coinexchain/onvakv/types"
)
type Entry = types.Entry
const MaxEntryBytes int = (1 << 24) - 1
var MagicBytes = [8]byte{255, 254, 253, 252, 252, 253, 254, 255}
var dbg bool
func DummyEntry(sn int64) *Entry {
return &Entry{
Key: []byte("dummy"),
Value: []byte("dummy"),
NextKey: []byte("dummy"),
Height: -2,
LastHeight: -2,
SerialNum: sn,
}
}
func NullEntry() Entry {
return Entry{
Key: []byte{},
Value: []byte{},
NextKey: []byte{},
Height: -1,
LastHeight: -1,
SerialNum: -1,
}
}
// Entry serialization format:
// magicBytes 8-bytes
// 8b snList length
// 24b-totalLength (this length does not include padding, snList and this field itself)
// magicBytesPos(list of 32b-int, -1 for ending), posistions are relative to the end of 32b-totalLength
// normalPayload
// DeactivedSerialNumList (list of 64b-int)
// padding-zero-bytes
const (
MSB32 = uint32(1<<31)
)
func PutUint24(b []byte, n uint32) {
//!! if n == 0 {
//!! panic("here PutUint24")
//!! }
b[0] = byte(n)
b[1] = byte(n>>8)
b[2] = byte(n>>16)
}
func GetUint24(b []byte) (n uint32) {
n = uint32(b[0])
n |= uint32(b[1]) << 8
n |= uint32(b[2]) << 16
//!! if n == 0 {
//!! panic("here GetUint24")
//!! }
return
}
//!! func SkipPosList(bz []byte) []byte {
//!! for i := 0; i + 4 < len(bz); i+=4 {
//!! if (bz[i]&bz[i+1]&bz[i+2]&bz[i+3]) == 0xFF {
//!! return bz[i+4:]
//!! }
//!! }
//!! return nil
//!! }
func ExtractKeyFromRawBytes(b []byte) []byte {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
length := int(binary.LittleEndian.Uint32(bb[4:8]))
return append([]byte{}, bb[8:8+length]...)
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
bb = bb[n:]
length := int(binary.LittleEndian.Uint32(bb[:4]))
return append([]byte{}, bb[4:4+length]...)
}
func EntryFromRawBytes(b []byte) *Entry |
func ExtractSerialNum(entryBz []byte) int64 {
return int64(binary.LittleEndian.Uint64(entryBz[len(entryBz)-8:]))
}
func UpdateSerialNum(entryBz []byte, sn int64) {
binary.LittleEndian.PutUint64(entryBz[len(entryBz)-8:], uint64(sn))
}
func SNListToBytes(deactivedSerialNumList []int64) []byte {
res := make([]byte, len(deactivedSerialNumList) * 8)
i := 0
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(res[i:i+8], uint64(sn))
i += 8
}
return res
}
func EntryToBytes(entry Entry, deactivedSerialNumList []int64) []byte {
length := 4 + 4 // 32b-length and empty magicBytesPos
length += 4*3 + len(entry.Key) + len(entry.Value) + len(entry.NextKey) // Three strings
length += 8 * 3 // Three int64
length += len(deactivedSerialNumList) * 8
b := make([]byte, length)
b[0] = byte(len(deactivedSerialNumList))
const start = 8
writeEntryPayload(b[start:], entry, deactivedSerialNumList)
// MagicBytes can not lay in or overlap with these 64b integers
stop := len(b) - len(deactivedSerialNumList)*8 - 3*8
magicBytesPosList := getAllPos(b[start:stop], MagicBytes[:])
if len(magicBytesPosList) == 0 {
//!! if dbg {
//!! fmt.Printf("here-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
PutUint24(b[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
binary.LittleEndian.PutUint32(b[4:8], ^uint32(0))
return b
}
// if magicBytesPosList is not empty:
var zeroBuf [8]byte
for _, pos := range magicBytesPosList {
copy(b[start+pos:start+pos+8], zeroBuf[:]) // over-write the occurrence of magic bytes with zeros
}
length += 4 * len(magicBytesPosList)
buf := make([]byte, length)
bytesAdded := 4 * len(magicBytesPosList)
var i int
for i = 0; i < len(magicBytesPosList); i++ {
pos := magicBytesPosList[i] + bytesAdded /*32b-length*/
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], uint32(pos))
}
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], ^uint32(0))
copy(buf[i*4+8:], b[8:])
// Re-write the new length. minus 4 because the first 4 bytes of length isn't included
buf[0] = byte(len(deactivedSerialNumList))
PutUint24(buf[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
//!! if dbg {
//!! fmt.Printf("there-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
return buf
}
func writeEntryPayload(b []byte, entry Entry, deactivedSerialNumList []int64) {
i := 0
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Key)))
i += 4
copy(b[i:], entry.Key)
i += len(entry.Key)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Value)))
i += 4
copy(b[i:], entry.Value)
i += len(entry.Value)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.NextKey)))
i += 4
copy(b[i:], entry.NextKey)
i += len(entry.NextKey)
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.Height))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.LastHeight))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.SerialNum))
i += 8
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(b[i:i+8], uint64(sn))
i += 8
}
}
func getAllPos(s, sep []byte) (allpos []int) {
for start, pos := 0, 0; start + len(sep) < len(s); start += pos + len(sep) {
pos = bytes.Index(s[start:], sep)
if pos == -1 {
return
}
allpos = append(allpos, pos+start)
}
return
}
func EntryFromBytes(b []byte, numberOfSN int) (*Entry, []int64) {
entry := &Entry{}
i := 0
length := int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Key = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Value = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.NextKey = b[i:i+length]
i += length
entry.Height = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.LastHeight = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.SerialNum = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
if numberOfSN == 0 {
return entry, nil
}
deactivedSerialNumList := make([]int64, numberOfSN)
for j := range deactivedSerialNumList {
deactivedSerialNumList[j] = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
}
return entry, deactivedSerialNumList
}
type EntryFile struct {
HPFile
}
func getPaddingSize(length int) int {
rem := length % 8
if rem == 0 {
return 0
} else {
return 8 - rem
}
}
func (ef *EntryFile) readMagicBytesAndLength(off int64, withBuf bool) (length int64, numberOfSN int) {
var buf [12]byte
err := ef.HPFile.ReadAt(buf[:], off, withBuf)
if err != nil {
panic(err)
}
if !bytes.Equal(buf[:8], MagicBytes[:]) {
fmt.Printf("Now off %d %x\n", off, off)
panic("Invalid MagicBytes")
}
length = int64(GetUint24(buf[9:12]))
if int(length) >= MaxEntryBytes {
panic("Entry to long")
}
return length, int(buf[8])
}
func getNextPos(off, length int64) int64 {
length += 8 /*magicbytes*/ + 4 /*length*/
paddingSize := getPaddingSize(int(length))
paddedLen := length + int64(paddingSize)
nextPos := off + paddedLen
//fmt.Printf("off %d length %d paddingSize %d paddedLen %d nextPos %d\n", off, length, paddingSize, paddedLen, nextPos)
return nextPos
}
func (ef *EntryFile) ReadEntryAndSNList(off int64) (entry *Entry, deactivedSerialNumList []int64, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, true, false, true)
entry, deactivedSerialNumList = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntry(off int64) (entry *Entry, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, false, false, false)
entry, _ = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntryRawBytes(off int64) (entryBz []byte, nextPos int64) {
entryBz, _, nextPos = ef.readEntry(off, false, true, true)
return
}
func recoverMagicBytes(b []byte) (n int) {
for n = 0; n + 4 < len(b); n += 4 { // recover magic bytes in payload
pos := binary.LittleEndian.Uint32(b[n : n+4])
if pos == ^(uint32(0)) {
n += 4
break
}
if int(pos) >= MaxEntryBytes {
panic("Position to large")
}
copy(b[int(pos)+4:int(pos)+12], MagicBytes[:])
}
return
}
func (ef *EntryFile) readEntry(off int64, withSNList, useRaw, withBuf bool) (entrybz []byte, numberOfSN int, nextPos int64) {
length, numberOfSN := ef.readMagicBytesAndLength(off, withBuf)
nextPos = getNextPos(off, int64(length)+8*int64(numberOfSN))
if withSNList {
length += 8 * int64(numberOfSN) // ignore snlist
} else {
numberOfSN = 0
}
b := make([]byte, 12+int(length)) // include 12 (magicbytes and length)
err := ef.HPFile.ReadAt(b, off, withBuf)
origB := b
b = b[12:] // ignore magicbytes and length
if err != nil {
panic(err)
}
if useRaw {
return origB[8:], numberOfSN, nextPos
}
n := recoverMagicBytes(b)
return b[n:length], numberOfSN, nextPos
}
func NewEntryFile(bufferSize, blockSize int, dirName string) (res EntryFile, err error) {
res.HPFile, err = NewHPFile(bufferSize, blockSize, dirName)
res.HPFile.InitPreReader()
return
}
func (ef *EntryFile) Size() int64 {
return ef.HPFile.Size()
}
func (ef *EntryFile) Truncate(size int64) {
err := ef.HPFile.Truncate(size)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Flush() {
ef.HPFile.Flush()
}
func (ef *EntryFile) FlushAsync() {
ef.HPFile.FlushAsync()
}
func (ef *EntryFile) Close() {
err := ef.HPFile.Close()
if err != nil {
panic(err)
}
}
func (ef *EntryFile) PruneHead(off int64) {
err := ef.HPFile.PruneHead(off)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Append(b [2][]byte) (pos int64) {
//!! if b[0][1] == 0 && b[0][2] == 0 && b[0][3] == 0 {
//!! fmt.Printf("%#v\n", b)
//!! panic("here in Append")
//!! }
var bb [4][]byte
bb[0] = MagicBytes[:]
bb[1] = b[0]
bb[2] = b[1]
paddingSize := getPaddingSize(len(b[0])+len(b[1]))
bb[3] = make([]byte, paddingSize) // padding zero bytes
pos, err := ef.HPFile.Append(bb[:])
//!! if pos > 108996000 {
//!! dbg = true
//!! fmt.Printf("Append pos %d %#v len(bb[1]) %d padding %d\n", pos, bb[:], len(bb[1]), paddingSize)
//!! }
if pos%8 != 0 {
panic("Entries are not aligned")
}
if err != nil {
panic(err)
}
//fmt.Printf("Now Append At: %d len: %d\n", pos, len(b))
return
}
func (ef *EntryFile) GetActiveEntriesInTwig(twig *Twig) chan []byte {
res := make(chan []byte, 100)
go func() {
start := twig.FirstEntryPos
for i := 0; i < LeafCountInTwig; i++ {
if twig.getBit(i) {
entryBz, next := ef.ReadEntryRawBytes(start)
//!! fmt.Printf("Why start %d entryBz %#v\n", start, entryBz)
start = next
res <- entryBz
} else { // skip an inactive entry
length, numberOfSN := ef.readMagicBytesAndLength(start, true)
start = getNextPos(start, length+8*int64(numberOfSN))
}
}
close(res)
}()
return res
}
//!! func (ef *EntryFile) GetActiveEntriesInTwigOld(twig *Twig) chan *Entry {
//!! res := make(chan *Entry, 100)
//!! go func() {
//!! start := twig.FirstEntryPos
//!! for i := 0; i < LeafCountInTwig; i++ {
//!! if twig.getBit(i) {
//!! entry, next := ef.ReadEntry(start)
//!! start = next
//!! res <- entry
//!! } else { // skip an inactive entry
//!! length, numberOfSN := ef.readMagicBytesAndLength(start, true)
//!! start = getNextPos(start, length+8*int64(numberOfSN))
//!! }
//!! }
//!! close(res)
//!! }()
//!! return res
//!! }
| {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
e, _ := EntryFromBytes(bb[4:], 0)
return e
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
e, _ := EntryFromBytes(bb[n+4:], 0)
return e
} | identifier_body |
entryfile.go | package datatree
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/coinexchain/onvakv/types"
)
type Entry = types.Entry
const MaxEntryBytes int = (1 << 24) - 1
var MagicBytes = [8]byte{255, 254, 253, 252, 252, 253, 254, 255}
var dbg bool
func DummyEntry(sn int64) *Entry {
return &Entry{
Key: []byte("dummy"),
Value: []byte("dummy"),
NextKey: []byte("dummy"),
Height: -2,
LastHeight: -2,
SerialNum: sn,
}
}
func NullEntry() Entry {
return Entry{
Key: []byte{},
Value: []byte{},
NextKey: []byte{},
Height: -1,
LastHeight: -1,
SerialNum: -1,
}
}
// Entry serialization format:
// magicBytes 8-bytes
// 8b snList length
// 24b-totalLength (this length does not include padding, snList and this field itself)
// magicBytesPos(list of 32b-int, -1 for ending), posistions are relative to the end of 32b-totalLength
// normalPayload
// DeactivedSerialNumList (list of 64b-int)
// padding-zero-bytes
const (
MSB32 = uint32(1<<31)
)
func PutUint24(b []byte, n uint32) {
//!! if n == 0 {
//!! panic("here PutUint24")
//!! }
b[0] = byte(n)
b[1] = byte(n>>8)
b[2] = byte(n>>16)
}
func GetUint24(b []byte) (n uint32) {
n = uint32(b[0])
n |= uint32(b[1]) << 8
n |= uint32(b[2]) << 16
//!! if n == 0 {
//!! panic("here GetUint24")
//!! }
return
}
//!! func SkipPosList(bz []byte) []byte {
//!! for i := 0; i + 4 < len(bz); i+=4 {
//!! if (bz[i]&bz[i+1]&bz[i+2]&bz[i+3]) == 0xFF {
//!! return bz[i+4:]
//!! }
//!! }
//!! return nil
//!! }
func ExtractKeyFromRawBytes(b []byte) []byte {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
length := int(binary.LittleEndian.Uint32(bb[4:8]))
return append([]byte{}, bb[8:8+length]...)
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
bb = bb[n:]
length := int(binary.LittleEndian.Uint32(bb[:4]))
return append([]byte{}, bb[4:4+length]...)
}
func EntryFromRawBytes(b []byte) *Entry {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
e, _ := EntryFromBytes(bb[4:], 0)
return e
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
e, _ := EntryFromBytes(bb[n+4:], 0)
return e
}
func ExtractSerialNum(entryBz []byte) int64 {
return int64(binary.LittleEndian.Uint64(entryBz[len(entryBz)-8:]))
}
func UpdateSerialNum(entryBz []byte, sn int64) {
binary.LittleEndian.PutUint64(entryBz[len(entryBz)-8:], uint64(sn))
}
func SNListToBytes(deactivedSerialNumList []int64) []byte {
res := make([]byte, len(deactivedSerialNumList) * 8)
i := 0
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(res[i:i+8], uint64(sn))
i += 8
}
return res
}
func EntryToBytes(entry Entry, deactivedSerialNumList []int64) []byte {
length := 4 + 4 // 32b-length and empty magicBytesPos
length += 4*3 + len(entry.Key) + len(entry.Value) + len(entry.NextKey) // Three strings
length += 8 * 3 // Three int64
length += len(deactivedSerialNumList) * 8
b := make([]byte, length)
b[0] = byte(len(deactivedSerialNumList))
const start = 8
writeEntryPayload(b[start:], entry, deactivedSerialNumList)
// MagicBytes can not lay in or overlap with these 64b integers
stop := len(b) - len(deactivedSerialNumList)*8 - 3*8
magicBytesPosList := getAllPos(b[start:stop], MagicBytes[:])
if len(magicBytesPosList) == 0 {
//!! if dbg {
//!! fmt.Printf("here-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
PutUint24(b[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
binary.LittleEndian.PutUint32(b[4:8], ^uint32(0))
return b
}
// if magicBytesPosList is not empty:
var zeroBuf [8]byte
for _, pos := range magicBytesPosList {
copy(b[start+pos:start+pos+8], zeroBuf[:]) // over-write the occurrence of magic bytes with zeros
}
length += 4 * len(magicBytesPosList)
buf := make([]byte, length)
bytesAdded := 4 * len(magicBytesPosList)
var i int
for i = 0; i < len(magicBytesPosList); i++ {
pos := magicBytesPosList[i] + bytesAdded /*32b-length*/
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], uint32(pos))
}
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], ^uint32(0))
copy(buf[i*4+8:], b[8:])
// Re-write the new length. minus 4 because the first 4 bytes of length isn't included
buf[0] = byte(len(deactivedSerialNumList))
PutUint24(buf[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
//!! if dbg {
//!! fmt.Printf("there-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
return buf
}
func writeEntryPayload(b []byte, entry Entry, deactivedSerialNumList []int64) {
i := 0
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Key)))
i += 4
copy(b[i:], entry.Key)
i += len(entry.Key)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Value)))
i += 4
copy(b[i:], entry.Value)
i += len(entry.Value)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.NextKey)))
i += 4
copy(b[i:], entry.NextKey)
i += len(entry.NextKey)
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.Height))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.LastHeight))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.SerialNum))
i += 8
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(b[i:i+8], uint64(sn))
i += 8
}
}
func getAllPos(s, sep []byte) (allpos []int) {
for start, pos := 0, 0; start + len(sep) < len(s); start += pos + len(sep) {
pos = bytes.Index(s[start:], sep)
if pos == -1 |
allpos = append(allpos, pos+start)
}
return
}
func EntryFromBytes(b []byte, numberOfSN int) (*Entry, []int64) {
entry := &Entry{}
i := 0
length := int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Key = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Value = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.NextKey = b[i:i+length]
i += length
entry.Height = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.LastHeight = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.SerialNum = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
if numberOfSN == 0 {
return entry, nil
}
deactivedSerialNumList := make([]int64, numberOfSN)
for j := range deactivedSerialNumList {
deactivedSerialNumList[j] = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
}
return entry, deactivedSerialNumList
}
type EntryFile struct {
HPFile
}
func getPaddingSize(length int) int {
rem := length % 8
if rem == 0 {
return 0
} else {
return 8 - rem
}
}
func (ef *EntryFile) readMagicBytesAndLength(off int64, withBuf bool) (length int64, numberOfSN int) {
var buf [12]byte
err := ef.HPFile.ReadAt(buf[:], off, withBuf)
if err != nil {
panic(err)
}
if !bytes.Equal(buf[:8], MagicBytes[:]) {
fmt.Printf("Now off %d %x\n", off, off)
panic("Invalid MagicBytes")
}
length = int64(GetUint24(buf[9:12]))
if int(length) >= MaxEntryBytes {
panic("Entry to long")
}
return length, int(buf[8])
}
func getNextPos(off, length int64) int64 {
length += 8 /*magicbytes*/ + 4 /*length*/
paddingSize := getPaddingSize(int(length))
paddedLen := length + int64(paddingSize)
nextPos := off + paddedLen
//fmt.Printf("off %d length %d paddingSize %d paddedLen %d nextPos %d\n", off, length, paddingSize, paddedLen, nextPos)
return nextPos
}
func (ef *EntryFile) ReadEntryAndSNList(off int64) (entry *Entry, deactivedSerialNumList []int64, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, true, false, true)
entry, deactivedSerialNumList = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntry(off int64) (entry *Entry, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, false, false, false)
entry, _ = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntryRawBytes(off int64) (entryBz []byte, nextPos int64) {
entryBz, _, nextPos = ef.readEntry(off, false, true, true)
return
}
func recoverMagicBytes(b []byte) (n int) {
for n = 0; n + 4 < len(b); n += 4 { // recover magic bytes in payload
pos := binary.LittleEndian.Uint32(b[n : n+4])
if pos == ^(uint32(0)) {
n += 4
break
}
if int(pos) >= MaxEntryBytes {
panic("Position to large")
}
copy(b[int(pos)+4:int(pos)+12], MagicBytes[:])
}
return
}
func (ef *EntryFile) readEntry(off int64, withSNList, useRaw, withBuf bool) (entrybz []byte, numberOfSN int, nextPos int64) {
length, numberOfSN := ef.readMagicBytesAndLength(off, withBuf)
nextPos = getNextPos(off, int64(length)+8*int64(numberOfSN))
if withSNList {
length += 8 * int64(numberOfSN) // ignore snlist
} else {
numberOfSN = 0
}
b := make([]byte, 12+int(length)) // include 12 (magicbytes and length)
err := ef.HPFile.ReadAt(b, off, withBuf)
origB := b
b = b[12:] // ignore magicbytes and length
if err != nil {
panic(err)
}
if useRaw {
return origB[8:], numberOfSN, nextPos
}
n := recoverMagicBytes(b)
return b[n:length], numberOfSN, nextPos
}
func NewEntryFile(bufferSize, blockSize int, dirName string) (res EntryFile, err error) {
res.HPFile, err = NewHPFile(bufferSize, blockSize, dirName)
res.HPFile.InitPreReader()
return
}
func (ef *EntryFile) Size() int64 {
return ef.HPFile.Size()
}
func (ef *EntryFile) Truncate(size int64) {
err := ef.HPFile.Truncate(size)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Flush() {
ef.HPFile.Flush()
}
func (ef *EntryFile) FlushAsync() {
ef.HPFile.FlushAsync()
}
func (ef *EntryFile) Close() {
err := ef.HPFile.Close()
if err != nil {
panic(err)
}
}
func (ef *EntryFile) PruneHead(off int64) {
err := ef.HPFile.PruneHead(off)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Append(b [2][]byte) (pos int64) {
//!! if b[0][1] == 0 && b[0][2] == 0 && b[0][3] == 0 {
//!! fmt.Printf("%#v\n", b)
//!! panic("here in Append")
//!! }
var bb [4][]byte
bb[0] = MagicBytes[:]
bb[1] = b[0]
bb[2] = b[1]
paddingSize := getPaddingSize(len(b[0])+len(b[1]))
bb[3] = make([]byte, paddingSize) // padding zero bytes
pos, err := ef.HPFile.Append(bb[:])
//!! if pos > 108996000 {
//!! dbg = true
//!! fmt.Printf("Append pos %d %#v len(bb[1]) %d padding %d\n", pos, bb[:], len(bb[1]), paddingSize)
//!! }
if pos%8 != 0 {
panic("Entries are not aligned")
}
if err != nil {
panic(err)
}
//fmt.Printf("Now Append At: %d len: %d\n", pos, len(b))
return
}
func (ef *EntryFile) GetActiveEntriesInTwig(twig *Twig) chan []byte {
res := make(chan []byte, 100)
go func() {
start := twig.FirstEntryPos
for i := 0; i < LeafCountInTwig; i++ {
if twig.getBit(i) {
entryBz, next := ef.ReadEntryRawBytes(start)
//!! fmt.Printf("Why start %d entryBz %#v\n", start, entryBz)
start = next
res <- entryBz
} else { // skip an inactive entry
length, numberOfSN := ef.readMagicBytesAndLength(start, true)
start = getNextPos(start, length+8*int64(numberOfSN))
}
}
close(res)
}()
return res
}
//!! func (ef *EntryFile) GetActiveEntriesInTwigOld(twig *Twig) chan *Entry {
//!! res := make(chan *Entry, 100)
//!! go func() {
//!! start := twig.FirstEntryPos
//!! for i := 0; i < LeafCountInTwig; i++ {
//!! if twig.getBit(i) {
//!! entry, next := ef.ReadEntry(start)
//!! start = next
//!! res <- entry
//!! } else { // skip an inactive entry
//!! length, numberOfSN := ef.readMagicBytesAndLength(start, true)
//!! start = getNextPos(start, length+8*int64(numberOfSN))
//!! }
//!! }
//!! close(res)
//!! }()
//!! return res
//!! }
| {
return
} | conditional_block |
entryfile.go | package datatree
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/coinexchain/onvakv/types"
)
type Entry = types.Entry
const MaxEntryBytes int = (1 << 24) - 1
var MagicBytes = [8]byte{255, 254, 253, 252, 252, 253, 254, 255}
var dbg bool
func DummyEntry(sn int64) *Entry {
return &Entry{
Key: []byte("dummy"),
Value: []byte("dummy"),
NextKey: []byte("dummy"),
Height: -2,
LastHeight: -2,
SerialNum: sn,
}
}
func NullEntry() Entry {
return Entry{
Key: []byte{},
Value: []byte{},
NextKey: []byte{},
Height: -1,
LastHeight: -1,
SerialNum: -1,
}
}
// Entry serialization format:
// magicBytes 8-bytes
// 8b snList length
// 24b-totalLength (this length does not include padding, snList and this field itself)
// magicBytesPos(list of 32b-int, -1 for ending), posistions are relative to the end of 32b-totalLength
// normalPayload
// DeactivedSerialNumList (list of 64b-int)
// padding-zero-bytes
const (
MSB32 = uint32(1<<31)
)
func PutUint24(b []byte, n uint32) {
//!! if n == 0 {
//!! panic("here PutUint24")
//!! }
b[0] = byte(n)
b[1] = byte(n>>8)
b[2] = byte(n>>16)
}
func GetUint24(b []byte) (n uint32) {
n = uint32(b[0])
n |= uint32(b[1]) << 8
n |= uint32(b[2]) << 16
//!! if n == 0 {
//!! panic("here GetUint24")
//!! }
return
}
//!! func SkipPosList(bz []byte) []byte {
//!! for i := 0; i + 4 < len(bz); i+=4 {
//!! if (bz[i]&bz[i+1]&bz[i+2]&bz[i+3]) == 0xFF {
//!! return bz[i+4:]
//!! }
//!! }
//!! return nil
//!! }
func ExtractKeyFromRawBytes(b []byte) []byte {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
length := int(binary.LittleEndian.Uint32(bb[4:8]))
return append([]byte{}, bb[8:8+length]...)
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
bb = bb[n:]
length := int(binary.LittleEndian.Uint32(bb[:4]))
return append([]byte{}, bb[4:4+length]...)
}
func EntryFromRawBytes(b []byte) *Entry {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
e, _ := EntryFromBytes(bb[4:], 0)
return e
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
e, _ := EntryFromBytes(bb[n+4:], 0)
return e
}
func ExtractSerialNum(entryBz []byte) int64 {
return int64(binary.LittleEndian.Uint64(entryBz[len(entryBz)-8:]))
}
func UpdateSerialNum(entryBz []byte, sn int64) {
binary.LittleEndian.PutUint64(entryBz[len(entryBz)-8:], uint64(sn))
}
func SNListToBytes(deactivedSerialNumList []int64) []byte {
res := make([]byte, len(deactivedSerialNumList) * 8)
i := 0
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(res[i:i+8], uint64(sn))
i += 8
}
return res
}
func EntryToBytes(entry Entry, deactivedSerialNumList []int64) []byte {
length := 4 + 4 // 32b-length and empty magicBytesPos
length += 4*3 + len(entry.Key) + len(entry.Value) + len(entry.NextKey) // Three strings
length += 8 * 3 // Three int64
length += len(deactivedSerialNumList) * 8
b := make([]byte, length)
b[0] = byte(len(deactivedSerialNumList))
const start = 8
writeEntryPayload(b[start:], entry, deactivedSerialNumList)
// MagicBytes can not lay in or overlap with these 64b integers
stop := len(b) - len(deactivedSerialNumList)*8 - 3*8
magicBytesPosList := getAllPos(b[start:stop], MagicBytes[:])
if len(magicBytesPosList) == 0 {
//!! if dbg {
//!! fmt.Printf("here-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
PutUint24(b[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
binary.LittleEndian.PutUint32(b[4:8], ^uint32(0))
return b
}
// if magicBytesPosList is not empty:
var zeroBuf [8]byte
for _, pos := range magicBytesPosList {
copy(b[start+pos:start+pos+8], zeroBuf[:]) // over-write the occurrence of magic bytes with zeros
}
length += 4 * len(magicBytesPosList)
buf := make([]byte, length)
bytesAdded := 4 * len(magicBytesPosList)
var i int
for i = 0; i < len(magicBytesPosList); i++ {
pos := magicBytesPosList[i] + bytesAdded /*32b-length*/
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], uint32(pos))
}
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], ^uint32(0))
copy(buf[i*4+8:], b[8:])
// Re-write the new length. minus 4 because the first 4 bytes of length isn't included
buf[0] = byte(len(deactivedSerialNumList))
PutUint24(buf[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
//!! if dbg {
//!! fmt.Printf("there-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
return buf
}
func writeEntryPayload(b []byte, entry Entry, deactivedSerialNumList []int64) {
i := 0
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Key)))
i += 4
copy(b[i:], entry.Key)
i += len(entry.Key)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Value)))
i += 4
copy(b[i:], entry.Value)
i += len(entry.Value)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.NextKey)))
i += 4
copy(b[i:], entry.NextKey)
i += len(entry.NextKey)
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.Height))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.LastHeight))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.SerialNum))
i += 8
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(b[i:i+8], uint64(sn))
i += 8
}
}
func getAllPos(s, sep []byte) (allpos []int) {
for start, pos := 0, 0; start + len(sep) < len(s); start += pos + len(sep) {
pos = bytes.Index(s[start:], sep)
if pos == -1 {
return
}
allpos = append(allpos, pos+start)
}
return
}
func EntryFromBytes(b []byte, numberOfSN int) (*Entry, []int64) {
entry := &Entry{}
i := 0
length := int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Key = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Value = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.NextKey = b[i:i+length]
i += length
entry.Height = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.LastHeight = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.SerialNum = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
if numberOfSN == 0 {
return entry, nil
}
deactivedSerialNumList := make([]int64, numberOfSN)
for j := range deactivedSerialNumList {
deactivedSerialNumList[j] = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
}
return entry, deactivedSerialNumList
}
type EntryFile struct {
HPFile
}
func getPaddingSize(length int) int {
rem := length % 8
if rem == 0 {
return 0
} else {
return 8 - rem
}
}
func (ef *EntryFile) readMagicBytesAndLength(off int64, withBuf bool) (length int64, numberOfSN int) {
var buf [12]byte
err := ef.HPFile.ReadAt(buf[:], off, withBuf)
if err != nil {
panic(err)
}
if !bytes.Equal(buf[:8], MagicBytes[:]) {
fmt.Printf("Now off %d %x\n", off, off)
panic("Invalid MagicBytes")
}
length = int64(GetUint24(buf[9:12]))
if int(length) >= MaxEntryBytes {
panic("Entry to long")
}
return length, int(buf[8])
}
func getNextPos(off, length int64) int64 {
length += 8 /*magicbytes*/ + 4 /*length*/
paddingSize := getPaddingSize(int(length))
paddedLen := length + int64(paddingSize)
nextPos := off + paddedLen
//fmt.Printf("off %d length %d paddingSize %d paddedLen %d nextPos %d\n", off, length, paddingSize, paddedLen, nextPos)
return nextPos
}
func (ef *EntryFile) ReadEntryAndSNList(off int64) (entry *Entry, deactivedSerialNumList []int64, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, true, false, true)
entry, deactivedSerialNumList = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntry(off int64) (entry *Entry, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, false, false, false)
entry, _ = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntryRawBytes(off int64) (entryBz []byte, nextPos int64) {
entryBz, _, nextPos = ef.readEntry(off, false, true, true)
return
}
func | (b []byte) (n int) {
for n = 0; n + 4 < len(b); n += 4 { // recover magic bytes in payload
pos := binary.LittleEndian.Uint32(b[n : n+4])
if pos == ^(uint32(0)) {
n += 4
break
}
if int(pos) >= MaxEntryBytes {
panic("Position to large")
}
copy(b[int(pos)+4:int(pos)+12], MagicBytes[:])
}
return
}
func (ef *EntryFile) readEntry(off int64, withSNList, useRaw, withBuf bool) (entrybz []byte, numberOfSN int, nextPos int64) {
length, numberOfSN := ef.readMagicBytesAndLength(off, withBuf)
nextPos = getNextPos(off, int64(length)+8*int64(numberOfSN))
if withSNList {
length += 8 * int64(numberOfSN) // ignore snlist
} else {
numberOfSN = 0
}
b := make([]byte, 12+int(length)) // include 12 (magicbytes and length)
err := ef.HPFile.ReadAt(b, off, withBuf)
origB := b
b = b[12:] // ignore magicbytes and length
if err != nil {
panic(err)
}
if useRaw {
return origB[8:], numberOfSN, nextPos
}
n := recoverMagicBytes(b)
return b[n:length], numberOfSN, nextPos
}
func NewEntryFile(bufferSize, blockSize int, dirName string) (res EntryFile, err error) {
res.HPFile, err = NewHPFile(bufferSize, blockSize, dirName)
res.HPFile.InitPreReader()
return
}
func (ef *EntryFile) Size() int64 {
return ef.HPFile.Size()
}
func (ef *EntryFile) Truncate(size int64) {
err := ef.HPFile.Truncate(size)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Flush() {
ef.HPFile.Flush()
}
func (ef *EntryFile) FlushAsync() {
ef.HPFile.FlushAsync()
}
func (ef *EntryFile) Close() {
err := ef.HPFile.Close()
if err != nil {
panic(err)
}
}
func (ef *EntryFile) PruneHead(off int64) {
err := ef.HPFile.PruneHead(off)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Append(b [2][]byte) (pos int64) {
//!! if b[0][1] == 0 && b[0][2] == 0 && b[0][3] == 0 {
//!! fmt.Printf("%#v\n", b)
//!! panic("here in Append")
//!! }
var bb [4][]byte
bb[0] = MagicBytes[:]
bb[1] = b[0]
bb[2] = b[1]
paddingSize := getPaddingSize(len(b[0])+len(b[1]))
bb[3] = make([]byte, paddingSize) // padding zero bytes
pos, err := ef.HPFile.Append(bb[:])
//!! if pos > 108996000 {
//!! dbg = true
//!! fmt.Printf("Append pos %d %#v len(bb[1]) %d padding %d\n", pos, bb[:], len(bb[1]), paddingSize)
//!! }
if pos%8 != 0 {
panic("Entries are not aligned")
}
if err != nil {
panic(err)
}
//fmt.Printf("Now Append At: %d len: %d\n", pos, len(b))
return
}
func (ef *EntryFile) GetActiveEntriesInTwig(twig *Twig) chan []byte {
res := make(chan []byte, 100)
go func() {
start := twig.FirstEntryPos
for i := 0; i < LeafCountInTwig; i++ {
if twig.getBit(i) {
entryBz, next := ef.ReadEntryRawBytes(start)
//!! fmt.Printf("Why start %d entryBz %#v\n", start, entryBz)
start = next
res <- entryBz
} else { // skip an inactive entry
length, numberOfSN := ef.readMagicBytesAndLength(start, true)
start = getNextPos(start, length+8*int64(numberOfSN))
}
}
close(res)
}()
return res
}
//!! func (ef *EntryFile) GetActiveEntriesInTwigOld(twig *Twig) chan *Entry {
//!! res := make(chan *Entry, 100)
//!! go func() {
//!! start := twig.FirstEntryPos
//!! for i := 0; i < LeafCountInTwig; i++ {
//!! if twig.getBit(i) {
//!! entry, next := ef.ReadEntry(start)
//!! start = next
//!! res <- entry
//!! } else { // skip an inactive entry
//!! length, numberOfSN := ef.readMagicBytesAndLength(start, true)
//!! start = getNextPos(start, length+8*int64(numberOfSN))
//!! }
//!! }
//!! close(res)
//!! }()
//!! return res
//!! }
| recoverMagicBytes | identifier_name |
entryfile.go | package datatree
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/coinexchain/onvakv/types"
)
type Entry = types.Entry
const MaxEntryBytes int = (1 << 24) - 1
var MagicBytes = [8]byte{255, 254, 253, 252, 252, 253, 254, 255}
var dbg bool
func DummyEntry(sn int64) *Entry {
return &Entry{
Key: []byte("dummy"),
Value: []byte("dummy"),
NextKey: []byte("dummy"),
Height: -2,
LastHeight: -2,
SerialNum: sn,
}
}
func NullEntry() Entry {
return Entry{
Key: []byte{},
Value: []byte{},
NextKey: []byte{},
Height: -1,
LastHeight: -1,
SerialNum: -1,
}
}
// Entry serialization format:
// magicBytes 8-bytes
// 8b snList length
// 24b-totalLength (this length does not include padding, snList and this field itself)
// magicBytesPos(list of 32b-int, -1 for ending), posistions are relative to the end of 32b-totalLength
// normalPayload
// DeactivedSerialNumList (list of 64b-int)
// padding-zero-bytes
const (
MSB32 = uint32(1<<31)
)
func PutUint24(b []byte, n uint32) {
//!! if n == 0 {
//!! panic("here PutUint24")
//!! }
b[0] = byte(n)
b[1] = byte(n>>8)
b[2] = byte(n>>16)
}
func GetUint24(b []byte) (n uint32) {
n = uint32(b[0])
n |= uint32(b[1]) << 8
n |= uint32(b[2]) << 16
//!! if n == 0 {
//!! panic("here GetUint24")
//!! }
return
}
//!! func SkipPosList(bz []byte) []byte {
//!! for i := 0; i + 4 < len(bz); i+=4 {
//!! if (bz[i]&bz[i+1]&bz[i+2]&bz[i+3]) == 0xFF {
//!! return bz[i+4:]
//!! }
//!! }
//!! return nil
//!! }
func ExtractKeyFromRawBytes(b []byte) []byte {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
length := int(binary.LittleEndian.Uint32(bb[4:8]))
return append([]byte{}, bb[8:8+length]...)
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
bb = bb[n:]
length := int(binary.LittleEndian.Uint32(bb[:4]))
return append([]byte{}, bb[4:4+length]...)
}
func EntryFromRawBytes(b []byte) *Entry {
bb := b[4:]
if (bb[0]&bb[1]&bb[2]&bb[3]) == 0xFF { // No MagicBytes to recover
e, _ := EntryFromBytes(bb[4:], 0)
return e
}
bb = append([]byte{}, b[4:]...)
n := recoverMagicBytes(bb)
e, _ := EntryFromBytes(bb[n+4:], 0)
return e
}
func ExtractSerialNum(entryBz []byte) int64 {
return int64(binary.LittleEndian.Uint64(entryBz[len(entryBz)-8:]))
}
func UpdateSerialNum(entryBz []byte, sn int64) {
binary.LittleEndian.PutUint64(entryBz[len(entryBz)-8:], uint64(sn))
}
func SNListToBytes(deactivedSerialNumList []int64) []byte {
res := make([]byte, len(deactivedSerialNumList) * 8)
i := 0
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(res[i:i+8], uint64(sn))
i += 8
}
return res
}
func EntryToBytes(entry Entry, deactivedSerialNumList []int64) []byte {
length := 4 + 4 // 32b-length and empty magicBytesPos
length += 4*3 + len(entry.Key) + len(entry.Value) + len(entry.NextKey) // Three strings
length += 8 * 3 // Three int64
length += len(deactivedSerialNumList) * 8
b := make([]byte, length)
b[0] = byte(len(deactivedSerialNumList))
const start = 8
writeEntryPayload(b[start:], entry, deactivedSerialNumList)
// MagicBytes can not lay in or overlap with these 64b integers
stop := len(b) - len(deactivedSerialNumList)*8 - 3*8
magicBytesPosList := getAllPos(b[start:stop], MagicBytes[:])
if len(magicBytesPosList) == 0 {
//!! if dbg {
//!! fmt.Printf("here-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
PutUint24(b[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
binary.LittleEndian.PutUint32(b[4:8], ^uint32(0))
return b
}
// if magicBytesPosList is not empty:
var zeroBuf [8]byte
for _, pos := range magicBytesPosList {
copy(b[start+pos:start+pos+8], zeroBuf[:]) // over-write the occurrence of magic bytes with zeros
}
length += 4 * len(magicBytesPosList)
buf := make([]byte, length)
bytesAdded := 4 * len(magicBytesPosList)
var i int
for i = 0; i < len(magicBytesPosList); i++ {
pos := magicBytesPosList[i] + bytesAdded /*32b-length*/
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], uint32(pos))
}
binary.LittleEndian.PutUint32(buf[i*4+4:i*4+8], ^uint32(0))
copy(buf[i*4+8:], b[8:])
// Re-write the new length. minus 4 because the first 4 bytes of length isn't included
buf[0] = byte(len(deactivedSerialNumList))
PutUint24(buf[1:4], uint32(length-4-len(deactivedSerialNumList)*8))
//!! if dbg {
//!! fmt.Printf("there-length %d %d\n", length, length-4-len(deactivedSerialNumList)*8)
//!! }
return buf
}
func writeEntryPayload(b []byte, entry Entry, deactivedSerialNumList []int64) {
i := 0
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Key)))
i += 4
copy(b[i:], entry.Key)
i += len(entry.Key)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.Value)))
i += 4
copy(b[i:], entry.Value)
i += len(entry.Value)
binary.LittleEndian.PutUint32(b[i:i+4], uint32(len(entry.NextKey)))
i += 4
copy(b[i:], entry.NextKey)
i += len(entry.NextKey)
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.Height))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.LastHeight))
i += 8
binary.LittleEndian.PutUint64(b[i:i+8], uint64(entry.SerialNum))
i += 8
for _, sn := range deactivedSerialNumList {
binary.LittleEndian.PutUint64(b[i:i+8], uint64(sn))
i += 8
}
}
func getAllPos(s, sep []byte) (allpos []int) {
for start, pos := 0, 0; start + len(sep) < len(s); start += pos + len(sep) {
pos = bytes.Index(s[start:], sep)
if pos == -1 {
return
}
allpos = append(allpos, pos+start)
}
return
}
func EntryFromBytes(b []byte, numberOfSN int) (*Entry, []int64) {
entry := &Entry{}
i := 0
length := int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Key = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.Value = b[i:i+length]
i += length
length = int(binary.LittleEndian.Uint32(b[i : i+4]))
i += 4
entry.NextKey = b[i:i+length]
i += length
entry.Height = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.LastHeight = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
entry.SerialNum = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
if numberOfSN == 0 {
return entry, nil
}
deactivedSerialNumList := make([]int64, numberOfSN)
for j := range deactivedSerialNumList {
deactivedSerialNumList[j] = int64(binary.LittleEndian.Uint64(b[i : i+8]))
i += 8
}
return entry, deactivedSerialNumList
}
type EntryFile struct {
HPFile
}
func getPaddingSize(length int) int {
rem := length % 8
if rem == 0 {
return 0
} else {
return 8 - rem
}
}
func (ef *EntryFile) readMagicBytesAndLength(off int64, withBuf bool) (length int64, numberOfSN int) {
var buf [12]byte
err := ef.HPFile.ReadAt(buf[:], off, withBuf)
if err != nil {
panic(err)
}
if !bytes.Equal(buf[:8], MagicBytes[:]) {
fmt.Printf("Now off %d %x\n", off, off)
panic("Invalid MagicBytes")
}
length = int64(GetUint24(buf[9:12]))
if int(length) >= MaxEntryBytes {
panic("Entry to long")
}
return length, int(buf[8])
}
func getNextPos(off, length int64) int64 {
length += 8 /*magicbytes*/ + 4 /*length*/
paddingSize := getPaddingSize(int(length))
paddedLen := length + int64(paddingSize)
nextPos := off + paddedLen
//fmt.Printf("off %d length %d paddingSize %d paddedLen %d nextPos %d\n", off, length, paddingSize, paddedLen, nextPos)
return nextPos
}
func (ef *EntryFile) ReadEntryAndSNList(off int64) (entry *Entry, deactivedSerialNumList []int64, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, true, false, true)
entry, deactivedSerialNumList = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntry(off int64) (entry *Entry, nextPos int64) {
entryBz, numberOfSN, nextPos := ef.readEntry(off, false, false, false)
entry, _ = EntryFromBytes(entryBz, numberOfSN)
return
}
func (ef *EntryFile) ReadEntryRawBytes(off int64) (entryBz []byte, nextPos int64) {
entryBz, _, nextPos = ef.readEntry(off, false, true, true)
return
}
func recoverMagicBytes(b []byte) (n int) {
for n = 0; n + 4 < len(b); n += 4 { // recover magic bytes in payload
pos := binary.LittleEndian.Uint32(b[n : n+4])
if pos == ^(uint32(0)) {
n += 4
break
}
if int(pos) >= MaxEntryBytes {
panic("Position to large")
}
copy(b[int(pos)+4:int(pos)+12], MagicBytes[:])
}
return
}
func (ef *EntryFile) readEntry(off int64, withSNList, useRaw, withBuf bool) (entrybz []byte, numberOfSN int, nextPos int64) {
length, numberOfSN := ef.readMagicBytesAndLength(off, withBuf)
nextPos = getNextPos(off, int64(length)+8*int64(numberOfSN))
if withSNList {
length += 8 * int64(numberOfSN) // ignore snlist
} else {
numberOfSN = 0
}
b := make([]byte, 12+int(length)) // include 12 (magicbytes and length)
err := ef.HPFile.ReadAt(b, off, withBuf)
origB := b
b = b[12:] // ignore magicbytes and length
if err != nil {
panic(err)
}
if useRaw {
return origB[8:], numberOfSN, nextPos
}
n := recoverMagicBytes(b)
return b[n:length], numberOfSN, nextPos
}
func NewEntryFile(bufferSize, blockSize int, dirName string) (res EntryFile, err error) {
res.HPFile, err = NewHPFile(bufferSize, blockSize, dirName)
res.HPFile.InitPreReader()
return
}
func (ef *EntryFile) Size() int64 {
return ef.HPFile.Size()
}
func (ef *EntryFile) Truncate(size int64) {
err := ef.HPFile.Truncate(size)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Flush() {
ef.HPFile.Flush()
}
func (ef *EntryFile) FlushAsync() {
ef.HPFile.FlushAsync() | if err != nil {
panic(err)
}
}
func (ef *EntryFile) PruneHead(off int64) {
err := ef.HPFile.PruneHead(off)
if err != nil {
panic(err)
}
}
func (ef *EntryFile) Append(b [2][]byte) (pos int64) {
//!! if b[0][1] == 0 && b[0][2] == 0 && b[0][3] == 0 {
//!! fmt.Printf("%#v\n", b)
//!! panic("here in Append")
//!! }
var bb [4][]byte
bb[0] = MagicBytes[:]
bb[1] = b[0]
bb[2] = b[1]
paddingSize := getPaddingSize(len(b[0])+len(b[1]))
bb[3] = make([]byte, paddingSize) // padding zero bytes
pos, err := ef.HPFile.Append(bb[:])
//!! if pos > 108996000 {
//!! dbg = true
//!! fmt.Printf("Append pos %d %#v len(bb[1]) %d padding %d\n", pos, bb[:], len(bb[1]), paddingSize)
//!! }
if pos%8 != 0 {
panic("Entries are not aligned")
}
if err != nil {
panic(err)
}
//fmt.Printf("Now Append At: %d len: %d\n", pos, len(b))
return
}
func (ef *EntryFile) GetActiveEntriesInTwig(twig *Twig) chan []byte {
res := make(chan []byte, 100)
go func() {
start := twig.FirstEntryPos
for i := 0; i < LeafCountInTwig; i++ {
if twig.getBit(i) {
entryBz, next := ef.ReadEntryRawBytes(start)
//!! fmt.Printf("Why start %d entryBz %#v\n", start, entryBz)
start = next
res <- entryBz
} else { // skip an inactive entry
length, numberOfSN := ef.readMagicBytesAndLength(start, true)
start = getNextPos(start, length+8*int64(numberOfSN))
}
}
close(res)
}()
return res
}
//!! func (ef *EntryFile) GetActiveEntriesInTwigOld(twig *Twig) chan *Entry {
//!! res := make(chan *Entry, 100)
//!! go func() {
//!! start := twig.FirstEntryPos
//!! for i := 0; i < LeafCountInTwig; i++ {
//!! if twig.getBit(i) {
//!! entry, next := ef.ReadEntry(start)
//!! start = next
//!! res <- entry
//!! } else { // skip an inactive entry
//!! length, numberOfSN := ef.readMagicBytesAndLength(start, true)
//!! start = getNextPos(start, length+8*int64(numberOfSN))
//!! }
//!! }
//!! close(res)
//!! }()
//!! return res
//!! } | }
func (ef *EntryFile) Close() {
err := ef.HPFile.Close() | random_line_split |
app_treasure_huanliang.py | from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
from requests.utils import quote
from platform_crawler.utils.post_get import post, get
from platform_crawler.utils.utils import Util
from platform_crawler.spiders.pylib.login_qq import LoginQQ
from platform_crawler.spiders.pylib.cut_img import cut_img
from platform_crawler.spiders.pylib.task_process import TaskProcess
from platform_crawler.settings import join, JS_PATH
import time
import json
import os
set_start_time = """
(function(st, et){
if(jQuery('#daterange') &&
jQuery('#daterange').data('daterangepicker') &&
('setStartDate' in jQuery('#daterange').data('daterangepicker'))
) {
jQuery('#daterange').data('daterangepicker').setStartDate(st);
jQuery('#daterange').data('daterangepicker').setEndDate(et);
document.querySelector('.applyBtn').click();
} else {
let settime = Date.now();
localStorage.setItem('list_sdate', '{"data":"'+st+'","_time":'+settime+',"_expire":31308148}');
localStorage.setItem('list_edate', '{"data":"'+et+'","_time":'+settime+',"_expire":31308148}');
}
})('%s', '%s');"""
u = Util()
logger = None
page_version = 'old'
base_header = {
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
'Content-Type': "application/x-www-form-urlencoded",
'cookie': None,
'origin': "https://e.qq.com",
'referer': None,
'Cache-Control': "no-cache",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
}
class AppTreasureHL(TaskProcess):
def __init__(self, user_info, **kwargs):
global logger
self.dates = None
self.cookies_str = None
self.gtk = None
self.uid = None
self.init__post_param()
self.login_obj = None
super().__init__(headers=base_header, user_info=user_info, **kwargs)
logger = self.logger
def init__post_param(self):
self.params = {
"mod": "report", "act": "productdetail", "g_tk": None
}
self.pdata = {
"page": "1", "pagesize": "50", "sdate": None, "edate": None, "product_type": "20",
"product_id": None, "time_rpt": "0", "owner": None
}
def get_product(self, sd, ed):
url = 'https://e.qq.com/ec/api.php'
params = {'mod': 'report', 'act': 'getproduct', 'g_tk': str(self.gtk), 'sdate': sd, 'edate': ed, 'searchtype': 'product', 'product_type': '20'}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
'referer': 'https://e.qq.com/atlas/%s/report/producttype' % self.uid,
'cookie': self.cookies_str,
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
res = get(url, params=params, headers=headers)
if not res.get('is_success'):
logger.error(res.get('msg').json())
data = res.get('msg').json()
total_num = data.get('data').get('conf').get('totalnum')
if total_num == 0:
return {'succ': False, 'msg': 'no data'}
data_list = data.get('data').get('list')
data = [{'pname': e.get('pname'), 'pid': e.get('product_id'), 'cost': e.get('cost')} for e in data_list]
return {'succ': True, 'msg': data}
def get_img(self, p_list, sd, ed):
"""截图,并处理图片文件"""
with open(join(JS_PATH, 'e_qq_pagenum.js'), 'r') as p:
pjs = p.read()
for e in p_list:
if not e.get('has_data'):
continue
picname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.png' % {
'productId': e.get('pid'), 'productName': e.get('pname'), 'sd': sd, 'ed': ed
}
url = 'https://e.qq.com/atlas/%s/report/order?ptype=20&pid=%s&pname=%s' % (self.uid, e.get('pid'), quote(e.get('pname')))
self.d.get(url)
time.sleep(0.5)
if page_version == 'new': # 版本判断
try:
self.wait_element(By.CLASS_NAME, 'button-more').click()
except:
self.d.execute_script("document.querySelector('.button-more').click()")
else:
self.wait_element(By.LINK_TEXT, '查看报表', ec=EC.presence_of_element_located).click()
time.sleep(2)
# if page_version != 'new':
# u.pag.hotkey('ctrl', '-', interval=0.3)
# 调整分页数量
self.d.execute_script(pjs)
time.sleep(1.5)
self.d.switch_to.frame(self.d.find_element_by_css_selector('.splitview-tabs-body iframe'))
# 获取高度
get_height = 'return a=document.querySelector("#content").offsetHeight'
height = self.d.execute_script(get_height)
# 截图
cut_res = cut_img(height, self.dir_path, picname)
if not cut_res['succ']:
logger.error('get img %s_%s failed-------msg: %s' % (e['pid'], e['pname'], cut_res['msg']))
logger.info('height: %s ---picname: %s' % (height, picname))
# 恢复
# u.pag.hotkey('ctrl', '0', interval=0.3)
else:
return {'succ': True}
def get_data_process(self, dates):
# 获取上个月到现在每天的数据
err_list, res, data_list, has_data_in_two_mth = [], None, [], []
for sd, ed in dates:
p_res = self.get_product(sd, ed)
if not p_res.get('succ') and p_res.get('msg') == 'no data':
continue
p_list = p_res.get('msg')
for p in p_list:
if page_version == 'new':
res = self.get_data_another_version(p, sd, ed)
else:
res = self.get_data(p, sd, ed)
if res.get('succ'):
time.sleep(0.2)
p.update({'has_data': True})
has_data_in_two_mth.append(1)
continue
elif not res['succ'] and res.get('msg') == 'no data':
p.update({'has_data': False})
else:
err_list.append(p)
else:
data_list.append({'data': p_list, 'date': [sd, ed]})
if not has_data_in_two_mth:
self.result_kwargs['has_data'] = 0
return data_list
def get_version(self):
# 判断界面版本
global page_version
self.d.get('https://e.qq.com/atlas/%s/report/producttype' % self.uid)
# if u.wait_element(self.d, (By.CLASS_NAME, 'datechoose'), 10):
try:
self.d.find_element_by_xpath('//div[@class="datechoose l"]')
except:
page_version = 'new'
time.sleep(1)
def get_data_another_version(self, data, sd, ed):
logger.info('get into (self.get_data_another_version)function')
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data.get('pid'), 'productName': data.get('pname'), 'sd': sd, 'ed': ed
}
url = "https://e.qq.com/ec/api.php"
params = {"g_tk": str(self.gtk), "product_id": data.get('pid'), "product_type": '20', "sdate": sd, "edate": ed}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/analytic2?product_id=%(pid)s&product_type=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self._headers.update(headers)
data = get(url, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def get_data(self, data, sd, ed):
logger.info('get into (self.get_data_common_version)function')
url = "https://e.qq.com/ec/api.php"
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data['pid'], 'productName': data['pname'], 'sd': sd, 'ed': ed
}
params = {"g_tk": str(self.gtk)}
pdata = {
"sdate": sd, "edate": ed, "product_type": '20',
"product_id": data.get('pid'), "owner": self.uid
}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/order_old?pid=%(pid)s&ptype=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self.pdata.update(pdata)
self._headers.update(headers)
data = post(url, data=self.pdata, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def parse_balance(self, *args, **kwargs):
# parse
res = self.login_obj.get_balance(self.uid)
if not res.get('succ'):
return res
unknown_account_name_type = {}
balance_data = {'现金账户': 0, '虚拟账户': 0, '信用账户': 0, '换量账户': 0}
accounts = res.get('msg')
keys = balance_data.keys()
for i in accounts:
account_name = i.get('account_name')
if account_name in keys:
balance_data[account_name] = round(i.get('balance')/100, 2)
else:
# unknown_account_name_type[account_name] = round(i.get('balance')/100, 2)
continue
header = ['账号', '现金账户', '虚拟账户', '信用账户', '换量账户', '总计']
balance_data['总计'] = sum(balance_data.values())
balance_data['账号'] = self.acc
if unknown_account_name_type:
header.extend(unknown_account_name_type.keys())
balance_data.update(unknown_account_name_type)
return header, [balance_data]
def login_part(self, ui):
# 登陆
self.login_obj = LoginQQ(ui, ui.get('platform'))
return self.login_obj.run_login()
def deal_login_result(self, login_res):
if not login_res['succ']:
return login_res
if login_res.get('msg') == 'unknown situation':
logger.warning('got unknown login situation: %s' % login_res.get('desc'))
self.result_kwargs['has_data'] = 0
return {'succ': True, 'msg': 'pass'}
# 获取登录后浏览器驱动和数据
self.d = login_res.pop('driver')
self.cookies_str = self.login_obj.cookies.get('cookie_str')
self.gtk = self.login_obj.gtk
self.uid = login_res.get('data').get('uid')
self.get_version()
def get_data_part(self, ui, **kwargs):
# 获取时间
self.dates = ui.get('dates')
ys, ms, ye, me = self.dates if self.dates else (None, None, None, None)
mths, dates = u.make_dates(ys=ys, | , me=me)
# 获取数据
return self.get_data_process(dates)
def get_img_part(self, get_data_res=None, **kwargs):
# 截图
for e in get_data_res:
sd, ed = e.get('date')
self.d.execute_script(set_start_time % (sd, ed)) # 更新日期
self.d.refresh()
self.get_img(e.get('data'), sd, ed)
if not get_data_res:
self.result_kwargs['has_data'] = 0
return {'succ': True}
| ms=ms, ye=ye | identifier_name |
app_treasure_huanliang.py | from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
from requests.utils import quote
from platform_crawler.utils.post_get import post, get
from platform_crawler.utils.utils import Util
from platform_crawler.spiders.pylib.login_qq import LoginQQ
from platform_crawler.spiders.pylib.cut_img import cut_img
from platform_crawler.spiders.pylib.task_process import TaskProcess
from platform_crawler.settings import join, JS_PATH
import time
import json
import os
set_start_time = """
(function(st, et){
if(jQuery('#daterange') &&
jQuery('#daterange').data('daterangepicker') &&
('setStartDate' in jQuery('#daterange').data('daterangepicker'))
) {
jQuery('#daterange').data('daterangepicker').setStartDate(st);
jQuery('#daterange').data('daterangepicker').setEndDate(et);
document.querySelector('.applyBtn').click();
} else {
let settime = Date.now();
localStorage.setItem('list_sdate', '{"data":"'+st+'","_time":'+settime+',"_expire":31308148}');
localStorage.setItem('list_edate', '{"data":"'+et+'","_time":'+settime+',"_expire":31308148}');
}
})('%s', '%s');"""
u = Util()
logger = None
page_version = 'old'
base_header = {
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
'Content-Type': "application/x-www-form-urlencoded",
'cookie': None,
'origin': "https://e.qq.com",
'referer': None,
'Cache-Control': "no-cache",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
}
class AppTreasureHL(TaskProcess):
def __init__(self, user_info, **kwargs):
global logger
self.dates = None
self.cookies_str = None
self.gtk = None
self.uid = None
self.init__post_param()
self.login_obj = None
super().__init__(headers=base_header, user_info=user_info, **kwargs)
logger = self.logger
def init__post_param(self):
self.params = {
"mod": "report", "act": "productdetail", "g_tk": None
}
self.pdata = {
"page": "1", "pagesize": "50", "sdate": None, "edate": None, "product_type": "20",
"product_id": None, "time_rpt": "0", "owner": None
}
def get_product(self, sd, ed):
url = 'https://e.qq.com/ec/api.php'
params = {'mod': 'report', 'act': 'getproduct', 'g_tk': str(self.gtk), 'sdate': sd, 'edate': ed, 'searchtype': 'product', 'product_type': '20'}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
'referer': 'https://e.qq.com/atlas/%s/report/producttype' % self.uid,
'cookie': self.cookies_str,
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
res = get(url, params=params, headers=headers)
if not res.get('is_success'):
logger.error(res.get('msg').json())
data = res.get('msg').json()
total_num = data.get('data').get('conf').get('totalnum')
if total_num == 0:
return {'succ': False, 'msg': 'no data'}
data_list = data.get('data').get('list')
data = [{'pname': e.get('pname'), 'pid': e.get('product_id'), 'cost': e.get('cost')} for e in data_list]
return {'succ': True, 'msg': data}
def get_img(self, p_list, sd, ed):
"""截图,并处理图片文件"""
with open(join(JS_PATH, 'e_qq_pagenum.js'), 'r') as p:
pjs = p.read()
for e in p_list:
if not e.get('has_data'):
continue
picname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.png' % {
'productId': e.get('pid'), 'productName': e.get('pname'), 'sd': sd, 'ed': ed
}
url = 'https://e.qq.com/atlas/%s/report/order?ptype=20&pid=%s&pname=%s' % (self.uid, e.get('pid'), quote(e.get('pname')))
self.d.get(url)
time.sleep(0.5)
if page_version == 'new': # 版本判断
try:
self.wait_element(By.CLASS_NAME, 'button-more').click()
except:
self.d.execute_script("document.querySelector('.button-more').click()")
else:
self.wait_element(By.LINK_TEXT, '查看报表', ec=EC.presence_of_element_located).click()
time.sleep(2)
# if page_version != 'new':
# u.pag.hotkey('ctrl', '-', interval=0.3)
# 调整分页数量
self.d.execute_script(pjs)
time.sleep(1.5)
self.d.switch_to.frame(self.d.find_element_by_css_selector('.splitview-tabs-body iframe'))
# 获取高度
get_height = 'return a=document.querySelector("#content").offsetHeight'
height = self.d.execute_script(get_height)
# 截图
cut_res = cut_img(height, self.dir_path, picname)
if not cut_res['succ']:
logger.error('get img %s_%s failed-------msg: %s' % (e['pid' | ght, picname))
# 恢复
# u.pag.hotkey('ctrl', '0', interval=0.3)
else:
return {'succ': True}
def get_data_process(self, dates):
# 获取上个月到现在每天的数据
err_list, res, data_list, has_data_in_two_mth = [], None, [], []
for sd, ed in dates:
p_res = self.get_product(sd, ed)
if not p_res.get('succ') and p_res.get('msg') == 'no data':
continue
p_list = p_res.get('msg')
for p in p_list:
if page_version == 'new':
res = self.get_data_another_version(p, sd, ed)
else:
res = self.get_data(p, sd, ed)
if res.get('succ'):
time.sleep(0.2)
p.update({'has_data': True})
has_data_in_two_mth.append(1)
continue
elif not res['succ'] and res.get('msg') == 'no data':
p.update({'has_data': False})
else:
err_list.append(p)
else:
data_list.append({'data': p_list, 'date': [sd, ed]})
if not has_data_in_two_mth:
self.result_kwargs['has_data'] = 0
return data_list
def get_version(self):
# 判断界面版本
global page_version
self.d.get('https://e.qq.com/atlas/%s/report/producttype' % self.uid)
# if u.wait_element(self.d, (By.CLASS_NAME, 'datechoose'), 10):
try:
self.d.find_element_by_xpath('//div[@class="datechoose l"]')
except:
page_version = 'new'
time.sleep(1)
def get_data_another_version(self, data, sd, ed):
logger.info('get into (self.get_data_another_version)function')
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data.get('pid'), 'productName': data.get('pname'), 'sd': sd, 'ed': ed
}
url = "https://e.qq.com/ec/api.php"
params = {"g_tk": str(self.gtk), "product_id": data.get('pid'), "product_type": '20', "sdate": sd, "edate": ed}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/analytic2?product_id=%(pid)s&product_type=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self._headers.update(headers)
data = get(url, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def get_data(self, data, sd, ed):
logger.info('get into (self.get_data_common_version)function')
url = "https://e.qq.com/ec/api.php"
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data['pid'], 'productName': data['pname'], 'sd': sd, 'ed': ed
}
params = {"g_tk": str(self.gtk)}
pdata = {
"sdate": sd, "edate": ed, "product_type": '20',
"product_id": data.get('pid'), "owner": self.uid
}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/order_old?pid=%(pid)s&ptype=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self.pdata.update(pdata)
self._headers.update(headers)
data = post(url, data=self.pdata, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def parse_balance(self, *args, **kwargs):
# parse
res = self.login_obj.get_balance(self.uid)
if not res.get('succ'):
return res
unknown_account_name_type = {}
balance_data = {'现金账户': 0, '虚拟账户': 0, '信用账户': 0, '换量账户': 0}
accounts = res.get('msg')
keys = balance_data.keys()
for i in accounts:
account_name = i.get('account_name')
if account_name in keys:
balance_data[account_name] = round(i.get('balance')/100, 2)
else:
# unknown_account_name_type[account_name] = round(i.get('balance')/100, 2)
continue
header = ['账号', '现金账户', '虚拟账户', '信用账户', '换量账户', '总计']
balance_data['总计'] = sum(balance_data.values())
balance_data['账号'] = self.acc
if unknown_account_name_type:
header.extend(unknown_account_name_type.keys())
balance_data.update(unknown_account_name_type)
return header, [balance_data]
def login_part(self, ui):
# 登陆
self.login_obj = LoginQQ(ui, ui.get('platform'))
return self.login_obj.run_login()
def deal_login_result(self, login_res):
if not login_res['succ']:
return login_res
if login_res.get('msg') == 'unknown situation':
logger.warning('got unknown login situation: %s' % login_res.get('desc'))
self.result_kwargs['has_data'] = 0
return {'succ': True, 'msg': 'pass'}
# 获取登录后浏览器驱动和数据
self.d = login_res.pop('driver')
self.cookies_str = self.login_obj.cookies.get('cookie_str')
self.gtk = self.login_obj.gtk
self.uid = login_res.get('data').get('uid')
self.get_version()
def get_data_part(self, ui, **kwargs):
# 获取时间
self.dates = ui.get('dates')
ys, ms, ye, me = self.dates if self.dates else (None, None, None, None)
mths, dates = u.make_dates(ys=ys, ms=ms, ye=ye, me=me)
# 获取数据
return self.get_data_process(dates)
def get_img_part(self, get_data_res=None, **kwargs):
# 截图
for e in get_data_res:
sd, ed = e.get('date')
self.d.execute_script(set_start_time % (sd, ed)) # 更新日期
self.d.refresh()
self.get_img(e.get('data'), sd, ed)
if not get_data_res:
self.result_kwargs['has_data'] = 0
return {'succ': True}
| ], e['pname'], cut_res['msg']))
logger.info('height: %s ---picname: %s' % (hei | conditional_block |
app_treasure_huanliang.py | from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
from requests.utils import quote
from platform_crawler.utils.post_get import post, get
from platform_crawler.utils.utils import Util
from platform_crawler.spiders.pylib.login_qq import LoginQQ
from platform_crawler.spiders.pylib.cut_img import cut_img
from platform_crawler.spiders.pylib.task_process import TaskProcess
from platform_crawler.settings import join, JS_PATH
import time
import json
import os
set_start_time = """
(function(st, et){
if(jQuery('#daterange') &&
jQuery('#daterange').data('daterangepicker') &&
('setStartDate' in jQuery('#daterange').data('daterangepicker'))
) {
jQuery('#daterange').data('daterangepicker').setStartDate(st);
jQuery('#daterange').data('daterangepicker').setEndDate(et);
document.querySelector('.applyBtn').click();
} else {
let settime = Date.now();
localStorage.setItem('list_sdate', '{"data":"'+st+'","_time":'+settime+',"_expire":31308148}');
localStorage.setItem('list_edate', '{"data":"'+et+'","_time":'+settime+',"_expire":31308148}');
}
})('%s', '%s');"""
u = Util()
| 'Content-Type': "application/x-www-form-urlencoded",
'cookie': None,
'origin': "https://e.qq.com",
'referer': None,
'Cache-Control': "no-cache",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
}
class AppTreasureHL(TaskProcess):
def __init__(self, user_info, **kwargs):
global logger
self.dates = None
self.cookies_str = None
self.gtk = None
self.uid = None
self.init__post_param()
self.login_obj = None
super().__init__(headers=base_header, user_info=user_info, **kwargs)
logger = self.logger
def init__post_param(self):
self.params = {
"mod": "report", "act": "productdetail", "g_tk": None
}
self.pdata = {
"page": "1", "pagesize": "50", "sdate": None, "edate": None, "product_type": "20",
"product_id": None, "time_rpt": "0", "owner": None
}
def get_product(self, sd, ed):
url = 'https://e.qq.com/ec/api.php'
params = {'mod': 'report', 'act': 'getproduct', 'g_tk': str(self.gtk), 'sdate': sd, 'edate': ed, 'searchtype': 'product', 'product_type': '20'}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
'referer': 'https://e.qq.com/atlas/%s/report/producttype' % self.uid,
'cookie': self.cookies_str,
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
res = get(url, params=params, headers=headers)
if not res.get('is_success'):
logger.error(res.get('msg').json())
data = res.get('msg').json()
total_num = data.get('data').get('conf').get('totalnum')
if total_num == 0:
return {'succ': False, 'msg': 'no data'}
data_list = data.get('data').get('list')
data = [{'pname': e.get('pname'), 'pid': e.get('product_id'), 'cost': e.get('cost')} for e in data_list]
return {'succ': True, 'msg': data}
def get_img(self, p_list, sd, ed):
"""截图,并处理图片文件"""
with open(join(JS_PATH, 'e_qq_pagenum.js'), 'r') as p:
pjs = p.read()
for e in p_list:
if not e.get('has_data'):
continue
picname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.png' % {
'productId': e.get('pid'), 'productName': e.get('pname'), 'sd': sd, 'ed': ed
}
url = 'https://e.qq.com/atlas/%s/report/order?ptype=20&pid=%s&pname=%s' % (self.uid, e.get('pid'), quote(e.get('pname')))
self.d.get(url)
time.sleep(0.5)
if page_version == 'new': # 版本判断
try:
self.wait_element(By.CLASS_NAME, 'button-more').click()
except:
self.d.execute_script("document.querySelector('.button-more').click()")
else:
self.wait_element(By.LINK_TEXT, '查看报表', ec=EC.presence_of_element_located).click()
time.sleep(2)
# if page_version != 'new':
# u.pag.hotkey('ctrl', '-', interval=0.3)
# 调整分页数量
self.d.execute_script(pjs)
time.sleep(1.5)
self.d.switch_to.frame(self.d.find_element_by_css_selector('.splitview-tabs-body iframe'))
# 获取高度
get_height = 'return a=document.querySelector("#content").offsetHeight'
height = self.d.execute_script(get_height)
# 截图
cut_res = cut_img(height, self.dir_path, picname)
if not cut_res['succ']:
logger.error('get img %s_%s failed-------msg: %s' % (e['pid'], e['pname'], cut_res['msg']))
logger.info('height: %s ---picname: %s' % (height, picname))
# 恢复
# u.pag.hotkey('ctrl', '0', interval=0.3)
else:
return {'succ': True}
def get_data_process(self, dates):
# 获取上个月到现在每天的数据
err_list, res, data_list, has_data_in_two_mth = [], None, [], []
for sd, ed in dates:
p_res = self.get_product(sd, ed)
if not p_res.get('succ') and p_res.get('msg') == 'no data':
continue
p_list = p_res.get('msg')
for p in p_list:
if page_version == 'new':
res = self.get_data_another_version(p, sd, ed)
else:
res = self.get_data(p, sd, ed)
if res.get('succ'):
time.sleep(0.2)
p.update({'has_data': True})
has_data_in_two_mth.append(1)
continue
elif not res['succ'] and res.get('msg') == 'no data':
p.update({'has_data': False})
else:
err_list.append(p)
else:
data_list.append({'data': p_list, 'date': [sd, ed]})
if not has_data_in_two_mth:
self.result_kwargs['has_data'] = 0
return data_list
def get_version(self):
# 判断界面版本
global page_version
self.d.get('https://e.qq.com/atlas/%s/report/producttype' % self.uid)
# if u.wait_element(self.d, (By.CLASS_NAME, 'datechoose'), 10):
try:
self.d.find_element_by_xpath('//div[@class="datechoose l"]')
except:
page_version = 'new'
time.sleep(1)
def get_data_another_version(self, data, sd, ed):
logger.info('get into (self.get_data_another_version)function')
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data.get('pid'), 'productName': data.get('pname'), 'sd': sd, 'ed': ed
}
url = "https://e.qq.com/ec/api.php"
params = {"g_tk": str(self.gtk), "product_id": data.get('pid'), "product_type": '20', "sdate": sd, "edate": ed}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/analytic2?product_id=%(pid)s&product_type=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self._headers.update(headers)
data = get(url, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def get_data(self, data, sd, ed):
logger.info('get into (self.get_data_common_version)function')
url = "https://e.qq.com/ec/api.php"
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data['pid'], 'productName': data['pname'], 'sd': sd, 'ed': ed
}
params = {"g_tk": str(self.gtk)}
pdata = {
"sdate": sd, "edate": ed, "product_type": '20',
"product_id": data.get('pid'), "owner": self.uid
}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/order_old?pid=%(pid)s&ptype=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self.pdata.update(pdata)
self._headers.update(headers)
data = post(url, data=self.pdata, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def parse_balance(self, *args, **kwargs):
# parse
res = self.login_obj.get_balance(self.uid)
if not res.get('succ'):
return res
unknown_account_name_type = {}
balance_data = {'现金账户': 0, '虚拟账户': 0, '信用账户': 0, '换量账户': 0}
accounts = res.get('msg')
keys = balance_data.keys()
for i in accounts:
account_name = i.get('account_name')
if account_name in keys:
balance_data[account_name] = round(i.get('balance')/100, 2)
else:
# unknown_account_name_type[account_name] = round(i.get('balance')/100, 2)
continue
header = ['账号', '现金账户', '虚拟账户', '信用账户', '换量账户', '总计']
balance_data['总计'] = sum(balance_data.values())
balance_data['账号'] = self.acc
if unknown_account_name_type:
header.extend(unknown_account_name_type.keys())
balance_data.update(unknown_account_name_type)
return header, [balance_data]
def login_part(self, ui):
# 登陆
self.login_obj = LoginQQ(ui, ui.get('platform'))
return self.login_obj.run_login()
def deal_login_result(self, login_res):
if not login_res['succ']:
return login_res
if login_res.get('msg') == 'unknown situation':
logger.warning('got unknown login situation: %s' % login_res.get('desc'))
self.result_kwargs['has_data'] = 0
return {'succ': True, 'msg': 'pass'}
# 获取登录后浏览器驱动和数据
self.d = login_res.pop('driver')
self.cookies_str = self.login_obj.cookies.get('cookie_str')
self.gtk = self.login_obj.gtk
self.uid = login_res.get('data').get('uid')
self.get_version()
def get_data_part(self, ui, **kwargs):
# 获取时间
self.dates = ui.get('dates')
ys, ms, ye, me = self.dates if self.dates else (None, None, None, None)
mths, dates = u.make_dates(ys=ys, ms=ms, ye=ye, me=me)
# 获取数据
return self.get_data_process(dates)
def get_img_part(self, get_data_res=None, **kwargs):
# 截图
for e in get_data_res:
sd, ed = e.get('date')
self.d.execute_script(set_start_time % (sd, ed)) # 更新日期
self.d.refresh()
self.get_img(e.get('data'), sd, ed)
if not get_data_res:
self.result_kwargs['has_data'] = 0
return {'succ': True} | logger = None
page_version = 'old'
base_header = {
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
| random_line_split |
app_treasure_huanliang.py | from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
from requests.utils import quote
from platform_crawler.utils.post_get import post, get
from platform_crawler.utils.utils import Util
from platform_crawler.spiders.pylib.login_qq import LoginQQ
from platform_crawler.spiders.pylib.cut_img import cut_img
from platform_crawler.spiders.pylib.task_process import TaskProcess
from platform_crawler.settings import join, JS_PATH
import time
import json
import os
set_start_time = """
(function(st, et){
if(jQuery('#daterange') &&
jQuery('#daterange').data('daterangepicker') &&
('setStartDate' in jQuery('#daterange').data('daterangepicker'))
) {
jQuery('#daterange').data('daterangepicker').setStartDate(st);
jQuery('#daterange').data('daterangepicker').setEndDate(et);
document.querySelector('.applyBtn').click();
} else {
let settime = Date.now();
localStorage.setItem('list_sdate', '{"data":"'+st+'","_time":'+settime+',"_expire":31308148}');
localStorage.setItem('list_edate', '{"data":"'+et+'","_time":'+settime+',"_expire":31308148}');
}
})('%s', '%s');"""
u = Util()
logger = None
page_version = 'old'
base_header = {
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
'Content-Type': "application/x-www-form-urlencoded",
'cookie': None,
'origin': "https://e.qq.com",
'referer': None,
'Cache-Control': "no-cache",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
}
class AppTreasureHL(TaskProcess):
def __init__(self, user_info, **kwargs):
global logger
self.dates = None
self.cookies_str = None
self.gtk = None
self.uid = None
self.init__post_param()
self.login_obj = None
super().__init__(headers=base_header, user_info=user_info, **kwargs)
logger = self.logger
def init__post_param(self):
self.params = {
"mod": "report", "act": "productdetail", "g_tk": None
}
self.pdata = {
"page": "1", "pagesize": "50", "sdate": None, "edate": None, "product_type": "20",
"product_id": None, "time_rpt": "0", "owner": None
}
def get_product(self, sd, ed):
url = 'https://e.qq.com/ec/api.php'
params = {'mod': 'report', 'act': 'getproduct', 'g_tk': str(self.gtk), 'sdate': sd, 'edate': ed, 'searchtype': 'product', 'product_type': '20'}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
'referer': 'https://e.qq.com/atlas/%s/report/producttype' % self.uid,
'cookie': self.cookies_str,
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
res = get(url, params=params, headers=headers)
if not res.get('is_success'):
logger.error(res.get('msg').json())
data = res.get('msg').json()
total_num = data.get('data').get('conf').get('totalnum')
if total_num == 0:
return {'succ': False, 'msg': 'no data'}
data_list = data.get('data').get('list')
data = [{'pname': e.get('pname'), 'pid': e.get('product_id'), 'cost': e.get('cost')} for e in data_list]
return {'succ': True, 'msg': data}
def get_img(self, p_list, sd, ed):
"""截图,并处理图片文件"""
with open(join(JS_PATH, 'e_qq_pagenum.js'), 'r') as p:
pjs = p.read()
for e in p_list:
if not e.get('has_data'):
continue
picname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.png' % {
'productId': e.get('pid'), 'productName': e.get('pname'), 'sd': sd, 'ed': ed
}
url = 'https://e.qq.com/atlas/%s/report/order?ptype=20&pid=%s&pname=%s' % (self.uid, e.get('pid'), quote(e.get('pname')))
self.d.get(url)
time.sleep(0.5)
if page_version == 'new': # 版本判断
try:
self.wait_element(By.CLASS_NAME, 'button-more').click()
except:
self.d.execute_script("document.querySelector('.button-more').click()")
else:
self.wait_element(By.LINK_TEXT, '查看报表', ec=EC.presence_of_element_located).click()
time.sleep(2)
# if page_version != 'new':
# u.pag.hotkey('ctrl', '-', interval=0.3)
# 调整分页数量
self.d.execute_script(pjs)
time.sleep(1.5)
self.d.switch_to.frame(self.d.find_element_by_css_selector('.splitview-tabs-body iframe'))
# 获取高度
get_height = 'return a=document.querySelector("#content").offsetHeight'
height = self.d.execute_script(get_height)
# 截图
cut_res = cut_img(height, self.dir_path, picname)
if not cut_res['succ']:
logger.error('get img %s_%s failed-------msg: %s' % (e['pid'], e['pname'], cut_res['msg']))
logger.info('height: %s ---picname: %s' % (height, picname))
# 恢复
# u.pag.hotkey('ctrl', '0', interval=0.3)
else:
return {'succ': True}
def get_data_process(self, dates):
# 获取上个月到现在每天的数据
err_list, res, data_list, has_data_in_two_mth = [], None, [], []
for sd, ed in dates:
p_res = self.get_product(sd, ed)
if not p_res.get('succ') and p_res.get('msg') == 'no data':
continue
p_list = p_res.get('msg')
for p in p_list:
if page_version == 'new':
res = self.get_data_another_version(p, sd, ed)
else:
res = self.get_data(p, sd, ed)
if res.get('succ'):
time.sleep(0.2)
p.update({'has_data': True})
has_data_in_two_mth.append(1)
continue
elif not res['succ'] and res.get('msg') == 'no data':
p.update({'has_data': False})
else:
err_list.append(p)
else:
data_list.append({'data': p_list, 'date': [sd, ed]})
if not has_data_in_two_mth:
self.result_kwargs['has_data'] = 0
return data_list
def get_version(self):
# 判断界面版本
global page_version
self.d.get('https://e.qq.com/atlas/%s/report/producttype' % self.uid)
# if u.wait_element(self.d, (By.CLASS_NAME, 'datechoose'), 10):
try:
self.d.find_element_by_xpath('//div[@class="datechoose l"]')
except:
page_version = 'new'
time.sleep(1)
def get_data_another_version(self, data, sd, ed):
logger.info('get into (self.get_data_another_version)function')
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data.get('pid'), 'productName': data.get('pname'), 'sd': sd, 'ed': ed
}
url = "https://e.qq.com/ec/api.php"
params = {"g_tk": str(self.gtk), "product_id": data.get('pid'), "product_type": '20', "sdate": sd, "edate": ed}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/analytic2?product_id=%(pid)s&product_type=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self._headers.update(headers)
data = get(url, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def get_data(self, data, sd, ed):
logger.info('get into (self.get_data_common_version)function')
url = "https://e.qq.com/ec/api.php"
fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
'productId': data['pid'], 'productName': data['pname'], 'sd': sd, 'ed': ed
}
params = {"g_tk": str(self.gtk)}
pdata = {
"sdate": sd, "edate": ed, "product_type": '20',
"product_id": data.get('pid'), "owner": self.uid
}
headers = {
"cookie": self.cookies_str,
"referer": "http://e.qq.com/atlas/%(uid)s/report/order_old?pid=%(pid)s&ptype=%(ptype)s" % {
'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
}
}
self.params.update(params)
self.pdata.update(pdata)
self._headers.update(headers)
data = post(url, data=self.pdata, params=self.params, headers=self._headers)
if not data['is_success']:
return {'succ': False, 'msg': data['msg']}
file_name = os.path.join(self.dir_path, fname)
data = json.loads(data['msg'].content.decode('utf-8'))
cost = data.get('data').get('total').get('cost').replace(',', '')
if float(cost) == 0:
return {'succ': False, 'msg': 'no data'}
data['account'] = self.acc
with open(file_name, 'w') as f:
json.dump(data, f)
logger.info('crawled data: ' + json.dumps(data))
return {'succ': True}
def parse_balance(self, *args, **kwargs):
# parse
res = self.login_obj.get_balance(self.uid)
if not res.get('succ'):
return res
unknown_account_name_type = {}
balance_data = {'现金账户': 0, '虚拟账户': 0, '信用账户': 0, '换量账户': 0}
accounts = res.get('msg')
keys = balance_data.keys()
for i in accounts:
account_name = i.get('account_name')
if account_name in keys:
balance_data[account_name] = round(i.get('balance')/100, 2)
else:
# unknown_account_name_type[account_name] = round(i.get('balance')/100, 2)
continue
header = ['账号', '现金账户', '虚拟账户', '信用账户', '换量账户', '总计']
balance_data['总计'] = sum(balance_data.values())
balance_data['账号'] = self.acc
if unknown_account_name_type:
header.extend(unknown_account_name_type.keys())
balance_data.update(unknown_account_name_type)
return header, [balance_data]
def login_part(self, ui):
# 登陆
self.login_obj = LoginQQ(ui, ui.get('platform'))
return self.login_obj.run_login()
def deal_login_result(self, login_res):
if not login_res['succ']:
return login_res
if login_res.get('msg') == 'unknown situation':
logger.warning('got unknown login situation: %s' % login_res.get('desc'))
self.result_kwargs['has_data'] = 0
return {'succ': True, 'msg': 'pass'}
# 获取登录后浏览器驱动和数据
self.d = login_res.pop('driver')
self.cookies_str = self.login_obj.cookies.get('cookie_str')
self.gtk = self.login_obj.gtk
self.uid = login_res.get('data').get('uid')
self.get_version()
def get_data_part(self, ui, **kwargs):
# 获取时间
self.dates = ui.get('dates')
ys, ms, ye, me = self.dates if self.dates else (None, None, None, None)
mths, dates = u.make_dates(ys=ys, ms=ms, ye=ye, me=me)
# 获取数据
return self.get_data_process(dates)
def get_img_part(self, get_data_res=None, **kwargs):
# 截图
for e in get_data_res:
sd, ed = e.get('date')
self.d.execute_script(set_start_time % (sd, ed)) # 更新日期
self.d.refresh()
self.get_img(e.get('data'), sd, ed)
if not | get_data_res:
self.result_kwargs['has_data'] = 0
return {'succ': True}
| identifier_body | |
meta.py | """
The metainterpreter and metabuiltins.
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
There are seven Forth registers: W, IP, PSP, RSP, X, UP, and TOS. They are
assigned to hardware registers as follows:
+---+--+
|IP |J |
|PSP|SP|
|RSP|Y |
|TOS|Z |
+---+--+
To start the metainterpreter, set RSP to point to a safe area of return stack,
put the address of QUIT into IP, and then call IP.
"""
from StringIO import StringIO
from struct import pack
from cauliflower.assembler import (A, ADD, AND, B, BOR, C, I, IFE, IFN, J,
MUL, PEEK, PC, POP, PUSH, SET, SP, SUB, X,
XOR, Y, Z, Absolute, assemble, call, until)
from cauliflower.utilities import library, read, write
class EvenStringIO(StringIO):
def seek(self, value, *args, **kwargs):
StringIO.seek(self, value * 2, *args, **kwargs)
def tell(self):
rv = StringIO.tell(self)
if rv % 2:
raise Exception("Offset %d is odd!" % rv)
return rv // 2
IMMEDIATE = 0x4000
HIDDEN = 0x8000
def PUSHRSP(register):
"""
Push onto RSP.
"""
ucode = assemble(SUB, Y, 0x1)
ucode += assemble(SET, [Y], register)
return ucode
def POPRSP(register):
"""
Pop from RSP.
"""
ucode = assemble(SET, register, [Y])
ucode += assemble(ADD, Y, 0x1)
return ucode
def _push(register):
"""
Push onto the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, Z, register)
return ucode
def _pop(register):
"""
Pop off the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, register, Z)
ucode += assemble(SET, Z, POP)
return ucode
class MetaAssembler(object):
"""
Assembler which pulls threads together to form a Forth core.
"""
# Pointer to the previous word defined, used to chain all words onto a
# linked list.
previous = 0x0
# Workspace address.
workspace = 0x7000
def __init__(self):
# Hold codewords for threads as we store them.
self.asmwords = {}
self.codewords = {}
self.datawords = {}
# Initialize the space.
self.space = EvenStringIO()
self.bootloader()
self.lib()
def bootloader(self):
"""
Set up the bootloader.
"""
self.space.write(assemble(SET, Y, 0xd000))
self.space.write(assemble(SET, J, 0x5))
self.space.write(assemble(SET, PC, [J]))
# Allocate space for the address of QUIT.
self.space.write("\x00\x00")
# Allocate space for STATE.
self.STATE = self.space.tell()
self.space.write("\x00\x00")
# And HERE.
self.HERE = self.space.tell()
self.space.write("\x00\x00")
# And LATEST, too.
self.LATEST = self.space.tell()
self.space.write("\x00\x00")
# Don't forget FB.
self.FB = self.space.tell()
self.space.write("\x80\x00")
# NEXT. Increment IP and move through it.
ucode = assemble(ADD, J, 0x1)
ucode += assemble(SET, PC, [J])
self.prim("next", ucode)
# EXIT. Pop RSP into IP and then call NEXT.
ucode = POPRSP(J)
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("exit", ucode)
# ENTER. Save IP to RSP, dereference IP to find the caller, enter the
# new word, call NEXT.
ucode = PUSHRSP(J)
ucode += assemble(SET, J, [J])
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("enter", ucode)
def lib(self):
self.library = {}
for name in library:
print "Adding library function", name
self.library[name] = self.space.tell()
self.space.write(library[name]())
def finalize(self):
# Write HERE and LATEST.
location = self.space.tell()
here = pack(">H", location)
latest = pack(">H", self.previous)
self.space.seek(self.HERE)
self.space.write(here)
self.space.seek(self.LATEST)
self.space.write(latest)
self.space.seek(0x5)
self.space.write(pack(">H", self.codewords["quit"]))
# Reset file pointer.
self.space.seek(location)
def prim(self, name, ucode):
"""
Write primitive assembly directly into the core.
"""
self.asmwords[name] = self.space.tell()
self.space.write(ucode)
def create(self, name, flags):
"""
Write a header into the core and update the previous header marker.
"""
location = self.space.tell()
self.datawords[name] = location
print "Creating data word", name, "at 0x%x" % location
length = len(name)
if flags:
length |= flags
header = pack(">HH", self.previous, length)
# Swap locations.
self.previous = location
self.space.write(header)
self.space.write(name.encode("utf-16-be"))
location = self.space.tell()
print "Creating code word", name, "at 0x%x" % location
self.codewords[name] = location
def asm(self, name, ucode, flags=None):
"""
Write an assembly-level word into the core.
Here's what the word looks like:
|prev|len |name|asm |NEXT|
"""
print "Adding assembly word %s" % name
self.create(name, flags)
self.space.write(ucode)
self.space.write(assemble(SET, PC, self.asmwords["next"]))
def thread(self, name, words, flags=None):
|
ma = MetaAssembler()
# Deep primitives.
ma.prim("read", read(A))
ma.prim("write", write(A))
# Top of the line: Go back to the beginning of the string.
ucode = assemble(SET, B, 0x0)
ucode += assemble(SET, C, ma.workspace)
# Read a character into A.
ucode += call(ma.asmwords["read"])
ucode += assemble(SET, [C], A)
ucode += assemble(ADD, B, 0x1)
ucode += assemble(ADD, C, 0x1)
# If it's a space, then we're done. Otherwise, go back to reading things from
# the keyboard.
ucode = until(ucode, (IFN, 0x20, [C]))
ucode += assemble(SET, C, ma.workspace)
ma.prim("word", ucode)
preamble = assemble(SET, C, 0x0)
ucode = assemble(MUL, C, 10)
ucode += assemble(SET, X, [A])
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
# Compiling words.
ucode = _push([J])
ucode += assemble(ADD, J, 0x1)
ma.asm("literal", ucode)
ma.asm("'", ucode)
ucode = assemble(SET, PC, Z)
ma.asm("call", ucode)
# Low-level memory manipulation.
ucode = assemble(SET, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("!", ucode)
# TOS lets us cheat hard.
ucode = assemble(SET, Z, [Z])
ma.asm("@", ucode)
ucode = assemble(ADD, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("+!", ucode)
ucode = assemble(SUB, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("-!", ucode)
# Low-level branching.
ucode = assemble(ADD, J, [J + 0x1])
ma.asm("branch", ucode)
# Ugh.
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(ADD, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0branch", ucode)
# Goddammit DCPU!
ucode = assemble(SUB, J, [J + 0x1])
ma.asm("nbranch", ucode)
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(SUB, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0nbranch", ucode)
# Low-level tests.
# I bet there's a trick to this. I'll revisit this later.
ucode = assemble(IFN, J, 0x0)
ucode += assemble(SET, A, 0x1)
ucode += assemble(IFE, J, 0x0)
ucode += assemble(SET, A, 0x0)
ucode += assemble(SET, J, A)
ma.asm("0=", ucode)
def IF(then, otherwise=[]):
if otherwise:
then += ["branch", len(otherwise)]
return ["0=", "0branch", len(then)] + then + otherwise
def UNTIL(loop):
return loop + ["0nbranch", len(loop)]
# Main stack manipulation.
ucode = assemble(SET, PUSH, Z)
ma.asm("dup", ucode)
# Return stack manipulation.
ucode = _push(0xd000)
ma.asm("r0", ucode)
ucode = _push(Y)
ma.asm("rsp@", ucode)
ucode = _pop(Y)
ma.asm("rsp!", ucode)
ucode = _push([Y])
ucode += assemble(ADD, Y, 0x1)
ma.asm("r>", ucode)
ucode = assemble(SUB, Y, 0x1)
ucode += _pop([Y])
ma.asm(">r", ucode)
ucode = _push([Y])
ma.asm("r@", ucode)
ucode = _pop([Y])
ma.asm("r!", ucode)
ucode = assemble(ADD, Y, 0x1)
ma.asm("rdrop", ucode)
# Arithmetic.
ucode = assemble(ADD, Z, POP)
ma.asm("+", ucode)
# Low-level input.
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, A, Z)
ucode += call(ma.asmwords["word"])
ma.asm("key", ucode)
# High-level input.
ucode = call(ma.asmwords["word"])
ucode += _push(B)
ucode += _push(C)
ma.asm("word", ucode)
ucode = _pop(A)
ucode += _pop(B)
ucode += call(ma.asmwords["snumber"])
ucode += _push(C)
ma.asm("snumber", ucode)
# Output.
ucode = assemble(SET, A, [ma.FB])
ucode += _pop([A])
ucode += assemble(ADD, [ma.FB], 0x1)
ma.asm("emit", ucode)
# Global access.
# This could be done in Forth, but it's so small in assembly!
ucode = _pop([ma.HERE])
ucode += assemble(ADD, [ma.HERE], 0x1)
ma.asm(",", ucode)
ucode = assemble(SET, [ma.STATE], 0x0)
ma.asm("[", ucode)
ucode = assemble(SET, [ma.STATE], 0x1)
ma.asm("]", ucode)
ucode = _push([ma.LATEST])
ma.asm("latest", ucode)
# Compiler stuff.
ucode = call(ma.asmwords["read"])
ucode += _push([C])
ma.asm("char", ucode)
# Pop the target address (below TOS) into a working register. Leave length on
# TOS.
preamble = assemble(SET, A, POP)
# Use B as our linked list pointer.
preamble += assemble(SET, B, ma.LATEST)
# Top of the loop. Dereference B to move along the list.
ucode = assemble(SET, B, [B])
# Compare lengths; if they don't match, go to the next one.
ucode = until(ucode, (IFN, [B + 0x1], Z))
# memcmp() the strings.
ucode += assemble(ADD, B, 0x1)
ucode += assemble(SET, C, A)
ucode += assemble(SET, A, Z)
ucode += call(ma.library["memcmp"])
ucode += assemble(SUB, B, 0x1)
# If it succeeded, push the address back onto the stack and then jump out.
ucode += assemble(IFN, A, 0x0)
ucode += assemble(SET, Z, B)
ucode += assemble(IFN, A, 0x0)
ucode += assemble(ADD, PC, 0x4)
# Loop until we hit NULL.
ucode = until(ucode, (IFE, B, 0x0))
# We finished the loop and couldn't find anything. Guess we'll just set Z to
# 0x0 and exit.
ucode += assemble(SET, Z, 0x0)
ma.asm("find", preamble + ucode)
ma.thread("+1", ["literal", 0x1, "+"])
ma.thread(">cfa", ["+1", "dup", "@", "+", "+1"])
# Grab HERE. It's going to live in A for a bit.
preamble = assemble(SET, A, [ma.HERE])
# Write LATEST to HERE, update LATEST.
preamble += assemble(SET, [A], [ma.LATEST])
preamble += assemble(SET, [ma.LATEST], A)
# Move ahead, write length.
preamble += assemble(ADD, A, 0x1)
preamble += assemble(SET, [A], Z)
# Set the hidden flag.
preamble += assemble(BOR, [A], HIDDEN)
# SP is nerfed, so grab the source address and put it in B.
preamble += assemble(SET, B, PEEK)
# Loop. Copy from the source address to the target address.
ucode = assemble(SUB, Z, 0x1)
ucode += assemble(SET, [A], [B])
ucode += assemble(ADD, A, 0x1)
ucode += assemble(ADD, B, 0x1)
# Break when we have no more bytes to copy.
ucode = until(ucode, (IFE, Z, 0x0))
# Write out the new HERE.
ucode += assemble(SET, [ma.HERE], A)
# Get the stack to be sane again. Shift it down and then pop, same as 2drop.
ucode += assemble(ADD, SP, 0x1)
ucode += assemble(SET, Z, POP)
ma.asm("create", preamble + ucode)
# The stack points to the top of the header. Move forward one...
ucode = assemble(ADD, Z, 0x1)
# Now XOR in the hidden flag.
ucode += assemble(XOR, [Z], HIDDEN)
# And pop the stack.
ucode += assemble(SET, Z, POP)
ma.asm("hidden", ucode)
# We get to grab LATEST ourselves. On the plus side, no stack touching.
ucode = assemble(SET, A, ma.LATEST)
# XOR that flag!
ucode += assemble(XOR, [A + 0x1], IMMEDIATE)
ma.asm("immediate", ucode)
ucode = assemble(AND, Z, IMMEDIATE)
ma.asm("immediate?", ucode)
ma.thread(":", [
"word",
"create",
"literal", ma.asmwords["enter"],
",",
"latest",
"@",
"hidden",
"]",
])
ma.thread(";", [
"literal", ma.asmwords["exit"],
",",
"latest",
"@",
"hidden",
"[",
], flags=IMMEDIATE)
ma.thread("interpret-found", [
"dup",
"+1",
"immediate?",
] + IF([
">cfa",
"call",
], [
">cfa",
",",
])
)
ma.thread("interpret", [
"word",
"find",
"dup",
] + IF([
"interpret-found",
])
)
ma.thread("quit", ["r0", "rsp!", "interpret", "nbranch", 0x2])
ma.finalize()
| """
Assemble a thread of words into the core.
Here's what a thread looks like:
|prev|len |name|ENTER|word|EXIT|
"""
print "Adding Forth thread %s" % name
self.create(name, flags)
# ENTER/DOCOL bytecode.
ucode = assemble(SET, PC, self.asmwords["enter"])
self.space.write(ucode)
for word in words:
if isinstance(word, int):
self.space.write(pack(">H", word))
elif word in self.codewords:
self.space.write(pack(">H", self.codewords[word]))
else:
raise Exception("Can't reference unknown word %r" % word)
self.space.write(pack(">H", self.asmwords["exit"])) | identifier_body |
meta.py | """
The metainterpreter and metabuiltins.
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
There are seven Forth registers: W, IP, PSP, RSP, X, UP, and TOS. They are
assigned to hardware registers as follows:
+---+--+
|IP |J |
|PSP|SP|
|RSP|Y |
|TOS|Z |
+---+--+
To start the metainterpreter, set RSP to point to a safe area of return stack,
put the address of QUIT into IP, and then call IP.
"""
from StringIO import StringIO
from struct import pack
from cauliflower.assembler import (A, ADD, AND, B, BOR, C, I, IFE, IFN, J,
MUL, PEEK, PC, POP, PUSH, SET, SP, SUB, X,
XOR, Y, Z, Absolute, assemble, call, until)
from cauliflower.utilities import library, read, write
class EvenStringIO(StringIO):
def seek(self, value, *args, **kwargs):
StringIO.seek(self, value * 2, *args, **kwargs)
def tell(self):
rv = StringIO.tell(self)
if rv % 2:
raise Exception("Offset %d is odd!" % rv)
return rv // 2
IMMEDIATE = 0x4000
HIDDEN = 0x8000
def PUSHRSP(register):
"""
Push onto RSP.
"""
ucode = assemble(SUB, Y, 0x1)
ucode += assemble(SET, [Y], register)
return ucode
def POPRSP(register):
"""
Pop from RSP.
"""
ucode = assemble(SET, register, [Y])
ucode += assemble(ADD, Y, 0x1)
return ucode
def _push(register):
"""
Push onto the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, Z, register)
return ucode
def _pop(register):
"""
Pop off the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, register, Z)
ucode += assemble(SET, Z, POP)
return ucode
class MetaAssembler(object):
"""
Assembler which pulls threads together to form a Forth core.
"""
# Pointer to the previous word defined, used to chain all words onto a
# linked list.
previous = 0x0
# Workspace address.
workspace = 0x7000
def __init__(self):
# Hold codewords for threads as we store them.
self.asmwords = {}
self.codewords = {}
self.datawords = {}
# Initialize the space.
self.space = EvenStringIO()
self.bootloader()
self.lib()
def bootloader(self):
"""
Set up the bootloader.
"""
self.space.write(assemble(SET, Y, 0xd000))
self.space.write(assemble(SET, J, 0x5))
self.space.write(assemble(SET, PC, [J]))
# Allocate space for the address of QUIT.
self.space.write("\x00\x00")
# Allocate space for STATE.
self.STATE = self.space.tell()
self.space.write("\x00\x00")
# And HERE.
self.HERE = self.space.tell()
self.space.write("\x00\x00")
# And LATEST, too.
self.LATEST = self.space.tell()
self.space.write("\x00\x00")
# Don't forget FB.
self.FB = self.space.tell()
self.space.write("\x80\x00")
# NEXT. Increment IP and move through it.
ucode = assemble(ADD, J, 0x1)
ucode += assemble(SET, PC, [J])
self.prim("next", ucode)
# EXIT. Pop RSP into IP and then call NEXT.
ucode = POPRSP(J)
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("exit", ucode)
# ENTER. Save IP to RSP, dereference IP to find the caller, enter the
# new word, call NEXT.
ucode = PUSHRSP(J)
ucode += assemble(SET, J, [J])
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("enter", ucode)
def lib(self):
self.library = {}
for name in library:
print "Adding library function", name
self.library[name] = self.space.tell()
self.space.write(library[name]())
def finalize(self):
# Write HERE and LATEST.
location = self.space.tell()
here = pack(">H", location)
latest = pack(">H", self.previous)
self.space.seek(self.HERE)
self.space.write(here)
self.space.seek(self.LATEST)
self.space.write(latest)
self.space.seek(0x5)
self.space.write(pack(">H", self.codewords["quit"]))
# Reset file pointer.
self.space.seek(location)
def prim(self, name, ucode):
"""
Write primitive assembly directly into the core.
"""
self.asmwords[name] = self.space.tell()
self.space.write(ucode)
def create(self, name, flags):
"""
Write a header into the core and update the previous header marker.
"""
location = self.space.tell()
self.datawords[name] = location
print "Creating data word", name, "at 0x%x" % location
length = len(name)
if flags:
length |= flags
header = pack(">HH", self.previous, length)
# Swap locations.
self.previous = location
self.space.write(header)
self.space.write(name.encode("utf-16-be"))
location = self.space.tell()
print "Creating code word", name, "at 0x%x" % location
self.codewords[name] = location
def asm(self, name, ucode, flags=None):
"""
Write an assembly-level word into the core.
Here's what the word looks like:
|prev|len |name|asm |NEXT|
"""
print "Adding assembly word %s" % name
self.create(name, flags)
self.space.write(ucode)
self.space.write(assemble(SET, PC, self.asmwords["next"]))
def thread(self, name, words, flags=None):
"""
Assemble a thread of words into the core.
Here's what a thread looks like:
|prev|len |name|ENTER|word|EXIT|
"""
print "Adding Forth thread %s" % name
self.create(name, flags)
# ENTER/DOCOL bytecode.
ucode = assemble(SET, PC, self.asmwords["enter"])
self.space.write(ucode)
for word in words:
if isinstance(word, int):
self.space.write(pack(">H", word))
elif word in self.codewords:
self.space.write(pack(">H", self.codewords[word]))
else:
raise Exception("Can't reference unknown word %r" % word)
self.space.write(pack(">H", self.asmwords["exit"]))
ma = MetaAssembler()
# Deep primitives.
ma.prim("read", read(A))
ma.prim("write", write(A))
# Top of the line: Go back to the beginning of the string.
ucode = assemble(SET, B, 0x0)
ucode += assemble(SET, C, ma.workspace)
# Read a character into A.
ucode += call(ma.asmwords["read"])
ucode += assemble(SET, [C], A)
ucode += assemble(ADD, B, 0x1)
ucode += assemble(ADD, C, 0x1)
# If it's a space, then we're done. Otherwise, go back to reading things from
# the keyboard.
ucode = until(ucode, (IFN, 0x20, [C]))
ucode += assemble(SET, C, ma.workspace)
ma.prim("word", ucode)
preamble = assemble(SET, C, 0x0)
ucode = assemble(MUL, C, 10)
ucode += assemble(SET, X, [A])
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
# Compiling words.
ucode = _push([J])
ucode += assemble(ADD, J, 0x1)
ma.asm("literal", ucode)
ma.asm("'", ucode)
ucode = assemble(SET, PC, Z)
ma.asm("call", ucode)
# Low-level memory manipulation.
ucode = assemble(SET, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("!", ucode)
# TOS lets us cheat hard.
ucode = assemble(SET, Z, [Z])
ma.asm("@", ucode)
ucode = assemble(ADD, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("+!", ucode)
ucode = assemble(SUB, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("-!", ucode)
# Low-level branching.
ucode = assemble(ADD, J, [J + 0x1])
ma.asm("branch", ucode)
# Ugh.
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(ADD, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0branch", ucode)
# Goddammit DCPU!
ucode = assemble(SUB, J, [J + 0x1])
ma.asm("nbranch", ucode)
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(SUB, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0nbranch", ucode)
# Low-level tests.
# I bet there's a trick to this. I'll revisit this later.
ucode = assemble(IFN, J, 0x0)
ucode += assemble(SET, A, 0x1)
ucode += assemble(IFE, J, 0x0)
ucode += assemble(SET, A, 0x0)
ucode += assemble(SET, J, A)
ma.asm("0=", ucode)
def IF(then, otherwise=[]):
if otherwise:
then += ["branch", len(otherwise)]
return ["0=", "0branch", len(then)] + then + otherwise
def | (loop):
return loop + ["0nbranch", len(loop)]
# Main stack manipulation.
ucode = assemble(SET, PUSH, Z)
ma.asm("dup", ucode)
# Return stack manipulation.
ucode = _push(0xd000)
ma.asm("r0", ucode)
ucode = _push(Y)
ma.asm("rsp@", ucode)
ucode = _pop(Y)
ma.asm("rsp!", ucode)
ucode = _push([Y])
ucode += assemble(ADD, Y, 0x1)
ma.asm("r>", ucode)
ucode = assemble(SUB, Y, 0x1)
ucode += _pop([Y])
ma.asm(">r", ucode)
ucode = _push([Y])
ma.asm("r@", ucode)
ucode = _pop([Y])
ma.asm("r!", ucode)
ucode = assemble(ADD, Y, 0x1)
ma.asm("rdrop", ucode)
# Arithmetic.
ucode = assemble(ADD, Z, POP)
ma.asm("+", ucode)
# Low-level input.
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, A, Z)
ucode += call(ma.asmwords["word"])
ma.asm("key", ucode)
# High-level input.
ucode = call(ma.asmwords["word"])
ucode += _push(B)
ucode += _push(C)
ma.asm("word", ucode)
ucode = _pop(A)
ucode += _pop(B)
ucode += call(ma.asmwords["snumber"])
ucode += _push(C)
ma.asm("snumber", ucode)
# Output.
ucode = assemble(SET, A, [ma.FB])
ucode += _pop([A])
ucode += assemble(ADD, [ma.FB], 0x1)
ma.asm("emit", ucode)
# Global access.
# This could be done in Forth, but it's so small in assembly!
ucode = _pop([ma.HERE])
ucode += assemble(ADD, [ma.HERE], 0x1)
ma.asm(",", ucode)
ucode = assemble(SET, [ma.STATE], 0x0)
ma.asm("[", ucode)
ucode = assemble(SET, [ma.STATE], 0x1)
ma.asm("]", ucode)
ucode = _push([ma.LATEST])
ma.asm("latest", ucode)
# Compiler stuff.
ucode = call(ma.asmwords["read"])
ucode += _push([C])
ma.asm("char", ucode)
# Pop the target address (below TOS) into a working register. Leave length on
# TOS.
preamble = assemble(SET, A, POP)
# Use B as our linked list pointer.
preamble += assemble(SET, B, ma.LATEST)
# Top of the loop. Dereference B to move along the list.
ucode = assemble(SET, B, [B])
# Compare lengths; if they don't match, go to the next one.
ucode = until(ucode, (IFN, [B + 0x1], Z))
# memcmp() the strings.
ucode += assemble(ADD, B, 0x1)
ucode += assemble(SET, C, A)
ucode += assemble(SET, A, Z)
ucode += call(ma.library["memcmp"])
ucode += assemble(SUB, B, 0x1)
# If it succeeded, push the address back onto the stack and then jump out.
ucode += assemble(IFN, A, 0x0)
ucode += assemble(SET, Z, B)
ucode += assemble(IFN, A, 0x0)
ucode += assemble(ADD, PC, 0x4)
# Loop until we hit NULL.
ucode = until(ucode, (IFE, B, 0x0))
# We finished the loop and couldn't find anything. Guess we'll just set Z to
# 0x0 and exit.
ucode += assemble(SET, Z, 0x0)
ma.asm("find", preamble + ucode)
ma.thread("+1", ["literal", 0x1, "+"])
ma.thread(">cfa", ["+1", "dup", "@", "+", "+1"])
# Grab HERE. It's going to live in A for a bit.
preamble = assemble(SET, A, [ma.HERE])
# Write LATEST to HERE, update LATEST.
preamble += assemble(SET, [A], [ma.LATEST])
preamble += assemble(SET, [ma.LATEST], A)
# Move ahead, write length.
preamble += assemble(ADD, A, 0x1)
preamble += assemble(SET, [A], Z)
# Set the hidden flag.
preamble += assemble(BOR, [A], HIDDEN)
# SP is nerfed, so grab the source address and put it in B.
preamble += assemble(SET, B, PEEK)
# Loop. Copy from the source address to the target address.
ucode = assemble(SUB, Z, 0x1)
ucode += assemble(SET, [A], [B])
ucode += assemble(ADD, A, 0x1)
ucode += assemble(ADD, B, 0x1)
# Break when we have no more bytes to copy.
ucode = until(ucode, (IFE, Z, 0x0))
# Write out the new HERE.
ucode += assemble(SET, [ma.HERE], A)
# Get the stack to be sane again. Shift it down and then pop, same as 2drop.
ucode += assemble(ADD, SP, 0x1)
ucode += assemble(SET, Z, POP)
ma.asm("create", preamble + ucode)
# The stack points to the top of the header. Move forward one...
ucode = assemble(ADD, Z, 0x1)
# Now XOR in the hidden flag.
ucode += assemble(XOR, [Z], HIDDEN)
# And pop the stack.
ucode += assemble(SET, Z, POP)
ma.asm("hidden", ucode)
# We get to grab LATEST ourselves. On the plus side, no stack touching.
ucode = assemble(SET, A, ma.LATEST)
# XOR that flag!
ucode += assemble(XOR, [A + 0x1], IMMEDIATE)
ma.asm("immediate", ucode)
ucode = assemble(AND, Z, IMMEDIATE)
ma.asm("immediate?", ucode)
ma.thread(":", [
"word",
"create",
"literal", ma.asmwords["enter"],
",",
"latest",
"@",
"hidden",
"]",
])
ma.thread(";", [
"literal", ma.asmwords["exit"],
",",
"latest",
"@",
"hidden",
"[",
], flags=IMMEDIATE)
ma.thread("interpret-found", [
"dup",
"+1",
"immediate?",
] + IF([
">cfa",
"call",
], [
">cfa",
",",
])
)
ma.thread("interpret", [
"word",
"find",
"dup",
] + IF([
"interpret-found",
])
)
ma.thread("quit", ["r0", "rsp!", "interpret", "nbranch", 0x2])
ma.finalize()
| UNTIL | identifier_name |
meta.py | """
The metainterpreter and metabuiltins.
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
There are seven Forth registers: W, IP, PSP, RSP, X, UP, and TOS. They are
assigned to hardware registers as follows:
+---+--+
|IP |J |
|PSP|SP|
|RSP|Y |
|TOS|Z |
+---+--+
To start the metainterpreter, set RSP to point to a safe area of return stack,
put the address of QUIT into IP, and then call IP.
"""
from StringIO import StringIO
from struct import pack
from cauliflower.assembler import (A, ADD, AND, B, BOR, C, I, IFE, IFN, J,
MUL, PEEK, PC, POP, PUSH, SET, SP, SUB, X,
XOR, Y, Z, Absolute, assemble, call, until)
from cauliflower.utilities import library, read, write
class EvenStringIO(StringIO):
def seek(self, value, *args, **kwargs):
StringIO.seek(self, value * 2, *args, **kwargs)
def tell(self):
rv = StringIO.tell(self)
if rv % 2:
raise Exception("Offset %d is odd!" % rv)
return rv // 2
IMMEDIATE = 0x4000
HIDDEN = 0x8000
def PUSHRSP(register):
"""
Push onto RSP.
"""
ucode = assemble(SUB, Y, 0x1)
ucode += assemble(SET, [Y], register)
return ucode
def POPRSP(register):
"""
Pop from RSP.
"""
ucode = assemble(SET, register, [Y])
ucode += assemble(ADD, Y, 0x1)
return ucode
def _push(register):
"""
Push onto the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, Z, register)
return ucode
def _pop(register):
"""
Pop off the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, register, Z)
ucode += assemble(SET, Z, POP)
return ucode
class MetaAssembler(object):
"""
Assembler which pulls threads together to form a Forth core.
"""
# Pointer to the previous word defined, used to chain all words onto a
# linked list.
previous = 0x0
# Workspace address.
workspace = 0x7000
def __init__(self):
# Hold codewords for threads as we store them.
self.asmwords = {}
self.codewords = {}
self.datawords = {}
# Initialize the space.
self.space = EvenStringIO()
self.bootloader()
self.lib()
def bootloader(self):
"""
Set up the bootloader.
"""
self.space.write(assemble(SET, Y, 0xd000))
self.space.write(assemble(SET, J, 0x5))
self.space.write(assemble(SET, PC, [J]))
# Allocate space for the address of QUIT.
self.space.write("\x00\x00")
# Allocate space for STATE.
self.STATE = self.space.tell()
self.space.write("\x00\x00")
# And HERE.
self.HERE = self.space.tell()
self.space.write("\x00\x00")
# And LATEST, too.
self.LATEST = self.space.tell()
self.space.write("\x00\x00")
# Don't forget FB.
self.FB = self.space.tell()
self.space.write("\x80\x00")
# NEXT. Increment IP and move through it.
ucode = assemble(ADD, J, 0x1)
ucode += assemble(SET, PC, [J])
self.prim("next", ucode)
# EXIT. Pop RSP into IP and then call NEXT.
ucode = POPRSP(J)
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("exit", ucode)
# ENTER. Save IP to RSP, dereference IP to find the caller, enter the
# new word, call NEXT.
ucode = PUSHRSP(J)
ucode += assemble(SET, J, [J])
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("enter", ucode)
def lib(self):
self.library = {}
for name in library:
print "Adding library function", name
self.library[name] = self.space.tell()
self.space.write(library[name]())
def finalize(self):
# Write HERE and LATEST.
location = self.space.tell()
here = pack(">H", location)
latest = pack(">H", self.previous)
self.space.seek(self.HERE)
self.space.write(here)
self.space.seek(self.LATEST)
self.space.write(latest)
self.space.seek(0x5)
self.space.write(pack(">H", self.codewords["quit"]))
# Reset file pointer.
self.space.seek(location)
def prim(self, name, ucode):
"""
Write primitive assembly directly into the core.
"""
self.asmwords[name] = self.space.tell()
self.space.write(ucode)
def create(self, name, flags):
"""
Write a header into the core and update the previous header marker.
"""
location = self.space.tell()
self.datawords[name] = location
print "Creating data word", name, "at 0x%x" % location
length = len(name)
if flags:
length |= flags
header = pack(">HH", self.previous, length)
# Swap locations.
self.previous = location
self.space.write(header)
self.space.write(name.encode("utf-16-be"))
location = self.space.tell()
print "Creating code word", name, "at 0x%x" % location
self.codewords[name] = location
def asm(self, name, ucode, flags=None):
"""
Write an assembly-level word into the core.
Here's what the word looks like:
|prev|len |name|asm |NEXT|
"""
print "Adding assembly word %s" % name
self.create(name, flags)
self.space.write(ucode)
self.space.write(assemble(SET, PC, self.asmwords["next"]))
def thread(self, name, words, flags=None):
"""
Assemble a thread of words into the core.
Here's what a thread looks like:
|prev|len |name|ENTER|word|EXIT|
"""
print "Adding Forth thread %s" % name
self.create(name, flags)
# ENTER/DOCOL bytecode.
ucode = assemble(SET, PC, self.asmwords["enter"])
self.space.write(ucode)
for word in words:
if isinstance(word, int):
self.space.write(pack(">H", word))
elif word in self.codewords:
self.space.write(pack(">H", self.codewords[word]))
else:
raise Exception("Can't reference unknown word %r" % word)
self.space.write(pack(">H", self.asmwords["exit"]))
ma = MetaAssembler()
# Deep primitives.
ma.prim("read", read(A))
ma.prim("write", write(A))
# Top of the line: Go back to the beginning of the string.
ucode = assemble(SET, B, 0x0)
ucode += assemble(SET, C, ma.workspace)
# Read a character into A.
ucode += call(ma.asmwords["read"])
ucode += assemble(SET, [C], A)
ucode += assemble(ADD, B, 0x1)
ucode += assemble(ADD, C, 0x1)
# If it's a space, then we're done. Otherwise, go back to reading things from
# the keyboard.
ucode = until(ucode, (IFN, 0x20, [C]))
ucode += assemble(SET, C, ma.workspace)
ma.prim("word", ucode)
preamble = assemble(SET, C, 0x0)
ucode = assemble(MUL, C, 10)
ucode += assemble(SET, X, [A])
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
# Compiling words.
ucode = _push([J])
ucode += assemble(ADD, J, 0x1)
ma.asm("literal", ucode)
ma.asm("'", ucode)
ucode = assemble(SET, PC, Z)
ma.asm("call", ucode)
# Low-level memory manipulation.
ucode = assemble(SET, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("!", ucode)
# TOS lets us cheat hard.
ucode = assemble(SET, Z, [Z])
ma.asm("@", ucode)
ucode = assemble(ADD, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("+!", ucode)
ucode = assemble(SUB, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("-!", ucode)
# Low-level branching.
ucode = assemble(ADD, J, [J + 0x1])
ma.asm("branch", ucode)
# Ugh.
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(ADD, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0branch", ucode)
# Goddammit DCPU!
ucode = assemble(SUB, J, [J + 0x1])
ma.asm("nbranch", ucode)
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(SUB, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0nbranch", ucode)
# Low-level tests.
# I bet there's a trick to this. I'll revisit this later.
ucode = assemble(IFN, J, 0x0)
ucode += assemble(SET, A, 0x1)
ucode += assemble(IFE, J, 0x0)
ucode += assemble(SET, A, 0x0)
ucode += assemble(SET, J, A)
ma.asm("0=", ucode)
def IF(then, otherwise=[]):
if otherwise:
then += ["branch", len(otherwise)]
return ["0=", "0branch", len(then)] + then + otherwise
def UNTIL(loop):
return loop + ["0nbranch", len(loop)]
# Main stack manipulation.
ucode = assemble(SET, PUSH, Z)
ma.asm("dup", ucode)
# Return stack manipulation.
ucode = _push(0xd000)
ma.asm("r0", ucode)
ucode = _push(Y)
ma.asm("rsp@", ucode)
ucode = _pop(Y)
ma.asm("rsp!", ucode)
ucode = _push([Y])
ucode += assemble(ADD, Y, 0x1)
ma.asm("r>", ucode)
ucode = assemble(SUB, Y, 0x1)
ucode += _pop([Y])
ma.asm(">r", ucode)
ucode = _push([Y])
ma.asm("r@", ucode)
ucode = _pop([Y])
ma.asm("r!", ucode)
ucode = assemble(ADD, Y, 0x1)
ma.asm("rdrop", ucode)
# Arithmetic.
ucode = assemble(ADD, Z, POP)
ma.asm("+", ucode)
# Low-level input.
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, A, Z)
ucode += call(ma.asmwords["word"])
ma.asm("key", ucode)
# High-level input.
ucode = call(ma.asmwords["word"])
ucode += _push(B)
ucode += _push(C)
ma.asm("word", ucode)
ucode = _pop(A)
ucode += _pop(B)
ucode += call(ma.asmwords["snumber"])
ucode += _push(C)
ma.asm("snumber", ucode)
# Output.
ucode = assemble(SET, A, [ma.FB])
ucode += _pop([A])
ucode += assemble(ADD, [ma.FB], 0x1)
ma.asm("emit", ucode)
# Global access.
# This could be done in Forth, but it's so small in assembly!
ucode = _pop([ma.HERE])
ucode += assemble(ADD, [ma.HERE], 0x1)
ma.asm(",", ucode)
ucode = assemble(SET, [ma.STATE], 0x0)
ma.asm("[", ucode)
ucode = assemble(SET, [ma.STATE], 0x1)
ma.asm("]", ucode)
ucode = _push([ma.LATEST])
ma.asm("latest", ucode)
# Compiler stuff.
ucode = call(ma.asmwords["read"])
ucode += _push([C])
ma.asm("char", ucode)
# Pop the target address (below TOS) into a working register. Leave length on
# TOS.
preamble = assemble(SET, A, POP)
# Use B as our linked list pointer.
preamble += assemble(SET, B, ma.LATEST)
# Top of the loop. Dereference B to move along the list.
ucode = assemble(SET, B, [B])
# Compare lengths; if they don't match, go to the next one.
ucode = until(ucode, (IFN, [B + 0x1], Z))
# memcmp() the strings.
ucode += assemble(ADD, B, 0x1)
ucode += assemble(SET, C, A)
ucode += assemble(SET, A, Z)
ucode += call(ma.library["memcmp"])
ucode += assemble(SUB, B, 0x1)
# If it succeeded, push the address back onto the stack and then jump out.
ucode += assemble(IFN, A, 0x0)
ucode += assemble(SET, Z, B)
ucode += assemble(IFN, A, 0x0)
ucode += assemble(ADD, PC, 0x4)
# Loop until we hit NULL.
ucode = until(ucode, (IFE, B, 0x0))
# We finished the loop and couldn't find anything. Guess we'll just set Z to
# 0x0 and exit.
ucode += assemble(SET, Z, 0x0)
ma.asm("find", preamble + ucode)
ma.thread("+1", ["literal", 0x1, "+"])
ma.thread(">cfa", ["+1", "dup", "@", "+", "+1"])
# Grab HERE. It's going to live in A for a bit. | # Write LATEST to HERE, update LATEST.
preamble += assemble(SET, [A], [ma.LATEST])
preamble += assemble(SET, [ma.LATEST], A)
# Move ahead, write length.
preamble += assemble(ADD, A, 0x1)
preamble += assemble(SET, [A], Z)
# Set the hidden flag.
preamble += assemble(BOR, [A], HIDDEN)
# SP is nerfed, so grab the source address and put it in B.
preamble += assemble(SET, B, PEEK)
# Loop. Copy from the source address to the target address.
ucode = assemble(SUB, Z, 0x1)
ucode += assemble(SET, [A], [B])
ucode += assemble(ADD, A, 0x1)
ucode += assemble(ADD, B, 0x1)
# Break when we have no more bytes to copy.
ucode = until(ucode, (IFE, Z, 0x0))
# Write out the new HERE.
ucode += assemble(SET, [ma.HERE], A)
# Get the stack to be sane again. Shift it down and then pop, same as 2drop.
ucode += assemble(ADD, SP, 0x1)
ucode += assemble(SET, Z, POP)
ma.asm("create", preamble + ucode)
# The stack points to the top of the header. Move forward one...
ucode = assemble(ADD, Z, 0x1)
# Now XOR in the hidden flag.
ucode += assemble(XOR, [Z], HIDDEN)
# And pop the stack.
ucode += assemble(SET, Z, POP)
ma.asm("hidden", ucode)
# We get to grab LATEST ourselves. On the plus side, no stack touching.
ucode = assemble(SET, A, ma.LATEST)
# XOR that flag!
ucode += assemble(XOR, [A + 0x1], IMMEDIATE)
ma.asm("immediate", ucode)
ucode = assemble(AND, Z, IMMEDIATE)
ma.asm("immediate?", ucode)
ma.thread(":", [
"word",
"create",
"literal", ma.asmwords["enter"],
",",
"latest",
"@",
"hidden",
"]",
])
ma.thread(";", [
"literal", ma.asmwords["exit"],
",",
"latest",
"@",
"hidden",
"[",
], flags=IMMEDIATE)
ma.thread("interpret-found", [
"dup",
"+1",
"immediate?",
] + IF([
">cfa",
"call",
], [
">cfa",
",",
])
)
ma.thread("interpret", [
"word",
"find",
"dup",
] + IF([
"interpret-found",
])
)
ma.thread("quit", ["r0", "rsp!", "interpret", "nbranch", 0x2])
ma.finalize() | preamble = assemble(SET, A, [ma.HERE]) | random_line_split |
meta.py | """
The metainterpreter and metabuiltins.
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
There are seven Forth registers: W, IP, PSP, RSP, X, UP, and TOS. They are
assigned to hardware registers as follows:
+---+--+
|IP |J |
|PSP|SP|
|RSP|Y |
|TOS|Z |
+---+--+
To start the metainterpreter, set RSP to point to a safe area of return stack,
put the address of QUIT into IP, and then call IP.
"""
from StringIO import StringIO
from struct import pack
from cauliflower.assembler import (A, ADD, AND, B, BOR, C, I, IFE, IFN, J,
MUL, PEEK, PC, POP, PUSH, SET, SP, SUB, X,
XOR, Y, Z, Absolute, assemble, call, until)
from cauliflower.utilities import library, read, write
class EvenStringIO(StringIO):
def seek(self, value, *args, **kwargs):
StringIO.seek(self, value * 2, *args, **kwargs)
def tell(self):
rv = StringIO.tell(self)
if rv % 2:
raise Exception("Offset %d is odd!" % rv)
return rv // 2
IMMEDIATE = 0x4000
HIDDEN = 0x8000
def PUSHRSP(register):
"""
Push onto RSP.
"""
ucode = assemble(SUB, Y, 0x1)
ucode += assemble(SET, [Y], register)
return ucode
def POPRSP(register):
"""
Pop from RSP.
"""
ucode = assemble(SET, register, [Y])
ucode += assemble(ADD, Y, 0x1)
return ucode
def _push(register):
"""
Push onto the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, Z, register)
return ucode
def _pop(register):
"""
Pop off the stack, manipulating both TOS and PSP.
"""
ucode = assemble(SET, register, Z)
ucode += assemble(SET, Z, POP)
return ucode
class MetaAssembler(object):
"""
Assembler which pulls threads together to form a Forth core.
"""
# Pointer to the previous word defined, used to chain all words onto a
# linked list.
previous = 0x0
# Workspace address.
workspace = 0x7000
def __init__(self):
# Hold codewords for threads as we store them.
self.asmwords = {}
self.codewords = {}
self.datawords = {}
# Initialize the space.
self.space = EvenStringIO()
self.bootloader()
self.lib()
def bootloader(self):
"""
Set up the bootloader.
"""
self.space.write(assemble(SET, Y, 0xd000))
self.space.write(assemble(SET, J, 0x5))
self.space.write(assemble(SET, PC, [J]))
# Allocate space for the address of QUIT.
self.space.write("\x00\x00")
# Allocate space for STATE.
self.STATE = self.space.tell()
self.space.write("\x00\x00")
# And HERE.
self.HERE = self.space.tell()
self.space.write("\x00\x00")
# And LATEST, too.
self.LATEST = self.space.tell()
self.space.write("\x00\x00")
# Don't forget FB.
self.FB = self.space.tell()
self.space.write("\x80\x00")
# NEXT. Increment IP and move through it.
ucode = assemble(ADD, J, 0x1)
ucode += assemble(SET, PC, [J])
self.prim("next", ucode)
# EXIT. Pop RSP into IP and then call NEXT.
ucode = POPRSP(J)
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("exit", ucode)
# ENTER. Save IP to RSP, dereference IP to find the caller, enter the
# new word, call NEXT.
ucode = PUSHRSP(J)
ucode += assemble(SET, J, [J])
ucode += assemble(SET, PC, self.asmwords["next"])
self.prim("enter", ucode)
def lib(self):
self.library = {}
for name in library:
|
def finalize(self):
# Write HERE and LATEST.
location = self.space.tell()
here = pack(">H", location)
latest = pack(">H", self.previous)
self.space.seek(self.HERE)
self.space.write(here)
self.space.seek(self.LATEST)
self.space.write(latest)
self.space.seek(0x5)
self.space.write(pack(">H", self.codewords["quit"]))
# Reset file pointer.
self.space.seek(location)
def prim(self, name, ucode):
"""
Write primitive assembly directly into the core.
"""
self.asmwords[name] = self.space.tell()
self.space.write(ucode)
def create(self, name, flags):
"""
Write a header into the core and update the previous header marker.
"""
location = self.space.tell()
self.datawords[name] = location
print "Creating data word", name, "at 0x%x" % location
length = len(name)
if flags:
length |= flags
header = pack(">HH", self.previous, length)
# Swap locations.
self.previous = location
self.space.write(header)
self.space.write(name.encode("utf-16-be"))
location = self.space.tell()
print "Creating code word", name, "at 0x%x" % location
self.codewords[name] = location
def asm(self, name, ucode, flags=None):
"""
Write an assembly-level word into the core.
Here's what the word looks like:
|prev|len |name|asm |NEXT|
"""
print "Adding assembly word %s" % name
self.create(name, flags)
self.space.write(ucode)
self.space.write(assemble(SET, PC, self.asmwords["next"]))
def thread(self, name, words, flags=None):
"""
Assemble a thread of words into the core.
Here's what a thread looks like:
|prev|len |name|ENTER|word|EXIT|
"""
print "Adding Forth thread %s" % name
self.create(name, flags)
# ENTER/DOCOL bytecode.
ucode = assemble(SET, PC, self.asmwords["enter"])
self.space.write(ucode)
for word in words:
if isinstance(word, int):
self.space.write(pack(">H", word))
elif word in self.codewords:
self.space.write(pack(">H", self.codewords[word]))
else:
raise Exception("Can't reference unknown word %r" % word)
self.space.write(pack(">H", self.asmwords["exit"]))
ma = MetaAssembler()
# Deep primitives.
ma.prim("read", read(A))
ma.prim("write", write(A))
# Top of the line: Go back to the beginning of the string.
ucode = assemble(SET, B, 0x0)
ucode += assemble(SET, C, ma.workspace)
# Read a character into A.
ucode += call(ma.asmwords["read"])
ucode += assemble(SET, [C], A)
ucode += assemble(ADD, B, 0x1)
ucode += assemble(ADD, C, 0x1)
# If it's a space, then we're done. Otherwise, go back to reading things from
# the keyboard.
ucode = until(ucode, (IFN, 0x20, [C]))
ucode += assemble(SET, C, ma.workspace)
ma.prim("word", ucode)
preamble = assemble(SET, C, 0x0)
ucode = assemble(MUL, C, 10)
ucode += assemble(SET, X, [A])
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
# Compiling words.
ucode = _push([J])
ucode += assemble(ADD, J, 0x1)
ma.asm("literal", ucode)
ma.asm("'", ucode)
ucode = assemble(SET, PC, Z)
ma.asm("call", ucode)
# Low-level memory manipulation.
ucode = assemble(SET, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("!", ucode)
# TOS lets us cheat hard.
ucode = assemble(SET, Z, [Z])
ma.asm("@", ucode)
ucode = assemble(ADD, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("+!", ucode)
ucode = assemble(SUB, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("-!", ucode)
# Low-level branching.
ucode = assemble(ADD, J, [J + 0x1])
ma.asm("branch", ucode)
# Ugh.
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(ADD, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0branch", ucode)
# Goddammit DCPU!
ucode = assemble(SUB, J, [J + 0x1])
ma.asm("nbranch", ucode)
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(SUB, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0nbranch", ucode)
# Low-level tests.
# I bet there's a trick to this. I'll revisit this later.
ucode = assemble(IFN, J, 0x0)
ucode += assemble(SET, A, 0x1)
ucode += assemble(IFE, J, 0x0)
ucode += assemble(SET, A, 0x0)
ucode += assemble(SET, J, A)
ma.asm("0=", ucode)
def IF(then, otherwise=[]):
if otherwise:
then += ["branch", len(otherwise)]
return ["0=", "0branch", len(then)] + then + otherwise
def UNTIL(loop):
return loop + ["0nbranch", len(loop)]
# Main stack manipulation.
ucode = assemble(SET, PUSH, Z)
ma.asm("dup", ucode)
# Return stack manipulation.
ucode = _push(0xd000)
ma.asm("r0", ucode)
ucode = _push(Y)
ma.asm("rsp@", ucode)
ucode = _pop(Y)
ma.asm("rsp!", ucode)
ucode = _push([Y])
ucode += assemble(ADD, Y, 0x1)
ma.asm("r>", ucode)
ucode = assemble(SUB, Y, 0x1)
ucode += _pop([Y])
ma.asm(">r", ucode)
ucode = _push([Y])
ma.asm("r@", ucode)
ucode = _pop([Y])
ma.asm("r!", ucode)
ucode = assemble(ADD, Y, 0x1)
ma.asm("rdrop", ucode)
# Arithmetic.
ucode = assemble(ADD, Z, POP)
ma.asm("+", ucode)
# Low-level input.
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, A, Z)
ucode += call(ma.asmwords["word"])
ma.asm("key", ucode)
# High-level input.
ucode = call(ma.asmwords["word"])
ucode += _push(B)
ucode += _push(C)
ma.asm("word", ucode)
ucode = _pop(A)
ucode += _pop(B)
ucode += call(ma.asmwords["snumber"])
ucode += _push(C)
ma.asm("snumber", ucode)
# Output.
ucode = assemble(SET, A, [ma.FB])
ucode += _pop([A])
ucode += assemble(ADD, [ma.FB], 0x1)
ma.asm("emit", ucode)
# Global access.
# This could be done in Forth, but it's so small in assembly!
ucode = _pop([ma.HERE])
ucode += assemble(ADD, [ma.HERE], 0x1)
ma.asm(",", ucode)
ucode = assemble(SET, [ma.STATE], 0x0)
ma.asm("[", ucode)
ucode = assemble(SET, [ma.STATE], 0x1)
ma.asm("]", ucode)
ucode = _push([ma.LATEST])
ma.asm("latest", ucode)
# Compiler stuff.
ucode = call(ma.asmwords["read"])
ucode += _push([C])
ma.asm("char", ucode)
# Pop the target address (below TOS) into a working register. Leave length on
# TOS.
preamble = assemble(SET, A, POP)
# Use B as our linked list pointer.
preamble += assemble(SET, B, ma.LATEST)
# Top of the loop. Dereference B to move along the list.
ucode = assemble(SET, B, [B])
# Compare lengths; if they don't match, go to the next one.
ucode = until(ucode, (IFN, [B + 0x1], Z))
# memcmp() the strings.
ucode += assemble(ADD, B, 0x1)
ucode += assemble(SET, C, A)
ucode += assemble(SET, A, Z)
ucode += call(ma.library["memcmp"])
ucode += assemble(SUB, B, 0x1)
# If it succeeded, push the address back onto the stack and then jump out.
ucode += assemble(IFN, A, 0x0)
ucode += assemble(SET, Z, B)
ucode += assemble(IFN, A, 0x0)
ucode += assemble(ADD, PC, 0x4)
# Loop until we hit NULL.
ucode = until(ucode, (IFE, B, 0x0))
# We finished the loop and couldn't find anything. Guess we'll just set Z to
# 0x0 and exit.
ucode += assemble(SET, Z, 0x0)
ma.asm("find", preamble + ucode)
ma.thread("+1", ["literal", 0x1, "+"])
ma.thread(">cfa", ["+1", "dup", "@", "+", "+1"])
# Grab HERE. It's going to live in A for a bit.
preamble = assemble(SET, A, [ma.HERE])
# Write LATEST to HERE, update LATEST.
preamble += assemble(SET, [A], [ma.LATEST])
preamble += assemble(SET, [ma.LATEST], A)
# Move ahead, write length.
preamble += assemble(ADD, A, 0x1)
preamble += assemble(SET, [A], Z)
# Set the hidden flag.
preamble += assemble(BOR, [A], HIDDEN)
# SP is nerfed, so grab the source address and put it in B.
preamble += assemble(SET, B, PEEK)
# Loop. Copy from the source address to the target address.
ucode = assemble(SUB, Z, 0x1)
ucode += assemble(SET, [A], [B])
ucode += assemble(ADD, A, 0x1)
ucode += assemble(ADD, B, 0x1)
# Break when we have no more bytes to copy.
ucode = until(ucode, (IFE, Z, 0x0))
# Write out the new HERE.
ucode += assemble(SET, [ma.HERE], A)
# Get the stack to be sane again. Shift it down and then pop, same as 2drop.
ucode += assemble(ADD, SP, 0x1)
ucode += assemble(SET, Z, POP)
ma.asm("create", preamble + ucode)
# The stack points to the top of the header. Move forward one...
ucode = assemble(ADD, Z, 0x1)
# Now XOR in the hidden flag.
ucode += assemble(XOR, [Z], HIDDEN)
# And pop the stack.
ucode += assemble(SET, Z, POP)
ma.asm("hidden", ucode)
# We get to grab LATEST ourselves. On the plus side, no stack touching.
ucode = assemble(SET, A, ma.LATEST)
# XOR that flag!
ucode += assemble(XOR, [A + 0x1], IMMEDIATE)
ma.asm("immediate", ucode)
ucode = assemble(AND, Z, IMMEDIATE)
ma.asm("immediate?", ucode)
ma.thread(":", [
"word",
"create",
"literal", ma.asmwords["enter"],
",",
"latest",
"@",
"hidden",
"]",
])
ma.thread(";", [
"literal", ma.asmwords["exit"],
",",
"latest",
"@",
"hidden",
"[",
], flags=IMMEDIATE)
ma.thread("interpret-found", [
"dup",
"+1",
"immediate?",
] + IF([
">cfa",
"call",
], [
">cfa",
",",
])
)
ma.thread("interpret", [
"word",
"find",
"dup",
] + IF([
"interpret-found",
])
)
ma.thread("quit", ["r0", "rsp!", "interpret", "nbranch", 0x2])
ma.finalize()
| print "Adding library function", name
self.library[name] = self.space.tell()
self.space.write(library[name]()) | conditional_block |
template.go | package versions
import (
"context"
"fmt"
"io/ioutil"
"path"
"strings"
imagev1 "github.com/openshift/api/image/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
v1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1"
v2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
"github.com/maistra/istio-operator/pkg/controller/common"
)
// GetChartsDir returns the location of the Helm charts. Similar layout to istio.io/istio/install/kubernetes/helm.
func (v Ver) GetChartsDir() string {
if len(common.Config.Rendering.ChartsDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "helm", v.String())
}
return path.Join(common.Config.Rendering.ChartsDir, v.String())
}
// GetTemplatesDir returns the location of the Operator templates files
func (v Ver) GetUserTemplatesDir() string {
if len(common.Config.Rendering.UserTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "templates")
}
return common.Config.Rendering.UserTemplatesDir
}
// GetDefaultTemplatesDir returns the location of the Default Operator templates files
func (v Ver) GetDefaultTemplatesDir() string {
if len(common.Config.Rendering.DefaultTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "default-templates", v.String())
}
return path.Join(common.Config.Rendering.DefaultTemplatesDir, v.String())
}
// common code for managing rendering
// mergeValues merges a map containing input values on top of a map containing
// base values, giving preference to the base values for conflicts
func mergeValues(base map[string]interface{}, input map[string]interface{}) map[string]interface{} {
if base == nil {
base = make(map[string]interface{}, 1)
}
for key, value := range input {
// if the key doesn't already exist, add it
if _, exists := base[key]; !exists {
base[key] = value
continue
}
// at this point, key exists in both input and base.
// If both are maps, recurse.
// If only input is a map, ignore it. We don't want to overrwrite base.
// If both are values, again, ignore it since we don't want to overrwrite base.
if baseKeyAsMap, baseOK := base[key].(map[string]interface{}); baseOK {
if inputAsMap, inputOK := value.(map[string]interface{}); inputOK {
base[key] = mergeValues(baseKeyAsMap, inputAsMap)
}
}
}
return base
}
func (v Ver) getSMCPProfile(name string, targetNamespace string) (*v1.ControlPlaneSpec, []string, error) {
if strings.Contains(name, "/") {
return nil, nil, fmt.Errorf("profile name contains invalid character '/'")
}
profileContent, err := ioutil.ReadFile(path.Join(v.GetUserTemplatesDir(), name))
if err != nil {
// if we can't read from the user profile path, try from the default path
// we use two paths because Kubernetes will not auto-update volume mounted
// configmaps mounted in directories with pre-existing content
defaultProfileContent, defaultErr := ioutil.ReadFile(path.Join(v.GetDefaultTemplatesDir(), name))
if defaultErr != nil {
return nil, nil, fmt.Errorf("template cannot be loaded from user or default directory. Error from user: %s. Error from default: %s", err, defaultErr)
}
profileContent = defaultProfileContent
}
obj, gvk, err := decoder.Decode(profileContent, nil, nil)
if err != nil || gvk == nil {
return nil, nil, fmt.Errorf("failed to parse profile %s contents: %s", name, err)
}
switch smcp := obj.(type) {
case *v1.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
if len(smcp.Spec.Profiles) == 0 {
if smcp.Spec.Template == "" {
return &smcp.Spec, nil, nil
}
return &smcp.Spec, []string{smcp.Spec.Template}, nil
}
return &smcp.Spec, smcp.Spec.Profiles, nil
case *v2.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
smcpv1 := &v1.ServiceMeshControlPlane{}
smcp.SetNamespace(targetNamespace)
if err := smcpv1.ConvertFrom(smcp); err != nil {
return nil, nil, err
}
return &smcpv1.Spec, smcp.Spec.Profiles, nil
default:
return nil, nil, fmt.Errorf("unsupported ServiceMeshControlPlane version: %s", gvk.String())
}
}
// renderSMCPTemplates traverses and processes all of the references templates
func (v Ver) recursivelyApplyProfiles(
ctx context.Context, smcp *v1.ControlPlaneSpec, targetNamespace string, profiles []string, visited sets.String,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
for index := len(profiles) - 1; index >= 0; index-- {
profileName := profiles[index]
if visited.Has(profileName) {
log.Info(fmt.Sprintf("smcp profile %s has already been applied", profileName))
continue | profile, profiles, err := v.getSMCPProfile(profileName, targetNamespace)
if err != nil {
return *smcp, err
}
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(profile)
log.V(5).Info(fmt.Sprintf("profile values:\n%s\n", string(rawValues)))
rawValues, _ = yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("before applying profile values:\n%s\n", string(rawValues)))
}
// apply this profile first, then its children
smcp.Istio = v1.NewHelmValues(mergeValues(smcp.Istio.GetContent(), profile.Istio.GetContent()))
smcp.ThreeScale = v1.NewHelmValues(mergeValues(smcp.ThreeScale.GetContent(), profile.ThreeScale.GetContent()))
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("after applying profile values:\n%s\n", string(rawValues)))
}
*smcp, err = v.recursivelyApplyProfiles(ctx, smcp, targetNamespace, profiles, visited)
if err != nil {
log.Info(fmt.Sprintf("error applying profiles: %s\n", err))
return *smcp, err
}
}
return *smcp, nil
}
func (v Ver) updateImagesWithSHAs(ctx context.Context, cr *common.ControllerResources, smcpSpec v1.ControlPlaneSpec) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("updating image names for disconnected install")
var err error
if err = v.Strategy().SetImageValues(ctx, cr, &smcpSpec); err != nil {
return smcpSpec, err
}
err = updateOauthProxyConfig(ctx, cr, &smcpSpec)
return smcpSpec, err
}
func updateOauthProxyConfig(ctx context.Context, cr *common.ControllerResources, smcpSpec *v1.ControlPlaneSpec) error {
if !common.Config.OAuthProxy.Query || len(common.Config.OAuthProxy.Name) == 0 || len(common.Config.OAuthProxy.Namespace) == 0 {
return nil
}
log := common.LogFromContext(ctx)
is := &imagev1.ImageStream{}
if err := cr.Client.Get(ctx, client.ObjectKey{Namespace: common.Config.OAuthProxy.Namespace, Name: common.Config.OAuthProxy.Name}, is); err == nil {
foundTag := false
for _, tag := range is.Status.Tags {
if tag.Tag == common.Config.OAuthProxy.Tag {
foundTag = true
if len(tag.Items) > 0 && len(tag.Items[0].DockerImageReference) > 0 {
common.Config.OAuthProxy.Image = tag.Items[0].DockerImageReference
} else {
log.Info(fmt.Sprintf("warning: dockerImageReference not set for tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
break
}
}
if !foundTag {
log.Info(fmt.Sprintf("warning: could not find tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
} else if !apierrors.IsNotFound(err) {
log.Error(err, fmt.Sprintf("unexpected error retrieving ImageStream %s/%s", common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
if len(common.Config.OAuthProxy.Image) == 0 {
log.Info("global.oauthproxy.image will not be overridden")
return nil
}
log.Info(fmt.Sprintf("using '%s' for global.oauthproxy.image", common.Config.OAuthProxy.Image))
return updateImageField(smcpSpec.Istio, "global.oauthproxy.image", common.Config.OAuthProxy.Image)
}
func updateImageField(helmValues *v1.HelmValues, path, value string) error {
if len(value) == 0 {
return nil
}
return helmValues.SetField(path, value)
}
func (v Ver) ApplyProfiles(ctx context.Context, cr *common.ControllerResources,
smcpSpec *v1.ControlPlaneSpec, targetNamespace string,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("applying profiles to ServiceMeshControlPlane")
profiles := smcpSpec.Profiles
if len(profiles) == 0 {
if smcpSpec.Template == "" {
profiles = []string{v1.DefaultTemplate}
log.Info("No profiles specified, applying default profile")
} else {
profiles = []string{smcpSpec.Template}
}
}
if smcpSpec.Istio == nil {
smcpSpec.Istio = v1.NewHelmValues(make(map[string]interface{}))
}
if smcpSpec.ThreeScale == nil {
smcpSpec.ThreeScale = v1.NewHelmValues(make(map[string]interface{}))
}
applyDisconnectedSettings := true
if tag, _, _ := smcpSpec.Istio.GetString("global.tag"); tag != "" {
// don't update anything
applyDisconnectedSettings = false
} else if hub, _, _ := smcpSpec.Istio.GetString("global.hub"); hub != "" {
// don't update anything
applyDisconnectedSettings = false
}
spec, err := v.recursivelyApplyProfiles(ctx, smcpSpec, targetNamespace, profiles, sets.NewString())
if err != nil {
return spec, err
}
if applyDisconnectedSettings {
spec, err = v.updateImagesWithSHAs(ctx, cr, spec)
if err != nil {
log.Error(err, "warning: failed to apply image names to support disconnected install")
return spec, err
}
}
log.Info("finished updating ServiceMeshControlPlane", "Spec", spec)
return spec, err
}
func isEnabled(spec *v1.HelmValues) bool {
if enabled, found, _ := spec.GetBool("enabled"); found {
return enabled
}
return false
}
func isComponentEnabled(spec *v1.HelmValues, path string) bool {
if enabled, found, _ := spec.GetBool(path + ".enabled"); found {
return enabled
}
return false
} | }
log.Info(fmt.Sprintf("processing smcp profile %s", profileName))
| random_line_split |
template.go | package versions
import (
"context"
"fmt"
"io/ioutil"
"path"
"strings"
imagev1 "github.com/openshift/api/image/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
v1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1"
v2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
"github.com/maistra/istio-operator/pkg/controller/common"
)
// GetChartsDir returns the location of the Helm charts. Similar layout to istio.io/istio/install/kubernetes/helm.
func (v Ver) GetChartsDir() string {
if len(common.Config.Rendering.ChartsDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "helm", v.String())
}
return path.Join(common.Config.Rendering.ChartsDir, v.String())
}
// GetTemplatesDir returns the location of the Operator templates files
func (v Ver) GetUserTemplatesDir() string {
if len(common.Config.Rendering.UserTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "templates")
}
return common.Config.Rendering.UserTemplatesDir
}
// GetDefaultTemplatesDir returns the location of the Default Operator templates files
func (v Ver) GetDefaultTemplatesDir() string {
if len(common.Config.Rendering.DefaultTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "default-templates", v.String())
}
return path.Join(common.Config.Rendering.DefaultTemplatesDir, v.String())
}
// common code for managing rendering
// mergeValues merges a map containing input values on top of a map containing
// base values, giving preference to the base values for conflicts
func mergeValues(base map[string]interface{}, input map[string]interface{}) map[string]interface{} {
if base == nil {
base = make(map[string]interface{}, 1)
}
for key, value := range input {
// if the key doesn't already exist, add it
if _, exists := base[key]; !exists {
base[key] = value
continue
}
// at this point, key exists in both input and base.
// If both are maps, recurse.
// If only input is a map, ignore it. We don't want to overrwrite base.
// If both are values, again, ignore it since we don't want to overrwrite base.
if baseKeyAsMap, baseOK := base[key].(map[string]interface{}); baseOK {
if inputAsMap, inputOK := value.(map[string]interface{}); inputOK {
base[key] = mergeValues(baseKeyAsMap, inputAsMap)
}
}
}
return base
}
func (v Ver) getSMCPProfile(name string, targetNamespace string) (*v1.ControlPlaneSpec, []string, error) {
if strings.Contains(name, "/") {
return nil, nil, fmt.Errorf("profile name contains invalid character '/'")
}
profileContent, err := ioutil.ReadFile(path.Join(v.GetUserTemplatesDir(), name))
if err != nil {
// if we can't read from the user profile path, try from the default path
// we use two paths because Kubernetes will not auto-update volume mounted
// configmaps mounted in directories with pre-existing content
defaultProfileContent, defaultErr := ioutil.ReadFile(path.Join(v.GetDefaultTemplatesDir(), name))
if defaultErr != nil {
return nil, nil, fmt.Errorf("template cannot be loaded from user or default directory. Error from user: %s. Error from default: %s", err, defaultErr)
}
profileContent = defaultProfileContent
}
obj, gvk, err := decoder.Decode(profileContent, nil, nil)
if err != nil || gvk == nil {
return nil, nil, fmt.Errorf("failed to parse profile %s contents: %s", name, err)
}
switch smcp := obj.(type) {
case *v1.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
if len(smcp.Spec.Profiles) == 0 {
if smcp.Spec.Template == "" {
return &smcp.Spec, nil, nil
}
return &smcp.Spec, []string{smcp.Spec.Template}, nil
}
return &smcp.Spec, smcp.Spec.Profiles, nil
case *v2.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
smcpv1 := &v1.ServiceMeshControlPlane{}
smcp.SetNamespace(targetNamespace)
if err := smcpv1.ConvertFrom(smcp); err != nil {
return nil, nil, err
}
return &smcpv1.Spec, smcp.Spec.Profiles, nil
default:
return nil, nil, fmt.Errorf("unsupported ServiceMeshControlPlane version: %s", gvk.String())
}
}
// renderSMCPTemplates traverses and processes all of the references templates
func (v Ver) recursivelyApplyProfiles(
ctx context.Context, smcp *v1.ControlPlaneSpec, targetNamespace string, profiles []string, visited sets.String,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
for index := len(profiles) - 1; index >= 0; index-- {
profileName := profiles[index]
if visited.Has(profileName) {
log.Info(fmt.Sprintf("smcp profile %s has already been applied", profileName))
continue
}
log.Info(fmt.Sprintf("processing smcp profile %s", profileName))
profile, profiles, err := v.getSMCPProfile(profileName, targetNamespace)
if err != nil {
return *smcp, err
}
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(profile)
log.V(5).Info(fmt.Sprintf("profile values:\n%s\n", string(rawValues)))
rawValues, _ = yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("before applying profile values:\n%s\n", string(rawValues)))
}
// apply this profile first, then its children
smcp.Istio = v1.NewHelmValues(mergeValues(smcp.Istio.GetContent(), profile.Istio.GetContent()))
smcp.ThreeScale = v1.NewHelmValues(mergeValues(smcp.ThreeScale.GetContent(), profile.ThreeScale.GetContent()))
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("after applying profile values:\n%s\n", string(rawValues)))
}
*smcp, err = v.recursivelyApplyProfiles(ctx, smcp, targetNamespace, profiles, visited)
if err != nil {
log.Info(fmt.Sprintf("error applying profiles: %s\n", err))
return *smcp, err
}
}
return *smcp, nil
}
func (v Ver) updateImagesWithSHAs(ctx context.Context, cr *common.ControllerResources, smcpSpec v1.ControlPlaneSpec) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("updating image names for disconnected install")
var err error
if err = v.Strategy().SetImageValues(ctx, cr, &smcpSpec); err != nil {
return smcpSpec, err
}
err = updateOauthProxyConfig(ctx, cr, &smcpSpec)
return smcpSpec, err
}
func updateOauthProxyConfig(ctx context.Context, cr *common.ControllerResources, smcpSpec *v1.ControlPlaneSpec) error {
if !common.Config.OAuthProxy.Query || len(common.Config.OAuthProxy.Name) == 0 || len(common.Config.OAuthProxy.Namespace) == 0 {
return nil
}
log := common.LogFromContext(ctx)
is := &imagev1.ImageStream{}
if err := cr.Client.Get(ctx, client.ObjectKey{Namespace: common.Config.OAuthProxy.Namespace, Name: common.Config.OAuthProxy.Name}, is); err == nil {
foundTag := false
for _, tag := range is.Status.Tags {
if tag.Tag == common.Config.OAuthProxy.Tag {
foundTag = true
if len(tag.Items) > 0 && len(tag.Items[0].DockerImageReference) > 0 {
common.Config.OAuthProxy.Image = tag.Items[0].DockerImageReference
} else {
log.Info(fmt.Sprintf("warning: dockerImageReference not set for tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
break
}
}
if !foundTag {
log.Info(fmt.Sprintf("warning: could not find tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
} else if !apierrors.IsNotFound(err) {
log.Error(err, fmt.Sprintf("unexpected error retrieving ImageStream %s/%s", common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
if len(common.Config.OAuthProxy.Image) == 0 {
log.Info("global.oauthproxy.image will not be overridden")
return nil
}
log.Info(fmt.Sprintf("using '%s' for global.oauthproxy.image", common.Config.OAuthProxy.Image))
return updateImageField(smcpSpec.Istio, "global.oauthproxy.image", common.Config.OAuthProxy.Image)
}
func updateImageField(helmValues *v1.HelmValues, path, value string) error {
if len(value) == 0 {
return nil
}
return helmValues.SetField(path, value)
}
func (v Ver) ApplyProfiles(ctx context.Context, cr *common.ControllerResources,
smcpSpec *v1.ControlPlaneSpec, targetNamespace string,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("applying profiles to ServiceMeshControlPlane")
profiles := smcpSpec.Profiles
if len(profiles) == 0 {
if smcpSpec.Template == "" {
profiles = []string{v1.DefaultTemplate}
log.Info("No profiles specified, applying default profile")
} else {
profiles = []string{smcpSpec.Template}
}
}
if smcpSpec.Istio == nil {
smcpSpec.Istio = v1.NewHelmValues(make(map[string]interface{}))
}
if smcpSpec.ThreeScale == nil {
smcpSpec.ThreeScale = v1.NewHelmValues(make(map[string]interface{}))
}
applyDisconnectedSettings := true
if tag, _, _ := smcpSpec.Istio.GetString("global.tag"); tag != "" {
// don't update anything
applyDisconnectedSettings = false
} else if hub, _, _ := smcpSpec.Istio.GetString("global.hub"); hub != "" {
// don't update anything
applyDisconnectedSettings = false
}
spec, err := v.recursivelyApplyProfiles(ctx, smcpSpec, targetNamespace, profiles, sets.NewString())
if err != nil {
return spec, err
}
if applyDisconnectedSettings {
spec, err = v.updateImagesWithSHAs(ctx, cr, spec)
if err != nil {
log.Error(err, "warning: failed to apply image names to support disconnected install")
return spec, err
}
}
log.Info("finished updating ServiceMeshControlPlane", "Spec", spec)
return spec, err
}
func isEnabled(spec *v1.HelmValues) bool {
if enabled, found, _ := spec.GetBool("enabled"); found {
return enabled
}
return false
}
func isComponentEnabled(spec *v1.HelmValues, path string) bool {
if enabled, found, _ := spec.GetBool(path + ".enabled"); found |
return false
}
| {
return enabled
} | conditional_block |
template.go | package versions
import (
"context"
"fmt"
"io/ioutil"
"path"
"strings"
imagev1 "github.com/openshift/api/image/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
v1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1"
v2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
"github.com/maistra/istio-operator/pkg/controller/common"
)
// GetChartsDir returns the location of the Helm charts. Similar layout to istio.io/istio/install/kubernetes/helm.
func (v Ver) GetChartsDir() string {
if len(common.Config.Rendering.ChartsDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "helm", v.String())
}
return path.Join(common.Config.Rendering.ChartsDir, v.String())
}
// GetTemplatesDir returns the location of the Operator templates files
func (v Ver) GetUserTemplatesDir() string {
if len(common.Config.Rendering.UserTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "templates")
}
return common.Config.Rendering.UserTemplatesDir
}
// GetDefaultTemplatesDir returns the location of the Default Operator templates files
func (v Ver) GetDefaultTemplatesDir() string {
if len(common.Config.Rendering.DefaultTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "default-templates", v.String())
}
return path.Join(common.Config.Rendering.DefaultTemplatesDir, v.String())
}
// common code for managing rendering
// mergeValues merges a map containing input values on top of a map containing
// base values, giving preference to the base values for conflicts
func mergeValues(base map[string]interface{}, input map[string]interface{}) map[string]interface{} {
if base == nil {
base = make(map[string]interface{}, 1)
}
for key, value := range input {
// if the key doesn't already exist, add it
if _, exists := base[key]; !exists {
base[key] = value
continue
}
// at this point, key exists in both input and base.
// If both are maps, recurse.
// If only input is a map, ignore it. We don't want to overrwrite base.
// If both are values, again, ignore it since we don't want to overrwrite base.
if baseKeyAsMap, baseOK := base[key].(map[string]interface{}); baseOK {
if inputAsMap, inputOK := value.(map[string]interface{}); inputOK {
base[key] = mergeValues(baseKeyAsMap, inputAsMap)
}
}
}
return base
}
func (v Ver) getSMCPProfile(name string, targetNamespace string) (*v1.ControlPlaneSpec, []string, error) {
if strings.Contains(name, "/") {
return nil, nil, fmt.Errorf("profile name contains invalid character '/'")
}
profileContent, err := ioutil.ReadFile(path.Join(v.GetUserTemplatesDir(), name))
if err != nil {
// if we can't read from the user profile path, try from the default path
// we use two paths because Kubernetes will not auto-update volume mounted
// configmaps mounted in directories with pre-existing content
defaultProfileContent, defaultErr := ioutil.ReadFile(path.Join(v.GetDefaultTemplatesDir(), name))
if defaultErr != nil {
return nil, nil, fmt.Errorf("template cannot be loaded from user or default directory. Error from user: %s. Error from default: %s", err, defaultErr)
}
profileContent = defaultProfileContent
}
obj, gvk, err := decoder.Decode(profileContent, nil, nil)
if err != nil || gvk == nil {
return nil, nil, fmt.Errorf("failed to parse profile %s contents: %s", name, err)
}
switch smcp := obj.(type) {
case *v1.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
if len(smcp.Spec.Profiles) == 0 {
if smcp.Spec.Template == "" {
return &smcp.Spec, nil, nil
}
return &smcp.Spec, []string{smcp.Spec.Template}, nil
}
return &smcp.Spec, smcp.Spec.Profiles, nil
case *v2.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
smcpv1 := &v1.ServiceMeshControlPlane{}
smcp.SetNamespace(targetNamespace)
if err := smcpv1.ConvertFrom(smcp); err != nil {
return nil, nil, err
}
return &smcpv1.Spec, smcp.Spec.Profiles, nil
default:
return nil, nil, fmt.Errorf("unsupported ServiceMeshControlPlane version: %s", gvk.String())
}
}
// renderSMCPTemplates traverses and processes all of the references templates
func (v Ver) recursivelyApplyProfiles(
ctx context.Context, smcp *v1.ControlPlaneSpec, targetNamespace string, profiles []string, visited sets.String,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
for index := len(profiles) - 1; index >= 0; index-- {
profileName := profiles[index]
if visited.Has(profileName) {
log.Info(fmt.Sprintf("smcp profile %s has already been applied", profileName))
continue
}
log.Info(fmt.Sprintf("processing smcp profile %s", profileName))
profile, profiles, err := v.getSMCPProfile(profileName, targetNamespace)
if err != nil {
return *smcp, err
}
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(profile)
log.V(5).Info(fmt.Sprintf("profile values:\n%s\n", string(rawValues)))
rawValues, _ = yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("before applying profile values:\n%s\n", string(rawValues)))
}
// apply this profile first, then its children
smcp.Istio = v1.NewHelmValues(mergeValues(smcp.Istio.GetContent(), profile.Istio.GetContent()))
smcp.ThreeScale = v1.NewHelmValues(mergeValues(smcp.ThreeScale.GetContent(), profile.ThreeScale.GetContent()))
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("after applying profile values:\n%s\n", string(rawValues)))
}
*smcp, err = v.recursivelyApplyProfiles(ctx, smcp, targetNamespace, profiles, visited)
if err != nil {
log.Info(fmt.Sprintf("error applying profiles: %s\n", err))
return *smcp, err
}
}
return *smcp, nil
}
func (v Ver) updateImagesWithSHAs(ctx context.Context, cr *common.ControllerResources, smcpSpec v1.ControlPlaneSpec) (v1.ControlPlaneSpec, error) |
func updateOauthProxyConfig(ctx context.Context, cr *common.ControllerResources, smcpSpec *v1.ControlPlaneSpec) error {
if !common.Config.OAuthProxy.Query || len(common.Config.OAuthProxy.Name) == 0 || len(common.Config.OAuthProxy.Namespace) == 0 {
return nil
}
log := common.LogFromContext(ctx)
is := &imagev1.ImageStream{}
if err := cr.Client.Get(ctx, client.ObjectKey{Namespace: common.Config.OAuthProxy.Namespace, Name: common.Config.OAuthProxy.Name}, is); err == nil {
foundTag := false
for _, tag := range is.Status.Tags {
if tag.Tag == common.Config.OAuthProxy.Tag {
foundTag = true
if len(tag.Items) > 0 && len(tag.Items[0].DockerImageReference) > 0 {
common.Config.OAuthProxy.Image = tag.Items[0].DockerImageReference
} else {
log.Info(fmt.Sprintf("warning: dockerImageReference not set for tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
break
}
}
if !foundTag {
log.Info(fmt.Sprintf("warning: could not find tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
} else if !apierrors.IsNotFound(err) {
log.Error(err, fmt.Sprintf("unexpected error retrieving ImageStream %s/%s", common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
if len(common.Config.OAuthProxy.Image) == 0 {
log.Info("global.oauthproxy.image will not be overridden")
return nil
}
log.Info(fmt.Sprintf("using '%s' for global.oauthproxy.image", common.Config.OAuthProxy.Image))
return updateImageField(smcpSpec.Istio, "global.oauthproxy.image", common.Config.OAuthProxy.Image)
}
func updateImageField(helmValues *v1.HelmValues, path, value string) error {
if len(value) == 0 {
return nil
}
return helmValues.SetField(path, value)
}
func (v Ver) ApplyProfiles(ctx context.Context, cr *common.ControllerResources,
smcpSpec *v1.ControlPlaneSpec, targetNamespace string,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("applying profiles to ServiceMeshControlPlane")
profiles := smcpSpec.Profiles
if len(profiles) == 0 {
if smcpSpec.Template == "" {
profiles = []string{v1.DefaultTemplate}
log.Info("No profiles specified, applying default profile")
} else {
profiles = []string{smcpSpec.Template}
}
}
if smcpSpec.Istio == nil {
smcpSpec.Istio = v1.NewHelmValues(make(map[string]interface{}))
}
if smcpSpec.ThreeScale == nil {
smcpSpec.ThreeScale = v1.NewHelmValues(make(map[string]interface{}))
}
applyDisconnectedSettings := true
if tag, _, _ := smcpSpec.Istio.GetString("global.tag"); tag != "" {
// don't update anything
applyDisconnectedSettings = false
} else if hub, _, _ := smcpSpec.Istio.GetString("global.hub"); hub != "" {
// don't update anything
applyDisconnectedSettings = false
}
spec, err := v.recursivelyApplyProfiles(ctx, smcpSpec, targetNamespace, profiles, sets.NewString())
if err != nil {
return spec, err
}
if applyDisconnectedSettings {
spec, err = v.updateImagesWithSHAs(ctx, cr, spec)
if err != nil {
log.Error(err, "warning: failed to apply image names to support disconnected install")
return spec, err
}
}
log.Info("finished updating ServiceMeshControlPlane", "Spec", spec)
return spec, err
}
func isEnabled(spec *v1.HelmValues) bool {
if enabled, found, _ := spec.GetBool("enabled"); found {
return enabled
}
return false
}
func isComponentEnabled(spec *v1.HelmValues, path string) bool {
if enabled, found, _ := spec.GetBool(path + ".enabled"); found {
return enabled
}
return false
}
| {
log := common.LogFromContext(ctx)
log.Info("updating image names for disconnected install")
var err error
if err = v.Strategy().SetImageValues(ctx, cr, &smcpSpec); err != nil {
return smcpSpec, err
}
err = updateOauthProxyConfig(ctx, cr, &smcpSpec)
return smcpSpec, err
} | identifier_body |
template.go | package versions
import (
"context"
"fmt"
"io/ioutil"
"path"
"strings"
imagev1 "github.com/openshift/api/image/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
v1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1"
v2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
"github.com/maistra/istio-operator/pkg/controller/common"
)
// GetChartsDir returns the location of the Helm charts. Similar layout to istio.io/istio/install/kubernetes/helm.
func (v Ver) GetChartsDir() string {
if len(common.Config.Rendering.ChartsDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "helm", v.String())
}
return path.Join(common.Config.Rendering.ChartsDir, v.String())
}
// GetTemplatesDir returns the location of the Operator templates files
func (v Ver) GetUserTemplatesDir() string {
if len(common.Config.Rendering.UserTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "templates")
}
return common.Config.Rendering.UserTemplatesDir
}
// GetDefaultTemplatesDir returns the location of the Default Operator templates files
func (v Ver) GetDefaultTemplatesDir() string {
if len(common.Config.Rendering.DefaultTemplatesDir) == 0 {
return path.Join(common.Config.Rendering.ResourceDir, "default-templates", v.String())
}
return path.Join(common.Config.Rendering.DefaultTemplatesDir, v.String())
}
// common code for managing rendering
// mergeValues merges a map containing input values on top of a map containing
// base values, giving preference to the base values for conflicts
func mergeValues(base map[string]interface{}, input map[string]interface{}) map[string]interface{} {
if base == nil {
base = make(map[string]interface{}, 1)
}
for key, value := range input {
// if the key doesn't already exist, add it
if _, exists := base[key]; !exists {
base[key] = value
continue
}
// at this point, key exists in both input and base.
// If both are maps, recurse.
// If only input is a map, ignore it. We don't want to overrwrite base.
// If both are values, again, ignore it since we don't want to overrwrite base.
if baseKeyAsMap, baseOK := base[key].(map[string]interface{}); baseOK {
if inputAsMap, inputOK := value.(map[string]interface{}); inputOK {
base[key] = mergeValues(baseKeyAsMap, inputAsMap)
}
}
}
return base
}
func (v Ver) getSMCPProfile(name string, targetNamespace string) (*v1.ControlPlaneSpec, []string, error) {
if strings.Contains(name, "/") {
return nil, nil, fmt.Errorf("profile name contains invalid character '/'")
}
profileContent, err := ioutil.ReadFile(path.Join(v.GetUserTemplatesDir(), name))
if err != nil {
// if we can't read from the user profile path, try from the default path
// we use two paths because Kubernetes will not auto-update volume mounted
// configmaps mounted in directories with pre-existing content
defaultProfileContent, defaultErr := ioutil.ReadFile(path.Join(v.GetDefaultTemplatesDir(), name))
if defaultErr != nil {
return nil, nil, fmt.Errorf("template cannot be loaded from user or default directory. Error from user: %s. Error from default: %s", err, defaultErr)
}
profileContent = defaultProfileContent
}
obj, gvk, err := decoder.Decode(profileContent, nil, nil)
if err != nil || gvk == nil {
return nil, nil, fmt.Errorf("failed to parse profile %s contents: %s", name, err)
}
switch smcp := obj.(type) {
case *v1.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
if len(smcp.Spec.Profiles) == 0 {
if smcp.Spec.Template == "" {
return &smcp.Spec, nil, nil
}
return &smcp.Spec, []string{smcp.Spec.Template}, nil
}
return &smcp.Spec, smcp.Spec.Profiles, nil
case *v2.ServiceMeshControlPlane:
// ensure version is set so conversion works correctly
smcp.Spec.Version = v.String()
smcpv1 := &v1.ServiceMeshControlPlane{}
smcp.SetNamespace(targetNamespace)
if err := smcpv1.ConvertFrom(smcp); err != nil {
return nil, nil, err
}
return &smcpv1.Spec, smcp.Spec.Profiles, nil
default:
return nil, nil, fmt.Errorf("unsupported ServiceMeshControlPlane version: %s", gvk.String())
}
}
// renderSMCPTemplates traverses and processes all of the references templates
func (v Ver) recursivelyApplyProfiles(
ctx context.Context, smcp *v1.ControlPlaneSpec, targetNamespace string, profiles []string, visited sets.String,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
for index := len(profiles) - 1; index >= 0; index-- {
profileName := profiles[index]
if visited.Has(profileName) {
log.Info(fmt.Sprintf("smcp profile %s has already been applied", profileName))
continue
}
log.Info(fmt.Sprintf("processing smcp profile %s", profileName))
profile, profiles, err := v.getSMCPProfile(profileName, targetNamespace)
if err != nil {
return *smcp, err
}
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(profile)
log.V(5).Info(fmt.Sprintf("profile values:\n%s\n", string(rawValues)))
rawValues, _ = yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("before applying profile values:\n%s\n", string(rawValues)))
}
// apply this profile first, then its children
smcp.Istio = v1.NewHelmValues(mergeValues(smcp.Istio.GetContent(), profile.Istio.GetContent()))
smcp.ThreeScale = v1.NewHelmValues(mergeValues(smcp.ThreeScale.GetContent(), profile.ThreeScale.GetContent()))
if log.V(5).Enabled() {
rawValues, _ := yaml.Marshal(smcp)
log.V(5).Info(fmt.Sprintf("after applying profile values:\n%s\n", string(rawValues)))
}
*smcp, err = v.recursivelyApplyProfiles(ctx, smcp, targetNamespace, profiles, visited)
if err != nil {
log.Info(fmt.Sprintf("error applying profiles: %s\n", err))
return *smcp, err
}
}
return *smcp, nil
}
func (v Ver) updateImagesWithSHAs(ctx context.Context, cr *common.ControllerResources, smcpSpec v1.ControlPlaneSpec) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("updating image names for disconnected install")
var err error
if err = v.Strategy().SetImageValues(ctx, cr, &smcpSpec); err != nil {
return smcpSpec, err
}
err = updateOauthProxyConfig(ctx, cr, &smcpSpec)
return smcpSpec, err
}
func updateOauthProxyConfig(ctx context.Context, cr *common.ControllerResources, smcpSpec *v1.ControlPlaneSpec) error {
if !common.Config.OAuthProxy.Query || len(common.Config.OAuthProxy.Name) == 0 || len(common.Config.OAuthProxy.Namespace) == 0 {
return nil
}
log := common.LogFromContext(ctx)
is := &imagev1.ImageStream{}
if err := cr.Client.Get(ctx, client.ObjectKey{Namespace: common.Config.OAuthProxy.Namespace, Name: common.Config.OAuthProxy.Name}, is); err == nil {
foundTag := false
for _, tag := range is.Status.Tags {
if tag.Tag == common.Config.OAuthProxy.Tag {
foundTag = true
if len(tag.Items) > 0 && len(tag.Items[0].DockerImageReference) > 0 {
common.Config.OAuthProxy.Image = tag.Items[0].DockerImageReference
} else {
log.Info(fmt.Sprintf("warning: dockerImageReference not set for tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
break
}
}
if !foundTag {
log.Info(fmt.Sprintf("warning: could not find tag '%s' in ImageStream %s/%s",
common.Config.OAuthProxy.Tag, common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
} else if !apierrors.IsNotFound(err) {
log.Error(err, fmt.Sprintf("unexpected error retrieving ImageStream %s/%s", common.Config.OAuthProxy.Namespace, common.Config.OAuthProxy.Name))
}
if len(common.Config.OAuthProxy.Image) == 0 {
log.Info("global.oauthproxy.image will not be overridden")
return nil
}
log.Info(fmt.Sprintf("using '%s' for global.oauthproxy.image", common.Config.OAuthProxy.Image))
return updateImageField(smcpSpec.Istio, "global.oauthproxy.image", common.Config.OAuthProxy.Image)
}
func updateImageField(helmValues *v1.HelmValues, path, value string) error {
if len(value) == 0 {
return nil
}
return helmValues.SetField(path, value)
}
func (v Ver) ApplyProfiles(ctx context.Context, cr *common.ControllerResources,
smcpSpec *v1.ControlPlaneSpec, targetNamespace string,
) (v1.ControlPlaneSpec, error) {
log := common.LogFromContext(ctx)
log.Info("applying profiles to ServiceMeshControlPlane")
profiles := smcpSpec.Profiles
if len(profiles) == 0 {
if smcpSpec.Template == "" {
profiles = []string{v1.DefaultTemplate}
log.Info("No profiles specified, applying default profile")
} else {
profiles = []string{smcpSpec.Template}
}
}
if smcpSpec.Istio == nil {
smcpSpec.Istio = v1.NewHelmValues(make(map[string]interface{}))
}
if smcpSpec.ThreeScale == nil {
smcpSpec.ThreeScale = v1.NewHelmValues(make(map[string]interface{}))
}
applyDisconnectedSettings := true
if tag, _, _ := smcpSpec.Istio.GetString("global.tag"); tag != "" {
// don't update anything
applyDisconnectedSettings = false
} else if hub, _, _ := smcpSpec.Istio.GetString("global.hub"); hub != "" {
// don't update anything
applyDisconnectedSettings = false
}
spec, err := v.recursivelyApplyProfiles(ctx, smcpSpec, targetNamespace, profiles, sets.NewString())
if err != nil {
return spec, err
}
if applyDisconnectedSettings {
spec, err = v.updateImagesWithSHAs(ctx, cr, spec)
if err != nil {
log.Error(err, "warning: failed to apply image names to support disconnected install")
return spec, err
}
}
log.Info("finished updating ServiceMeshControlPlane", "Spec", spec)
return spec, err
}
func | (spec *v1.HelmValues) bool {
if enabled, found, _ := spec.GetBool("enabled"); found {
return enabled
}
return false
}
func isComponentEnabled(spec *v1.HelmValues, path string) bool {
if enabled, found, _ := spec.GetBool(path + ".enabled"); found {
return enabled
}
return false
}
| isEnabled | identifier_name |
auto.go | //
// (C) Copyright 2020-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package control
import (
"context"
"fmt"
"math"
"sort"
"github.com/pkg/errors"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
)
const (
scmMountPrefix = "/mnt/daos"
scmBdevDir = "/dev"
defaultFiPort = 31416
defaultFiPortInterval = 1000
defaultTargetCount = 16
defaultEngineLogFile = "/tmp/daos_engine"
defaultControlLogFile = "/tmp/daos_server.log"
minDMABuffer = 1024
errNoNuma = "zero numa nodes reported on hosts %s"
errUnsupNetDevClass = "unsupported net dev class in request: %s"
errInsufNrIfaces = "insufficient matching %s network interfaces, want %d got %d %v"
errInsufNrPMemGroups = "insufficient number of pmem device numa groups %v, want %d got %d"
errInvalNrEngines = "unexpected number of engines requested, want %d got %d"
errInsufNrSSDs = "insufficient number of ssds for numa %d, want %d got %d"
errInvalNrCores = "invalid number of cores for numa %d"
)
type (
// ConfigGenerateReq contains the inputs for the request.
ConfigGenerateReq struct {
unaryRequest
msRequest
NrEngines int
MinNrSSDs int
NetClass hardware.NetDevClass
Client UnaryInvoker
HostList []string
AccessPoints []string
Log logging.Logger
}
// ConfigGenerateResp contains the request response.
ConfigGenerateResp struct {
ConfigOut *config.Server
}
// ConfigGenerateError implements the error interface and
// contains a set of host-specific errors encountered while
// attempting to generate a configuration.
ConfigGenerateError struct {
HostErrorsResp
}
)
func (cge *ConfigGenerateError) Error() string {
return cge.Errors().Error()
}
// GetHostErrors returns the wrapped HostErrorsMap.
func (cge *ConfigGenerateError) GetHostErrors() HostErrorsMap {
return cge.HostErrors
}
// IsConfigGenerateError returns true if the provided error is a *ConfigGenerateError.
func IsConfigGenerateError(err error) bool {
_, ok := errors.Cause(err).(*ConfigGenerateError)
return ok
}
// ConfigGenerate attempts to automatically detect hardware and generate a DAOS
// server config file for a set of hosts with homogeneous hardware setup.
//
// Returns API response or error.
func ConfigGenerate(ctx context.Context, req ConfigGenerateReq) (*ConfigGenerateResp, error) {
req.Log.Debugf("ConfigGenerate called with request %+v", req)
if len(req.HostList) == 0 {
return nil, errors.New("no hosts specified")
}
if len(req.AccessPoints) == 0 {
return nil, errors.New("no access points specified")
}
nd, err := getNetworkDetails(ctx, req)
if err != nil {
return nil, err
}
sd, err := getStorageDetails(ctx, req, nd.engineCount)
if err != nil {
return nil, err
}
ccs, err := getCPUDetails(req.Log, sd.numaSSDs, nd.numaCoreCount)
if err != nil {
return nil, err
}
cfg, err := genConfig(req.Log, defaultEngineCfg, req.AccessPoints, nd, sd, ccs)
if err != nil {
return nil, err
}
return &ConfigGenerateResp{ConfigOut: cfg}, nil
}
// getNetworkSet retrieves the result of network scan over host list and
// verifies that there is only a single network set in response which indicates
// that network hardware setup is homogeneous across all hosts.
//
// Return host errors, network scan results for the host set or error.
func getNetworkSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostFabricSet, error) {
	scanReq := new(NetworkScanReq)
	scanReq.SetHostList(hostList)
	scanResp, err := NetworkScan(ctx, client, scanReq)
	if err != nil {
		return nil, err
	}
	// Wrap per-host scan failures so callers can detect them via
	// IsConfigGenerateError.
	if len(scanResp.GetHostErrors()) > 0 {
		return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
	}
	// verify homogeneous network: exactly one fabric set means all hosts agree
	switch len(scanResp.HostFabrics) {
	case 0:
		return nil, errors.New("no host responses")
	case 1: // success
	default: // more than one means non-homogeneous hardware
		log.Info("Heterogeneous network hardware configurations detected, " +
			"cannot proceed. The following sets of hosts have different " +
			"network hardware:")
		for _, hns := range scanResp.HostFabrics {
			log.Info(hns.HostSet.String())
		}
		return nil, errors.New("network hardware not consistent across hosts")
	}
	// Single entry: fetch it via the first (and only) key.
	networkSet := scanResp.HostFabrics[scanResp.HostFabrics.Keys()[0]]
	log.Debugf("Network hardware is consistent for hosts %s:\n\t%v",
		networkSet.HostSet, networkSet.HostFabric.Interfaces)
	return networkSet, nil
}
// numaNetIfaceMap is an alias for a map of NUMA node ID to optimal
// fabric network interface.
type numaNetIfaceMap map[int]*HostFabricInterface

// hasNUMAs reports whether an interface has been recorded for every NUMA node
// ID in the range [0, numaCount).
func (nnim numaNetIfaceMap) hasNUMAs(numaCount int) bool {
	nn := 0
	for nn < numaCount {
		if _, ok := nnim[nn]; !ok {
			return false
		}
		nn++
	}
	return true
}
// classInterfaces is an alias for a map of netdev class ID to slice of
// fabric network interfaces.
type classInterfaces map[hardware.NetDevClass]numaNetIfaceMap

// add records a network device in the bucket for its class and NUMA node
// binding. If an entry already exists for that class/NUMA pair the add is a
// no-op, because interfaces are processed in descending order of performance
// (best first).
func (cis classInterfaces) add(log logging.Logger, iface *HostFabricInterface) {
	numa := int(iface.NumaNode)
	class := iface.NetDevClass
	ifaces, found := cis[class]
	if !found {
		ifaces = make(numaNetIfaceMap)
		cis[class] = ifaces
	}
	if _, taken := ifaces[numa]; taken {
		return // best interface already recorded for this NUMA node
	}
	log.Debugf("%s class iface %s found for NUMA %d", class,
		iface.Device, numa)
	ifaces[numa] = iface
}
// parseInterfaces processes network devices in scan result, adding to a match
// list given the following conditions:
// IF class == (ether OR infiniband) AND requested_class == (ANY OR <class>).
//
// Returns when network devices matching criteria have been found for each
// required NUMA node. The boolean return reports whether the match set is
// complete (one interface for every NUMA node in [0, engineCount)).
func parseInterfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, interfaces []*HostFabricInterface) (numaNetIfaceMap, bool) {
	// sort network interfaces by priority to get best available (lowest first)
	sort.Slice(interfaces, func(i, j int) bool {
		return interfaces[i].Priority < interfaces[j].Priority
	})
	var matches numaNetIfaceMap
	// buckets groups candidates by provider, then by class and NUMA node.
	buckets := make(map[string]classInterfaces)
	for _, iface := range interfaces {
		switch iface.NetDevClass {
		case hardware.Ether, hardware.Infiniband:
			switch reqClass {
			case hardware.NetDevAny, iface.NetDevClass:
			default:
				continue // iface class not requested
			}
		default:
			continue // iface class unsupported
		}
		// init network device slice for a new provider
		if _, exists := buckets[iface.Provider]; !exists {
			buckets[iface.Provider] = make(classInterfaces)
		}
		buckets[iface.Provider].add(log, iface)
		// matches tracks the current candidate set for this provider/class.
		matches = buckets[iface.Provider][iface.NetDevClass]
		if matches.hasNUMAs(engineCount) {
			return matches, true // complete: iface found for every NUMA node
		}
	}
	// Incomplete: return the best partial set found so far.
	return matches, false
}
// getNetIfaces scans fabric network devices and returns a NUMA keyed map for a
// provider/class combination.
func getNetIfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, hfs *HostFabricSet) (numaNetIfaceMap, error) {
	// Only "any", ethernet or infiniband device classes may be requested.
	switch reqClass {
	case hardware.NetDevAny, hardware.Ether, hardware.Infiniband:
	default:
		return nil, errors.Errorf(errUnsupNetDevClass, reqClass.String())
	}
	matchIfaces, complete := parseInterfaces(log, reqClass, engineCount, hfs.HostFabric.Interfaces)
	if !complete {
		// Name the class that was searched for in the error message.
		class := "best-available"
		if reqClass != hardware.NetDevAny {
			class = reqClass.String()
		}
		return nil, errors.Errorf(errInsufNrIfaces, class, engineCount, len(matchIfaces),
			matchIfaces)
	}
	log.Debugf("selected network interfaces: %v", matchIfaces)
	return matchIfaces, nil
}
// networkDetails groups the fabric-derived inputs to config generation.
type networkDetails struct {
	engineCount   int             // number of engines to configure
	numaIfaces    numaNetIfaceMap // chosen fabric interface per NUMA node
	numaCoreCount int             // cores available per NUMA node
}
// getNetworkDetails retrieves recommended network interfaces.
//
// Returns map of NUMA node ID to chosen fabric interfaces, number of engines to
// provide mappings for, per-NUMA core count and any host errors.
func getNetworkDetails(ctx context.Context, req ConfigGenerateReq) (*networkDetails, error) {
	netSet, err := getNetworkSet(ctx, req.Log, req.HostList, req.Client)
	if err != nil {
		return nil, err
	}
	nd := &networkDetails{
		engineCount:   req.NrEngines,
		numaCoreCount: int(netSet.HostFabric.CoresPerNuma),
	}
	// set number of engines if unset based on number of NUMA nodes on hosts
	if nd.engineCount == 0 {
		nd.engineCount = int(netSet.HostFabric.NumaCount)
	}
	// Still zero means the scan reported no NUMA nodes at all.
	if nd.engineCount == 0 {
		return nil, errors.Errorf(errNoNuma, netSet.HostSet)
	}
	req.Log.Debugf("engine count for generated config set to %d", nd.engineCount)
	// Pick one fabric interface per NUMA node for the chosen engine count.
	numaIfaces, err := getNetIfaces(req.Log, req.NetClass, nd.engineCount, netSet)
	if err != nil {
		return nil, err
	}
	nd.numaIfaces = numaIfaces
	return nd, nil
}
// getStorageSet retrieves the result of storage scan over host list and
// verifies that there is only a single storage set in response which indicates
// that storage hardware setup is homogeneous across all hosts.
//
// Filter NVMe storage scan so only NUMA affinity and PCI address is taking into
// account by supplying NvmeBasic flag in scan request. This enables
// configuration to work with different combinations of SSD models.
//
// Return host errors, storage scan results for the host set or error.
func getStorageSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostStorageSet, error) {
scanReq := &StorageScanReq{NvmeBasic: true}
scanReq.SetHostList(hostList)
scanResp, err := StorageScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous storage
switch len(scanResp.HostStorage) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous storage hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"storage hardware:")
for _, hss := range scanResp.HostStorage { | return nil, errors.New("storage hardware not consistent across hosts")
}
storageSet := scanResp.HostStorage[scanResp.HostStorage.Keys()[0]]
log.Debugf("Storage hardware is consistent for hosts %s:\n\t%s\n\t%s",
storageSet.HostSet.String(), storageSet.HostStorage.ScmNamespaces.Summary(),
storageSet.HostStorage.NvmeDevices.Summary())
return storageSet, nil
}
// numaPMemsMap is an alias for a map of NUMA node ID to slice of string sorted
// PMem block device paths.
type numaPMemsMap map[int]sort.StringSlice

// mapPMems builds a NUMA node ID to PMem block-device-path mapping. Each
// node's paths are sorted so that, when multiple devices exist for a node,
// selection is deterministic and favors conventionally named devices.
func mapPMems(nss storage.ScmNamespaces) numaPMemsMap {
	mapping := make(numaPMemsMap)
	for _, namespace := range nss {
		node := int(namespace.NumaNode)
		path := fmt.Sprintf("%s/%s", scmBdevDir, namespace.BlockDevice)
		mapping[node] = append(mapping[node], path)
	}
	for _, paths := range mapping {
		paths.Sort()
	}
	return mapping
}
// numaSSDsMap is an alias for a map of NUMA node ID to slice of NVMe SSD PCI
// addresses.
type numaSSDsMap map[int]sort.StringSlice

// mapSSDs builds a NUMA node ID to NVMe SSD PCI-address mapping, sorting each
// node's addresses for deterministic ordering.
func mapSSDs(ssds storage.NvmeControllers) numaSSDsMap {
	mapping := make(numaSSDsMap)
	for _, ctrlr := range ssds {
		node := int(ctrlr.SocketID)
		mapping[node] = append(mapping[node], ctrlr.PciAddr)
	}
	for _, addrs := range mapping {
		addrs.Sort()
	}
	return mapping
}
// storageDetails groups the storage-derived inputs to config generation.
type storageDetails struct {
	hugePageSize int          // hugepage size from scan (HugePageInfo.PageSizeKb)
	numaPMems    numaPMemsMap // PMem block device paths per NUMA node
	numaSSDs     numaSSDsMap  // NVMe SSD PCI addresses per NUMA node
}
// validate checks sufficient PMem devices and SSD NUMA groups exist for the
// required number of engines. Minimum thresholds for SSD group size is also
// checked.
func (sd *storageDetails) validate(log logging.Logger, engineCount int, minNrSSDs int) error {
	log.Debugf("numa to pmem mappings: %v", sd.numaPMems)
	// One PMem NUMA group is required per engine.
	if len(sd.numaPMems) < engineCount {
		return errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, engineCount, len(sd.numaPMems))
	}
	if minNrSSDs == 0 {
		// set empty ssd lists and skip validation
		log.Debug("nvme disabled, skip validation")
		for nn := 0; nn < engineCount; nn++ {
			sd.numaSSDs[nn] = []string{}
		}
		return nil
	}
	for nn := 0; nn < engineCount; nn++ {
		// ssds is nil (len 0) when no entry exists for this NUMA node.
		ssds, exists := sd.numaSSDs[nn]
		if !exists {
			sd.numaSSDs[nn] = []string{} // populate empty lists for missing entries
		}
		log.Debugf("ssds bound to numa %d: %v", nn, ssds)
		// Enforce the per-engine minimum SSD threshold.
		if len(ssds) < minNrSSDs {
			return errors.Errorf(errInsufNrSSDs, nn, minNrSSDs, len(ssds))
		}
	}
	return nil
}
// getStorageDetails retrieves mappings of NUMA node to PMem and NVMe SSD
// devices.
//
// Returns storage details struct or host error response and outer error.
func getStorageDetails(ctx context.Context, req ConfigGenerateReq, engineCount int) (*storageDetails, error) {
	// Caller must have resolved a positive engine count first.
	if engineCount < 1 {
		return nil, errors.Errorf(errInvalNrEngines, 1, engineCount)
	}
	storageSet, err := getStorageSet(ctx, req.Log, req.HostList, req.Client)
	if err != nil {
		return nil, err
	}
	sd := &storageDetails{
		numaPMems:    mapPMems(storageSet.HostStorage.ScmNamespaces),
		numaSSDs:     mapSSDs(storageSet.HostStorage.NvmeDevices),
		hugePageSize: storageSet.HostStorage.HugePageInfo.PageSizeKb,
	}
	// Ensure device counts are sufficient for the requested engine count.
	if err := sd.validate(req.Log, engineCount, req.MinNrSSDs); err != nil {
		return nil, err
	}
	return sd, nil
}
// calcHelpers derives the offload (helper) thread count from the cores left
// over after reserving targets plus one system core, clamping the result to
// strictly fewer helpers than targets.
func calcHelpers(log logging.Logger, targets, cores int) int {
	available := cores - targets - 1
	if available <= 1 {
		return available
	}
	if available <= targets {
		return available
	}
	log.Debugf("adjusting num helpers (%d) to < num targets (%d), new: %d",
		available, targets, targets-1)
	return targets - 1
}
// coreCounts holds the per-engine thread counts derived from hardware.
type coreCounts struct {
	nrTgts  int // I/O service (target) thread count
	nrHlprs int // offload (helper) thread count
}

// numaCoreCountsMap is an alias for a map of NUMA node ID to calculated target
// and helper core counts.
type numaCoreCountsMap map[int]*coreCounts
// checkCPUs validates and returns recommended values for I/O service and
// offload thread counts.
//
// The target count should be a multiplier of the number of SSDs and typically
// daos gets the best performance with 16x targets per I/O Engine so target
// count will typically be between 12 and 20.
//
// Validate number of targets + 1 cores are available per IO engine, not
// usually a problem as sockets normally have at least 18 cores.
//
// Create helper threads for the remaining available cores, e.g. with 24 cores,
// allocate 7 helper threads. Number of helper threads should never be more than
// number of targets.
func checkCPUs(log logging.Logger, numSSDs, numaCoreCount int) (*coreCounts, error) {
	var numTargets int
	if numSSDs == 0 {
		// No SSDs: fall back to the default target count, capped so at
		// least one core remains free.
		numTargets = defaultTargetCount
		if numTargets >= numaCoreCount {
			return &coreCounts{
				nrTgts:  numaCoreCount - 1,
				nrHlprs: 0,
			}, nil
		}
		return &coreCounts{
			nrTgts:  numTargets,
			nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
		}, nil
	}
	// Each SSD needs at least one dedicated target core.
	if numSSDs >= numaCoreCount {
		return nil, errors.Errorf("need more cores than ssds, got %d want %d",
			numaCoreCount, numSSDs)
	}
	// Pick the largest multiple of numSSDs below the per-NUMA core count so
	// targets divide evenly across SSDs.
	for tgts := numSSDs; tgts < numaCoreCount; tgts += numSSDs {
		numTargets = tgts
	}
	log.Debugf("%d targets assigned with %d ssds", numTargets, numSSDs)
	return &coreCounts{
		nrTgts:  numTargets,
		nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
	}, nil
}
// getCPUDetails computes recommended I/O service and offload thread counts for
// each NUMA node, suitable for the server config file.
//
// Returns a map of NUMA node ID to core counts, or an error.
func getCPUDetails(log logging.Logger, numaSSDs numaSSDsMap, coresPerNuma int) (numaCoreCountsMap, error) {
	if coresPerNuma < 1 {
		return nil, errors.Errorf(errInvalNrCores, coresPerNuma)
	}
	out := make(numaCoreCountsMap)
	for numaID, ssds := range numaSSDs {
		cc, err := checkCPUs(log, len(ssds), coresPerNuma)
		if err != nil {
			return nil, err
		}
		out[numaID] = cc
	}
	return out, nil
}
// newEngineCfgFn produces a baseline engine config for the given engine index.
type newEngineCfgFn func(int) *engine.Config

// defaultEngineCfg returns the baseline config for engine idx with the default
// target count and an index-specific log file path.
func defaultEngineCfg(idx int) *engine.Config {
	return engine.NewConfig().
		WithTargetCount(defaultTargetCount).
		WithLogFile(fmt.Sprintf("%s.%d.log", defaultEngineLogFile, idx))
}
// genConfig generates server config file from details of network, storage and CPU hardware after
// performing some basic sanity checks.
func genConfig(log logging.Logger, newEngineCfg newEngineCfgFn, accessPoints []string, nd *networkDetails, sd *storageDetails, ccs numaCoreCountsMap) (*config.Server, error) {
if nd.engineCount == 0 {
return nil, errors.Errorf(errInvalNrEngines, 1, 0)
}
if len(nd.numaIfaces) < nd.engineCount {
return nil, errors.Errorf(errInsufNrIfaces, "", nd.engineCount,
len(nd.numaIfaces), nd.numaIfaces)
}
if len(sd.numaPMems) < nd.engineCount {
return nil, errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, nd.engineCount,
len(sd.numaPMems))
}
// enforce consistent ssd count across engine configs
minSsds := math.MaxUint32
numaWithMinSsds := 0
if len(sd.numaSSDs) > 0 {
if len(sd.numaSSDs) < nd.engineCount {
return nil, errors.New("invalid number of ssd groups") // should never happen
}
for numa, ssds := range sd.numaSSDs {
if len(ssds) < minSsds {
minSsds = len(ssds)
numaWithMinSsds = numa
}
}
}
if len(ccs) < nd.engineCount {
return nil, errors.New("invalid number of core count groups") // should never happen
}
// enforce consistent target and helper count across engine configs
nrTgts := ccs[numaWithMinSsds].nrTgts
nrHlprs := ccs[numaWithMinSsds].nrHlprs
engines := make([]*engine.Config, 0, nd.engineCount)
for nn := 0; nn < nd.engineCount; nn++ {
engineCfg := newEngineCfg(nn).
WithTargetCount(nrTgts).
WithHelperStreamCount(nrHlprs)
if len(sd.numaPMems) > 0 {
engineCfg.WithStorage(
storage.NewTierConfig().
WithStorageClass(storage.ClassDcpm.String()).
WithScmMountPoint(fmt.Sprintf("%s%d", scmMountPrefix, nn)).
WithScmDeviceList(sd.numaPMems[nn][0]),
)
}
if len(sd.numaSSDs) > 0 && len(sd.numaSSDs[nn]) > 0 {
engineCfg.WithStorage(
storage.NewTierConfig().
WithStorageClass(storage.ClassNvme.String()).
WithBdevDeviceList(sd.numaSSDs[nn][:minSsds]...),
)
}
pnn := uint(nn)
engineCfg.PinnedNumaNode = &pnn
engineCfg.Fabric = engine.FabricConfig{
Provider: nd.numaIfaces[nn].Provider,
Interface: nd.numaIfaces[nn].Device,
InterfacePort: int(defaultFiPort + (nn * defaultFiPortInterval)),
}
engines = append(engines, engineCfg)
}
numTargets := 0
for _, e := range engines {
numTargets += e.TargetCount
}
reqHugePages, err := common.CalcMinHugePages(sd.hugePageSize, numTargets)
if err != nil {
return nil, errors.Wrap(err, "unable to calculate minimum hugepages")
}
cfg := config.DefaultServer().
WithAccessPoints(accessPoints...).
WithFabricProvider(engines[0].Fabric.Provider).
WithEngines(engines...).
WithControlLogFile(defaultControlLogFile).
WithNrHugePages(reqHugePages)
return cfg, cfg.Validate(log, sd.hugePageSize, nil)
} | log.Info(hss.HostSet.String())
}
| random_line_split |
auto.go | //
// (C) Copyright 2020-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package control
import (
"context"
"fmt"
"math"
"sort"
"github.com/pkg/errors"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
)
const (
	// defaults used when assembling the generated server config
	scmMountPrefix        = "/mnt/daos"
	scmBdevDir            = "/dev"
	defaultFiPort         = 31416
	defaultFiPortInterval = 1000
	defaultTargetCount    = 16
	defaultEngineLogFile  = "/tmp/daos_engine"
	defaultControlLogFile = "/tmp/daos_server.log"
	minDMABuffer          = 1024

	// error format strings shared across validation sites in this file
	errNoNuma            = "zero numa nodes reported on hosts %s"
	errUnsupNetDevClass  = "unsupported net dev class in request: %s"
	errInsufNrIfaces     = "insufficient matching %s network interfaces, want %d got %d %v"
	errInsufNrPMemGroups = "insufficient number of pmem device numa groups %v, want %d got %d"
	errInvalNrEngines    = "unexpected number of engines requested, want %d got %d"
	errInsufNrSSDs       = "insufficient number of ssds for numa %d, want %d got %d"
	errInvalNrCores      = "invalid number of cores for numa %d"
)
type (
	// ConfigGenerateReq contains the inputs for the request.
	ConfigGenerateReq struct {
		unaryRequest
		msRequest
		NrEngines    int                  // engines to configure; 0 = derive from host NUMA node count
		MinNrSSDs    int                  // minimum SSDs required per engine; 0 disables NVMe validation
		NetClass     hardware.NetDevClass // requested fabric device class (any/ether/infiniband)
		Client       UnaryInvoker         // invoker used for remote network/storage scans
		HostList     []string             // hosts to scan; must be non-empty
		AccessPoints []string             // access points for generated config; must be non-empty
		Log          logging.Logger       // destination for progress and debug output
	}
	// ConfigGenerateResp contains the request response.
	ConfigGenerateResp struct {
		ConfigOut *config.Server // the generated server configuration
	}
	// ConfigGenerateError implements the error interface and
	// contains a set of host-specific errors encountered while
	// attempting to generate a configuration.
	ConfigGenerateError struct {
		HostErrorsResp // embedded per-host error details
	}
)
// Error implements the error interface, returning the string form of the
// aggregated per-host errors.
func (cge *ConfigGenerateError) Error() string {
	return cge.Errors().Error()
}
// GetHostErrors returns the wrapped HostErrorsMap so callers can inspect
// failures on a per-host basis.
func (cge *ConfigGenerateError) GetHostErrors() HostErrorsMap {
	return cge.HostErrors
}
// IsConfigGenerateError returns true if the provided error is a *ConfigGenerateError.
func IsConfigGenerateError(err error) bool {
	switch errors.Cause(err).(type) {
	case *ConfigGenerateError:
		return true
	}
	return false
}
// ConfigGenerate attempts to automatically detect hardware and generate a DAOS
// server config file for a set of hosts with homogeneous hardware setup.
//
// Returns API response or error.
func ConfigGenerate(ctx context.Context, req ConfigGenerateReq) (*ConfigGenerateResp, error) {
req.Log.Debugf("ConfigGenerate called with request %+v", req)
if len(req.HostList) == 0 {
return nil, errors.New("no hosts specified")
}
if len(req.AccessPoints) == 0 {
return nil, errors.New("no access points specified")
}
nd, err := getNetworkDetails(ctx, req)
if err != nil {
return nil, err
}
sd, err := getStorageDetails(ctx, req, nd.engineCount)
if err != nil {
return nil, err
}
ccs, err := getCPUDetails(req.Log, sd.numaSSDs, nd.numaCoreCount)
if err != nil {
return nil, err
}
cfg, err := genConfig(req.Log, defaultEngineCfg, req.AccessPoints, nd, sd, ccs)
if err != nil {
return nil, err
}
return &ConfigGenerateResp{ConfigOut: cfg}, nil
}
// getNetworkSet retrieves the result of network scan over host list and
// verifies that there is only a single network set in response which indicates
// that network hardware setup is homogeneous across all hosts.
//
// Return host errors, network scan results for the host set or error.
func getNetworkSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostFabricSet, error) {
scanReq := new(NetworkScanReq)
scanReq.SetHostList(hostList)
scanResp, err := NetworkScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous network
switch len(scanResp.HostFabrics) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous network hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"network hardware:")
for _, hns := range scanResp.HostFabrics {
log.Info(hns.HostSet.String())
}
return nil, errors.New("network hardware not consistent across hosts")
}
networkSet := scanResp.HostFabrics[scanResp.HostFabrics.Keys()[0]]
log.Debugf("Network hardware is consistent for hosts %s:\n\t%v",
networkSet.HostSet, networkSet.HostFabric.Interfaces)
return networkSet, nil
}
// numaNetIfaceMap is an alias for a map of NUMA node ID to optimal
// fabric network interface.
type numaNetIfaceMap map[int]*HostFabricInterface

// hasNUMAs reports whether an interface has been recorded for every NUMA node
// ID in the range [0, numaCount).
func (nnim numaNetIfaceMap) hasNUMAs(numaCount int) bool {
	nn := 0
	for nn < numaCount {
		if _, ok := nnim[nn]; !ok {
			return false
		}
		nn++
	}
	return true
}
// classInterfaces is an alias for a map of netdev class ID to slice of
// fabric network interfaces.
type classInterfaces map[hardware.NetDevClass]numaNetIfaceMap

// add records a network device in the bucket for its class and NUMA node
// binding. If an entry already exists for that class/NUMA pair the add is a
// no-op, because interfaces are processed in descending order of performance
// (best first).
func (cis classInterfaces) add(log logging.Logger, iface *HostFabricInterface) {
	numa := int(iface.NumaNode)
	class := iface.NetDevClass
	ifaces, found := cis[class]
	if !found {
		ifaces = make(numaNetIfaceMap)
		cis[class] = ifaces
	}
	if _, taken := ifaces[numa]; taken {
		return // best interface already recorded for this NUMA node
	}
	log.Debugf("%s class iface %s found for NUMA %d", class,
		iface.Device, numa)
	ifaces[numa] = iface
}
// parseInterfaces processes network devices in scan result, adding to a match
// list given the following conditions:
// IF class == (ether OR infiniband) AND requested_class == (ANY OR <class>).
//
// Returns when network devices matching criteria have been found for each
// required NUMA node.
func parseInterfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, interfaces []*HostFabricInterface) (numaNetIfaceMap, bool) {
// sort network interfaces by priority to get best available
sort.Slice(interfaces, func(i, j int) bool {
return interfaces[i].Priority < interfaces[j].Priority
})
var matches numaNetIfaceMap
buckets := make(map[string]classInterfaces)
for _, iface := range interfaces {
switch iface.NetDevClass {
case hardware.Ether, hardware.Infiniband:
switch reqClass {
case hardware.NetDevAny, iface.NetDevClass:
default:
continue // iface class not requested
}
default:
continue // iface class unsupported
}
// init network device slice for a new provider
if _, exists := buckets[iface.Provider]; !exists {
buckets[iface.Provider] = make(classInterfaces)
}
buckets[iface.Provider].add(log, iface)
matches = buckets[iface.Provider][iface.NetDevClass]
if matches.hasNUMAs(engineCount) {
return matches, true
}
}
return matches, false
}
// getNetIfaces scans fabric network devices and returns a NUMA keyed map for a
// provider/class combination.
func getNetIfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, hfs *HostFabricSet) (numaNetIfaceMap, error) {
switch reqClass {
case hardware.NetDevAny, hardware.Ether, hardware.Infiniband:
default:
return nil, errors.Errorf(errUnsupNetDevClass, reqClass.String())
}
matchIfaces, complete := parseInterfaces(log, reqClass, engineCount, hfs.HostFabric.Interfaces)
if !complete {
class := "best-available"
if reqClass != hardware.NetDevAny {
class = reqClass.String()
}
return nil, errors.Errorf(errInsufNrIfaces, class, engineCount, len(matchIfaces),
matchIfaces)
}
log.Debugf("selected network interfaces: %v", matchIfaces)
return matchIfaces, nil
}
// networkDetails groups the fabric-derived inputs to config generation.
type networkDetails struct {
	engineCount   int             // number of engines to configure
	numaIfaces    numaNetIfaceMap // chosen fabric interface per NUMA node
	numaCoreCount int             // cores available per NUMA node
}
// getNetworkDetails retrieves recommended network interfaces.
//
// Returns map of NUMA node ID to chosen fabric interfaces, number of engines to
// provide mappings for, per-NUMA core count and any host errors.
func getNetworkDetails(ctx context.Context, req ConfigGenerateReq) (*networkDetails, error) {
netSet, err := getNetworkSet(ctx, req.Log, req.HostList, req.Client)
if err != nil {
return nil, err
}
nd := &networkDetails{
engineCount: req.NrEngines,
numaCoreCount: int(netSet.HostFabric.CoresPerNuma),
}
// set number of engines if unset based on number of NUMA nodes on hosts
if nd.engineCount == 0 {
nd.engineCount = int(netSet.HostFabric.NumaCount)
}
if nd.engineCount == 0 {
return nil, errors.Errorf(errNoNuma, netSet.HostSet)
}
req.Log.Debugf("engine count for generated config set to %d", nd.engineCount)
numaIfaces, err := getNetIfaces(req.Log, req.NetClass, nd.engineCount, netSet)
if err != nil {
return nil, err
}
nd.numaIfaces = numaIfaces
return nd, nil
}
// getStorageSet retrieves the result of storage scan over host list and
// verifies that there is only a single storage set in response which indicates
// that storage hardware setup is homogeneous across all hosts.
//
// Filter NVMe storage scan so only NUMA affinity and PCI address is taking into
// account by supplying NvmeBasic flag in scan request. This enables
// configuration to work with different combinations of SSD models.
//
// Return host errors, storage scan results for the host set or error.
func getStorageSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostStorageSet, error) {
scanReq := &StorageScanReq{NvmeBasic: true}
scanReq.SetHostList(hostList)
scanResp, err := StorageScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous storage
switch len(scanResp.HostStorage) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous storage hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"storage hardware:")
for _, hss := range scanResp.HostStorage {
log.Info(hss.HostSet.String())
}
return nil, errors.New("storage hardware not consistent across hosts")
}
storageSet := scanResp.HostStorage[scanResp.HostStorage.Keys()[0]]
log.Debugf("Storage hardware is consistent for hosts %s:\n\t%s\n\t%s",
storageSet.HostSet.String(), storageSet.HostStorage.ScmNamespaces.Summary(),
storageSet.HostStorage.NvmeDevices.Summary())
return storageSet, nil
}
// numaPMemsMap is an alias for a map of NUMA node ID to slice of string sorted
// PMem block device paths.
type numaPMemsMap map[int]sort.StringSlice

// mapPMems builds a NUMA node ID to PMem block-device-path mapping. Each
// node's paths are sorted so that, when multiple devices exist for a node,
// selection is deterministic and favors conventionally named devices.
func mapPMems(nss storage.ScmNamespaces) numaPMemsMap {
	mapping := make(numaPMemsMap)
	for _, namespace := range nss {
		node := int(namespace.NumaNode)
		path := fmt.Sprintf("%s/%s", scmBdevDir, namespace.BlockDevice)
		mapping[node] = append(mapping[node], path)
	}
	for _, paths := range mapping {
		paths.Sort()
	}
	return mapping
}
// numaSSDsMap is an alias for a map of NUMA node ID to slice of NVMe SSD PCI
// addresses.
type numaSSDsMap map[int]sort.StringSlice

// mapSSDs builds a NUMA node ID to NVMe SSD PCI-address mapping, sorting each
// node's addresses for deterministic ordering.
func mapSSDs(ssds storage.NvmeControllers) numaSSDsMap {
	mapping := make(numaSSDsMap)
	for _, ctrlr := range ssds {
		node := int(ctrlr.SocketID)
		mapping[node] = append(mapping[node], ctrlr.PciAddr)
	}
	for _, addrs := range mapping {
		addrs.Sort()
	}
	return mapping
}
// storageDetails groups the storage-derived inputs to config generation.
type storageDetails struct {
	hugePageSize int          // hugepage size from scan (HugePageInfo.PageSizeKb)
	numaPMems    numaPMemsMap // PMem block device paths per NUMA node
	numaSSDs     numaSSDsMap  // NVMe SSD PCI addresses per NUMA node
}
// validate checks sufficient PMem devices and SSD NUMA groups exist for the
// required number of engines. Minimum thresholds for SSD group size is also
// checked.
func (sd *storageDetails) validate(log logging.Logger, engineCount int, minNrSSDs int) error {
log.Debugf("numa to pmem mappings: %v", sd.numaPMems)
if len(sd.numaPMems) < engineCount {
return errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, engineCount, len(sd.numaPMems))
}
if minNrSSDs == 0 {
// set empty ssd lists and skip validation
log.Debug("nvme disabled, skip validation")
for nn := 0; nn < engineCount; nn++ {
sd.numaSSDs[nn] = []string{}
}
return nil
}
for nn := 0; nn < engineCount; nn++ {
ssds, exists := sd.numaSSDs[nn]
if !exists {
sd.numaSSDs[nn] = []string{} // populate empty lists for missing entries
}
log.Debugf("ssds bound to numa %d: %v", nn, ssds)
if len(ssds) < minNrSSDs {
return errors.Errorf(errInsufNrSSDs, nn, minNrSSDs, len(ssds))
}
}
return nil
}
// getStorageDetails retrieves mappings of NUMA node to PMem and NVMe SSD
// devices.
//
// Returns storage details struct or host error response and outer error.
func getStorageDetails(ctx context.Context, req ConfigGenerateReq, engineCount int) (*storageDetails, error) {
if engineCount < 1 {
return nil, errors.Errorf(errInvalNrEngines, 1, engineCount)
}
storageSet, err := getStorageSet(ctx, req.Log, req.HostList, req.Client)
if err != nil {
return nil, err
}
sd := &storageDetails{
numaPMems: mapPMems(storageSet.HostStorage.ScmNamespaces),
numaSSDs: mapSSDs(storageSet.HostStorage.NvmeDevices),
hugePageSize: storageSet.HostStorage.HugePageInfo.PageSizeKb,
}
if err := sd.validate(req.Log, engineCount, req.MinNrSSDs); err != nil {
return nil, err
}
return sd, nil
}
// calcHelpers derives the offload (helper) thread count from the cores left
// over after reserving targets plus one system core, clamping the result to
// strictly fewer helpers than targets.
func calcHelpers(log logging.Logger, targets, cores int) int {
	available := cores - targets - 1
	if available <= 1 {
		return available
	}
	if available <= targets {
		return available
	}
	log.Debugf("adjusting num helpers (%d) to < num targets (%d), new: %d",
		available, targets, targets-1)
	return targets - 1
}
// coreCounts holds the per-engine thread counts derived from hardware.
type coreCounts struct {
	nrTgts  int // I/O service (target) thread count
	nrHlprs int // offload (helper) thread count
}

// numaCoreCountsMap is an alias for a map of NUMA node ID to calculated target
// and helper core counts.
type numaCoreCountsMap map[int]*coreCounts
// checkCPUs validates and returns recommended values for I/O service and
// offload thread counts.
//
// The target count should be a multiplier of the number of SSDs and typically
// daos gets the best performance with 16x targets per I/O Engine so target
// count will typically be between 12 and 20.
//
// Validate number of targets + 1 cores are available per IO engine, not
// usually a problem as sockets normally have at least 18 cores.
//
// Create helper threads for the remaining available cores, e.g. with 24 cores,
// allocate 7 helper threads. Number of helper threads should never be more than
// number of targets.
func checkCPUs(log logging.Logger, numSSDs, numaCoreCount int) (*coreCounts, error) {
var numTargets int
if numSSDs == 0 {
numTargets = defaultTargetCount
if numTargets >= numaCoreCount {
return &coreCounts{
nrTgts: numaCoreCount - 1,
nrHlprs: 0,
}, nil
}
return &coreCounts{
nrTgts: numTargets,
nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
}, nil
}
if numSSDs >= numaCoreCount {
return nil, errors.Errorf("need more cores than ssds, got %d want %d",
numaCoreCount, numSSDs)
}
for tgts := numSSDs; tgts < numaCoreCount; tgts += numSSDs {
numTargets = tgts
}
log.Debugf("%d targets assigned with %d ssds", numTargets, numSSDs)
return &coreCounts{
nrTgts: numTargets,
nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
}, nil
}
// getCPUDetails retrieves recommended values for I/O service and offload
// threads suitable for the server config file.
//
// Returns core counts struct or error.
func getCPUDetails(log logging.Logger, numaSSDs numaSSDsMap, coresPerNuma int) (numaCoreCountsMap, error) {
if coresPerNuma < 1 {
return nil, errors.Errorf(errInvalNrCores, coresPerNuma)
}
numaCoreCounts := make(numaCoreCountsMap)
for numaID, ssds := range numaSSDs {
coreCounts, err := checkCPUs(log, len(ssds), coresPerNuma)
if err != nil |
numaCoreCounts[numaID] = coreCounts
}
return numaCoreCounts, nil
}
// newEngineCfgFn produces a baseline engine config for the given engine index.
type newEngineCfgFn func(int) *engine.Config

// defaultEngineCfg returns the baseline config for engine idx with the default
// target count and an index-specific log file path.
func defaultEngineCfg(idx int) *engine.Config {
	return engine.NewConfig().
		WithTargetCount(defaultTargetCount).
		WithLogFile(fmt.Sprintf("%s.%d.log", defaultEngineLogFile, idx))
}
// genConfig generates server config file from details of network, storage and CPU hardware after
// performing some basic sanity checks.
func genConfig(log logging.Logger, newEngineCfg newEngineCfgFn, accessPoints []string, nd *networkDetails, sd *storageDetails, ccs numaCoreCountsMap) (*config.Server, error) {
if nd.engineCount == 0 {
return nil, errors.Errorf(errInvalNrEngines, 1, 0)
}
if len(nd.numaIfaces) < nd.engineCount {
return nil, errors.Errorf(errInsufNrIfaces, "", nd.engineCount,
len(nd.numaIfaces), nd.numaIfaces)
}
if len(sd.numaPMems) < nd.engineCount {
return nil, errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, nd.engineCount,
len(sd.numaPMems))
}
// enforce consistent ssd count across engine configs
minSsds := math.MaxUint32
numaWithMinSsds := 0
if len(sd.numaSSDs) > 0 {
if len(sd.numaSSDs) < nd.engineCount {
return nil, errors.New("invalid number of ssd groups") // should never happen
}
for numa, ssds := range sd.numaSSDs {
if len(ssds) < minSsds {
minSsds = len(ssds)
numaWithMinSsds = numa
}
}
}
if len(ccs) < nd.engineCount {
return nil, errors.New("invalid number of core count groups") // should never happen
}
// enforce consistent target and helper count across engine configs
nrTgts := ccs[numaWithMinSsds].nrTgts
nrHlprs := ccs[numaWithMinSsds].nrHlprs
engines := make([]*engine.Config, 0, nd.engineCount)
for nn := 0; nn < nd.engineCount; nn++ {
engineCfg := newEngineCfg(nn).
WithTargetCount(nrTgts).
WithHelperStreamCount(nrHlprs)
if len(sd.numaPMems) > 0 {
engineCfg.WithStorage(
storage.NewTierConfig().
WithStorageClass(storage.ClassDcpm.String()).
WithScmMountPoint(fmt.Sprintf("%s%d", scmMountPrefix, nn)).
WithScmDeviceList(sd.numaPMems[nn][0]),
)
}
if len(sd.numaSSDs) > 0 && len(sd.numaSSDs[nn]) > 0 {
engineCfg.WithStorage(
storage.NewTierConfig().
WithStorageClass(storage.ClassNvme.String()).
WithBdevDeviceList(sd.numaSSDs[nn][:minSsds]...),
)
}
pnn := uint(nn)
engineCfg.PinnedNumaNode = &pnn
engineCfg.Fabric = engine.FabricConfig{
Provider: nd.numaIfaces[nn].Provider,
Interface: nd.numaIfaces[nn].Device,
InterfacePort: int(defaultFiPort + (nn * defaultFiPortInterval)),
}
engines = append(engines, engineCfg)
}
numTargets := 0
for _, e := range engines {
numTargets += e.TargetCount
}
reqHugePages, err := common.CalcMinHugePages(sd.hugePageSize, numTargets)
if err != nil {
return nil, errors.Wrap(err, "unable to calculate minimum hugepages")
}
cfg := config.DefaultServer().
WithAccessPoints(accessPoints...).
WithFabricProvider(engines[0].Fabric.Provider).
WithEngines(engines...).
WithControlLogFile(defaultControlLogFile).
WithNrHugePages(reqHugePages)
return cfg, cfg.Validate(log, sd.hugePageSize, nil)
}
| {
return nil, err
} | conditional_block |
auto.go | //
// (C) Copyright 2020-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package control
import (
"context"
"fmt"
"math"
"sort"
"github.com/pkg/errors"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
)
const (
scmMountPrefix = "/mnt/daos"
scmBdevDir = "/dev"
defaultFiPort = 31416
defaultFiPortInterval = 1000
defaultTargetCount = 16
defaultEngineLogFile = "/tmp/daos_engine"
defaultControlLogFile = "/tmp/daos_server.log"
minDMABuffer = 1024
errNoNuma = "zero numa nodes reported on hosts %s"
errUnsupNetDevClass = "unsupported net dev class in request: %s"
errInsufNrIfaces = "insufficient matching %s network interfaces, want %d got %d %v"
errInsufNrPMemGroups = "insufficient number of pmem device numa groups %v, want %d got %d"
errInvalNrEngines = "unexpected number of engines requested, want %d got %d"
errInsufNrSSDs = "insufficient number of ssds for numa %d, want %d got %d"
errInvalNrCores = "invalid number of cores for numa %d"
)
type (
// ConfigGenerateReq contains the inputs for the request.
ConfigGenerateReq struct {
unaryRequest
msRequest
NrEngines int
MinNrSSDs int
NetClass hardware.NetDevClass
Client UnaryInvoker
HostList []string
AccessPoints []string
Log logging.Logger
}
// ConfigGenerateResp contains the request response.
ConfigGenerateResp struct {
ConfigOut *config.Server
}
// ConfigGenerateError implements the error interface and
// contains a set of host-specific errors encountered while
// attempting to generate a configuration.
ConfigGenerateError struct {
HostErrorsResp
}
)
func (cge *ConfigGenerateError) Error() string {
return cge.Errors().Error()
}
// GetHostErrors returns the wrapped HostErrorsMap.
func (cge *ConfigGenerateError) GetHostErrors() HostErrorsMap {
return cge.HostErrors
}
// IsConfigGenerateError returns true if the provided error is a *ConfigGenerateError.
func | (err error) bool {
_, ok := errors.Cause(err).(*ConfigGenerateError)
return ok
}
// ConfigGenerate attempts to automatically detect hardware and generate a DAOS
// server config file for a set of hosts with homogeneous hardware setup.
//
// Returns API response or error.
func ConfigGenerate(ctx context.Context, req ConfigGenerateReq) (*ConfigGenerateResp, error) {
req.Log.Debugf("ConfigGenerate called with request %+v", req)
if len(req.HostList) == 0 {
return nil, errors.New("no hosts specified")
}
if len(req.AccessPoints) == 0 {
return nil, errors.New("no access points specified")
}
nd, err := getNetworkDetails(ctx, req)
if err != nil {
return nil, err
}
sd, err := getStorageDetails(ctx, req, nd.engineCount)
if err != nil {
return nil, err
}
ccs, err := getCPUDetails(req.Log, sd.numaSSDs, nd.numaCoreCount)
if err != nil {
return nil, err
}
cfg, err := genConfig(req.Log, defaultEngineCfg, req.AccessPoints, nd, sd, ccs)
if err != nil {
return nil, err
}
return &ConfigGenerateResp{ConfigOut: cfg}, nil
}
// getNetworkSet retrieves the result of network scan over host list and
// verifies that there is only a single network set in response which indicates
// that network hardware setup is homogeneous across all hosts.
//
// Return host errors, network scan results for the host set or error.
func getNetworkSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostFabricSet, error) {
scanReq := new(NetworkScanReq)
scanReq.SetHostList(hostList)
scanResp, err := NetworkScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous network
switch len(scanResp.HostFabrics) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous network hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"network hardware:")
for _, hns := range scanResp.HostFabrics {
log.Info(hns.HostSet.String())
}
return nil, errors.New("network hardware not consistent across hosts")
}
networkSet := scanResp.HostFabrics[scanResp.HostFabrics.Keys()[0]]
log.Debugf("Network hardware is consistent for hosts %s:\n\t%v",
networkSet.HostSet, networkSet.HostFabric.Interfaces)
return networkSet, nil
}
// numaNetIfaceMap is an alias for a map of NUMA node ID to optimal
// fabric network interface.
type numaNetIfaceMap map[int]*HostFabricInterface
// hasNUMAs returns true if interfaces exist for given NUMA node range.
func (nnim numaNetIfaceMap) hasNUMAs(numaCount int) bool {
for nn := 0; nn < numaCount; nn++ {
if _, exists := nnim[nn]; !exists {
return false
}
}
return true
}
// classInterfaces is an alias for a map of netdev class ID to slice of
// fabric network interfaces.
type classInterfaces map[hardware.NetDevClass]numaNetIfaceMap
// add network device to bucket corresponding to provider, network class type and
// NUMA node binding. Ignore add if there is an existing entry as the interfaces
// are processed in descending order of performance (best first).
func (cis classInterfaces) add(log logging.Logger, iface *HostFabricInterface) {
nn := int(iface.NumaNode)
if _, exists := cis[iface.NetDevClass]; !exists {
cis[iface.NetDevClass] = make(numaNetIfaceMap)
}
if _, exists := cis[iface.NetDevClass][nn]; exists {
return // already have interface for this NUMA
}
log.Debugf("%s class iface %s found for NUMA %d", iface.NetDevClass,
iface.Device, nn)
cis[iface.NetDevClass][nn] = iface
}
// parseInterfaces processes network devices in scan result, adding to a match
// list given the following conditions:
// IF class == (ether OR infiniband) AND requested_class == (ANY OR <class>).
//
// Returns when network devices matching criteria have been found for each
// required NUMA node.
func parseInterfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, interfaces []*HostFabricInterface) (numaNetIfaceMap, bool) {
// sort network interfaces by priority to get best available
sort.Slice(interfaces, func(i, j int) bool {
return interfaces[i].Priority < interfaces[j].Priority
})
var matches numaNetIfaceMap
buckets := make(map[string]classInterfaces)
for _, iface := range interfaces {
switch iface.NetDevClass {
case hardware.Ether, hardware.Infiniband:
switch reqClass {
case hardware.NetDevAny, iface.NetDevClass:
default:
continue // iface class not requested
}
default:
continue // iface class unsupported
}
// init network device slice for a new provider
if _, exists := buckets[iface.Provider]; !exists {
buckets[iface.Provider] = make(classInterfaces)
}
buckets[iface.Provider].add(log, iface)
matches = buckets[iface.Provider][iface.NetDevClass]
if matches.hasNUMAs(engineCount) {
return matches, true
}
}
return matches, false
}
// getNetIfaces scans fabric network devices and returns a NUMA keyed map for a
// provider/class combination.
func getNetIfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, hfs *HostFabricSet) (numaNetIfaceMap, error) {
switch reqClass {
case hardware.NetDevAny, hardware.Ether, hardware.Infiniband:
default:
return nil, errors.Errorf(errUnsupNetDevClass, reqClass.String())
}
matchIfaces, complete := parseInterfaces(log, reqClass, engineCount, hfs.HostFabric.Interfaces)
if !complete {
class := "best-available"
if reqClass != hardware.NetDevAny {
class = reqClass.String()
}
return nil, errors.Errorf(errInsufNrIfaces, class, engineCount, len(matchIfaces),
matchIfaces)
}
log.Debugf("selected network interfaces: %v", matchIfaces)
return matchIfaces, nil
}
type networkDetails struct {
engineCount int
numaIfaces numaNetIfaceMap
numaCoreCount int
}
// getNetworkDetails retrieves recommended network interfaces.
//
// Returns map of NUMA node ID to chosen fabric interfaces, number of engines to
// provide mappings for, per-NUMA core count and any host errors.
func getNetworkDetails(ctx context.Context, req ConfigGenerateReq) (*networkDetails, error) {
netSet, err := getNetworkSet(ctx, req.Log, req.HostList, req.Client)
if err != nil {
return nil, err
}
nd := &networkDetails{
engineCount: req.NrEngines,
numaCoreCount: int(netSet.HostFabric.CoresPerNuma),
}
// set number of engines if unset based on number of NUMA nodes on hosts
if nd.engineCount == 0 {
nd.engineCount = int(netSet.HostFabric.NumaCount)
}
if nd.engineCount == 0 {
return nil, errors.Errorf(errNoNuma, netSet.HostSet)
}
req.Log.Debugf("engine count for generated config set to %d", nd.engineCount)
numaIfaces, err := getNetIfaces(req.Log, req.NetClass, nd.engineCount, netSet)
if err != nil {
return nil, err
}
nd.numaIfaces = numaIfaces
return nd, nil
}
// getStorageSet retrieves the result of storage scan over host list and
// verifies that there is only a single storage set in response which indicates
// that storage hardware setup is homogeneous across all hosts.
//
// Filter NVMe storage scan so only NUMA affinity and PCI address is taking into
// account by supplying NvmeBasic flag in scan request. This enables
// configuration to work with different combinations of SSD models.
//
// Return host errors, storage scan results for the host set or error.
func getStorageSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostStorageSet, error) {
scanReq := &StorageScanReq{NvmeBasic: true}
scanReq.SetHostList(hostList)
scanResp, err := StorageScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous storage
switch len(scanResp.HostStorage) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous storage hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"storage hardware:")
for _, hss := range scanResp.HostStorage {
log.Info(hss.HostSet.String())
}
return nil, errors.New("storage hardware not consistent across hosts")
}
storageSet := scanResp.HostStorage[scanResp.HostStorage.Keys()[0]]
log.Debugf("Storage hardware is consistent for hosts %s:\n\t%s\n\t%s",
storageSet.HostSet.String(), storageSet.HostStorage.ScmNamespaces.Summary(),
storageSet.HostStorage.NvmeDevices.Summary())
return storageSet, nil
}
// numaPMemsMap is an alias for a map of NUMA node ID to slice of string sorted
// PMem block device paths.
type numaPMemsMap map[int]sort.StringSlice
// mapPMems maps NUMA node ID to pmem block device paths, sort paths to attempt
// selection of desired devices if named appropriately in the case that multiple
// devices exist for a given NUMA node.
func mapPMems(nss storage.ScmNamespaces) numaPMemsMap {
npms := make(numaPMemsMap)
for _, ns := range nss {
nn := int(ns.NumaNode)
npms[nn] = append(npms[nn], fmt.Sprintf("%s/%s", scmBdevDir, ns.BlockDevice))
}
for _, pms := range npms {
pms.Sort()
}
return npms
}
// numSSDsMap is an alias for a map of NUMA node ID to slice of NVMe SSD PCI
// addresses.
type numaSSDsMap map[int]sort.StringSlice
// mapSSDs maps NUMA node ID to NVMe SSD PCI addresses, sort addresses.
func mapSSDs(ssds storage.NvmeControllers) numaSSDsMap {
nssds := make(numaSSDsMap)
for _, ssd := range ssds {
nn := int(ssd.SocketID)
nssds[nn] = append(nssds[nn], ssd.PciAddr)
}
for _, ssds := range nssds {
ssds.Sort()
}
return nssds
}
type storageDetails struct {
hugePageSize int
numaPMems numaPMemsMap
numaSSDs numaSSDsMap
}
// validate checks sufficient PMem devices and SSD NUMA groups exist for the
// required number of engines. Minimum thresholds for SSD group size is also
// checked.
func (sd *storageDetails) validate(log logging.Logger, engineCount int, minNrSSDs int) error {
log.Debugf("numa to pmem mappings: %v", sd.numaPMems)
if len(sd.numaPMems) < engineCount {
return errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, engineCount, len(sd.numaPMems))
}
if minNrSSDs == 0 {
// set empty ssd lists and skip validation
log.Debug("nvme disabled, skip validation")
for nn := 0; nn < engineCount; nn++ {
sd.numaSSDs[nn] = []string{}
}
return nil
}
for nn := 0; nn < engineCount; nn++ {
ssds, exists := sd.numaSSDs[nn]
if !exists {
sd.numaSSDs[nn] = []string{} // populate empty lists for missing entries
}
log.Debugf("ssds bound to numa %d: %v", nn, ssds)
if len(ssds) < minNrSSDs {
return errors.Errorf(errInsufNrSSDs, nn, minNrSSDs, len(ssds))
}
}
return nil
}
// getStorageDetails retrieves mappings of NUMA node to PMem and NVMe SSD
// devices.
//
// Returns storage details struct or host error response and outer error.
func getStorageDetails(ctx context.Context, req ConfigGenerateReq, engineCount int) (*storageDetails, error) {
if engineCount < 1 {
return nil, errors.Errorf(errInvalNrEngines, 1, engineCount)
}
storageSet, err := getStorageSet(ctx, req.Log, req.HostList, req.Client)
if err != nil {
return nil, err
}
sd := &storageDetails{
numaPMems: mapPMems(storageSet.HostStorage.ScmNamespaces),
numaSSDs: mapSSDs(storageSet.HostStorage.NvmeDevices),
hugePageSize: storageSet.HostStorage.HugePageInfo.PageSizeKb,
}
if err := sd.validate(req.Log, engineCount, req.MinNrSSDs); err != nil {
return nil, err
}
return sd, nil
}
func calcHelpers(log logging.Logger, targets, cores int) int {
helpers := cores - targets - 1
if helpers <= 1 {
return helpers
}
if helpers > targets {
log.Debugf("adjusting num helpers (%d) to < num targets (%d), new: %d",
helpers, targets, targets-1)
return targets - 1
}
return helpers
}
type coreCounts struct {
nrTgts int
nrHlprs int
}
// numaCoreCountsMap is an alias for a map of NUMA node ID to calculate target
// and helper core counts.
type numaCoreCountsMap map[int]*coreCounts
// checkCPUs validates and returns recommended values for I/O service and
// offload thread counts.
//
// The target count should be a multiplier of the number of SSDs and typically
// daos gets the best performance with 16x targets per I/O Engine so target
// count will typically be between 12 and 20.
//
// Validate number of targets + 1 cores are available per IO engine, not
// usually a problem as sockets normally have at least 18 cores.
//
// Create helper threads for the remaining available cores, e.g. with 24 cores,
// allocate 7 helper threads. Number of helper threads should never be more than
// number of targets.
func checkCPUs(log logging.Logger, numSSDs, numaCoreCount int) (*coreCounts, error) {
var numTargets int
if numSSDs == 0 {
numTargets = defaultTargetCount
if numTargets >= numaCoreCount {
return &coreCounts{
nrTgts: numaCoreCount - 1,
nrHlprs: 0,
}, nil
}
return &coreCounts{
nrTgts: numTargets,
nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
}, nil
}
if numSSDs >= numaCoreCount {
return nil, errors.Errorf("need more cores than ssds, got %d want %d",
numaCoreCount, numSSDs)
}
for tgts := numSSDs; tgts < numaCoreCount; tgts += numSSDs {
numTargets = tgts
}
log.Debugf("%d targets assigned with %d ssds", numTargets, numSSDs)
return &coreCounts{
nrTgts: numTargets,
nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
}, nil
}
// getCPUDetails retrieves recommended values for I/O service and offload
// threads suitable for the server config file.
//
// Returns core counts struct or error.
func getCPUDetails(log logging.Logger, numaSSDs numaSSDsMap, coresPerNuma int) (numaCoreCountsMap, error) {
if coresPerNuma < 1 {
return nil, errors.Errorf(errInvalNrCores, coresPerNuma)
}
numaCoreCounts := make(numaCoreCountsMap)
for numaID, ssds := range numaSSDs {
coreCounts, err := checkCPUs(log, len(ssds), coresPerNuma)
if err != nil {
return nil, err
}
numaCoreCounts[numaID] = coreCounts
}
return numaCoreCounts, nil
}
type newEngineCfgFn func(int) *engine.Config
func defaultEngineCfg(idx int) *engine.Config {
return engine.NewConfig().
WithTargetCount(defaultTargetCount).
WithLogFile(fmt.Sprintf("%s.%d.log", defaultEngineLogFile, idx))
}
// genConfig generates server config file from details of network, storage and CPU hardware after
// performing some basic sanity checks.
func genConfig(log logging.Logger, newEngineCfg newEngineCfgFn, accessPoints []string, nd *networkDetails, sd *storageDetails, ccs numaCoreCountsMap) (*config.Server, error) {
if nd.engineCount == 0 {
return nil, errors.Errorf(errInvalNrEngines, 1, 0)
}
if len(nd.numaIfaces) < nd.engineCount {
return nil, errors.Errorf(errInsufNrIfaces, "", nd.engineCount,
len(nd.numaIfaces), nd.numaIfaces)
}
if len(sd.numaPMems) < nd.engineCount {
return nil, errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, nd.engineCount,
len(sd.numaPMems))
}
// enforce consistent ssd count across engine configs
minSsds := math.MaxUint32
numaWithMinSsds := 0
if len(sd.numaSSDs) > 0 {
if len(sd.numaSSDs) < nd.engineCount {
return nil, errors.New("invalid number of ssd groups") // should never happen
}
for numa, ssds := range sd.numaSSDs {
if len(ssds) < minSsds {
minSsds = len(ssds)
numaWithMinSsds = numa
}
}
}
if len(ccs) < nd.engineCount {
return nil, errors.New("invalid number of core count groups") // should never happen
}
// enforce consistent target and helper count across engine configs
nrTgts := ccs[numaWithMinSsds].nrTgts
nrHlprs := ccs[numaWithMinSsds].nrHlprs
engines := make([]*engine.Config, 0, nd.engineCount)
for nn := 0; nn < nd.engineCount; nn++ {
engineCfg := newEngineCfg(nn).
WithTargetCount(nrTgts).
WithHelperStreamCount(nrHlprs)
if len(sd.numaPMems) > 0 {
engineCfg.WithStorage(
storage.NewTierConfig().
WithStorageClass(storage.ClassDcpm.String()).
WithScmMountPoint(fmt.Sprintf("%s%d", scmMountPrefix, nn)).
WithScmDeviceList(sd.numaPMems[nn][0]),
)
}
if len(sd.numaSSDs) > 0 && len(sd.numaSSDs[nn]) > 0 {
engineCfg.WithStorage(
storage.NewTierConfig().
WithStorageClass(storage.ClassNvme.String()).
WithBdevDeviceList(sd.numaSSDs[nn][:minSsds]...),
)
}
pnn := uint(nn)
engineCfg.PinnedNumaNode = &pnn
engineCfg.Fabric = engine.FabricConfig{
Provider: nd.numaIfaces[nn].Provider,
Interface: nd.numaIfaces[nn].Device,
InterfacePort: int(defaultFiPort + (nn * defaultFiPortInterval)),
}
engines = append(engines, engineCfg)
}
numTargets := 0
for _, e := range engines {
numTargets += e.TargetCount
}
reqHugePages, err := common.CalcMinHugePages(sd.hugePageSize, numTargets)
if err != nil {
return nil, errors.Wrap(err, "unable to calculate minimum hugepages")
}
cfg := config.DefaultServer().
WithAccessPoints(accessPoints...).
WithFabricProvider(engines[0].Fabric.Provider).
WithEngines(engines...).
WithControlLogFile(defaultControlLogFile).
WithNrHugePages(reqHugePages)
return cfg, cfg.Validate(log, sd.hugePageSize, nil)
}
| IsConfigGenerateError | identifier_name |
auto.go | //
// (C) Copyright 2020-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package control
import (
"context"
"fmt"
"math"
"sort"
"github.com/pkg/errors"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
)
const (
scmMountPrefix = "/mnt/daos"
scmBdevDir = "/dev"
defaultFiPort = 31416
defaultFiPortInterval = 1000
defaultTargetCount = 16
defaultEngineLogFile = "/tmp/daos_engine"
defaultControlLogFile = "/tmp/daos_server.log"
minDMABuffer = 1024
errNoNuma = "zero numa nodes reported on hosts %s"
errUnsupNetDevClass = "unsupported net dev class in request: %s"
errInsufNrIfaces = "insufficient matching %s network interfaces, want %d got %d %v"
errInsufNrPMemGroups = "insufficient number of pmem device numa groups %v, want %d got %d"
errInvalNrEngines = "unexpected number of engines requested, want %d got %d"
errInsufNrSSDs = "insufficient number of ssds for numa %d, want %d got %d"
errInvalNrCores = "invalid number of cores for numa %d"
)
type (
// ConfigGenerateReq contains the inputs for the request.
ConfigGenerateReq struct {
unaryRequest
msRequest
NrEngines int
MinNrSSDs int
NetClass hardware.NetDevClass
Client UnaryInvoker
HostList []string
AccessPoints []string
Log logging.Logger
}
// ConfigGenerateResp contains the request response.
ConfigGenerateResp struct {
ConfigOut *config.Server
}
// ConfigGenerateError implements the error interface and
// contains a set of host-specific errors encountered while
// attempting to generate a configuration.
ConfigGenerateError struct {
HostErrorsResp
}
)
func (cge *ConfigGenerateError) Error() string |
// GetHostErrors returns the wrapped HostErrorsMap.
func (cge *ConfigGenerateError) GetHostErrors() HostErrorsMap {
return cge.HostErrors
}
// IsConfigGenerateError returns true if the provided error is a *ConfigGenerateError.
func IsConfigGenerateError(err error) bool {
_, ok := errors.Cause(err).(*ConfigGenerateError)
return ok
}
// ConfigGenerate attempts to automatically detect hardware and generate a DAOS
// server config file for a set of hosts with homogeneous hardware setup.
//
// Returns API response or error.
func ConfigGenerate(ctx context.Context, req ConfigGenerateReq) (*ConfigGenerateResp, error) {
req.Log.Debugf("ConfigGenerate called with request %+v", req)
if len(req.HostList) == 0 {
return nil, errors.New("no hosts specified")
}
if len(req.AccessPoints) == 0 {
return nil, errors.New("no access points specified")
}
nd, err := getNetworkDetails(ctx, req)
if err != nil {
return nil, err
}
sd, err := getStorageDetails(ctx, req, nd.engineCount)
if err != nil {
return nil, err
}
ccs, err := getCPUDetails(req.Log, sd.numaSSDs, nd.numaCoreCount)
if err != nil {
return nil, err
}
cfg, err := genConfig(req.Log, defaultEngineCfg, req.AccessPoints, nd, sd, ccs)
if err != nil {
return nil, err
}
return &ConfigGenerateResp{ConfigOut: cfg}, nil
}
// getNetworkSet retrieves the result of network scan over host list and
// verifies that there is only a single network set in response which indicates
// that network hardware setup is homogeneous across all hosts.
//
// Return host errors, network scan results for the host set or error.
func getNetworkSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostFabricSet, error) {
scanReq := new(NetworkScanReq)
scanReq.SetHostList(hostList)
scanResp, err := NetworkScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous network
switch len(scanResp.HostFabrics) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous network hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"network hardware:")
for _, hns := range scanResp.HostFabrics {
log.Info(hns.HostSet.String())
}
return nil, errors.New("network hardware not consistent across hosts")
}
networkSet := scanResp.HostFabrics[scanResp.HostFabrics.Keys()[0]]
log.Debugf("Network hardware is consistent for hosts %s:\n\t%v",
networkSet.HostSet, networkSet.HostFabric.Interfaces)
return networkSet, nil
}
// numaNetIfaceMap is an alias for a map of NUMA node ID to optimal
// fabric network interface.
type numaNetIfaceMap map[int]*HostFabricInterface
// hasNUMAs returns true if interfaces exist for given NUMA node range.
func (nnim numaNetIfaceMap) hasNUMAs(numaCount int) bool {
for nn := 0; nn < numaCount; nn++ {
if _, exists := nnim[nn]; !exists {
return false
}
}
return true
}
// classInterfaces is an alias for a map of netdev class ID to slice of
// fabric network interfaces.
type classInterfaces map[hardware.NetDevClass]numaNetIfaceMap
// add network device to bucket corresponding to provider, network class type and
// NUMA node binding. Ignore add if there is an existing entry as the interfaces
// are processed in descending order of performance (best first).
func (cis classInterfaces) add(log logging.Logger, iface *HostFabricInterface) {
nn := int(iface.NumaNode)
if _, exists := cis[iface.NetDevClass]; !exists {
cis[iface.NetDevClass] = make(numaNetIfaceMap)
}
if _, exists := cis[iface.NetDevClass][nn]; exists {
return // already have interface for this NUMA
}
log.Debugf("%s class iface %s found for NUMA %d", iface.NetDevClass,
iface.Device, nn)
cis[iface.NetDevClass][nn] = iface
}
// parseInterfaces processes network devices in scan result, adding to a match
// list given the following conditions:
// IF class == (ether OR infiniband) AND requested_class == (ANY OR <class>).
//
// Returns when network devices matching criteria have been found for each
// required NUMA node.
func parseInterfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, interfaces []*HostFabricInterface) (numaNetIfaceMap, bool) {
// sort network interfaces by priority to get best available
sort.Slice(interfaces, func(i, j int) bool {
return interfaces[i].Priority < interfaces[j].Priority
})
var matches numaNetIfaceMap
buckets := make(map[string]classInterfaces)
for _, iface := range interfaces {
switch iface.NetDevClass {
case hardware.Ether, hardware.Infiniband:
switch reqClass {
case hardware.NetDevAny, iface.NetDevClass:
default:
continue // iface class not requested
}
default:
continue // iface class unsupported
}
// init network device slice for a new provider
if _, exists := buckets[iface.Provider]; !exists {
buckets[iface.Provider] = make(classInterfaces)
}
buckets[iface.Provider].add(log, iface)
matches = buckets[iface.Provider][iface.NetDevClass]
if matches.hasNUMAs(engineCount) {
return matches, true
}
}
return matches, false
}
// getNetIfaces scans fabric network devices and returns a NUMA keyed map for a
// provider/class combination.
func getNetIfaces(log logging.Logger, reqClass hardware.NetDevClass, engineCount int, hfs *HostFabricSet) (numaNetIfaceMap, error) {
switch reqClass {
case hardware.NetDevAny, hardware.Ether, hardware.Infiniband:
default:
return nil, errors.Errorf(errUnsupNetDevClass, reqClass.String())
}
matchIfaces, complete := parseInterfaces(log, reqClass, engineCount, hfs.HostFabric.Interfaces)
if !complete {
class := "best-available"
if reqClass != hardware.NetDevAny {
class = reqClass.String()
}
return nil, errors.Errorf(errInsufNrIfaces, class, engineCount, len(matchIfaces),
matchIfaces)
}
log.Debugf("selected network interfaces: %v", matchIfaces)
return matchIfaces, nil
}
type networkDetails struct {
engineCount int
numaIfaces numaNetIfaceMap
numaCoreCount int
}
// getNetworkDetails retrieves recommended network interfaces.
//
// Returns map of NUMA node ID to chosen fabric interfaces, number of engines to
// provide mappings for, per-NUMA core count and any host errors.
func getNetworkDetails(ctx context.Context, req ConfigGenerateReq) (*networkDetails, error) {
netSet, err := getNetworkSet(ctx, req.Log, req.HostList, req.Client)
if err != nil {
return nil, err
}
nd := &networkDetails{
engineCount: req.NrEngines,
numaCoreCount: int(netSet.HostFabric.CoresPerNuma),
}
// set number of engines if unset based on number of NUMA nodes on hosts
if nd.engineCount == 0 {
nd.engineCount = int(netSet.HostFabric.NumaCount)
}
if nd.engineCount == 0 {
return nil, errors.Errorf(errNoNuma, netSet.HostSet)
}
req.Log.Debugf("engine count for generated config set to %d", nd.engineCount)
numaIfaces, err := getNetIfaces(req.Log, req.NetClass, nd.engineCount, netSet)
if err != nil {
return nil, err
}
nd.numaIfaces = numaIfaces
return nd, nil
}
// getStorageSet retrieves the result of storage scan over host list and
// verifies that there is only a single storage set in response which indicates
// that storage hardware setup is homogeneous across all hosts.
//
// Filter NVMe storage scan so only NUMA affinity and PCI address is taking into
// account by supplying NvmeBasic flag in scan request. This enables
// configuration to work with different combinations of SSD models.
//
// Return host errors, storage scan results for the host set or error.
func getStorageSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostStorageSet, error) {
scanReq := &StorageScanReq{NvmeBasic: true}
scanReq.SetHostList(hostList)
scanResp, err := StorageScan(ctx, client, scanReq)
if err != nil {
return nil, err
}
if len(scanResp.GetHostErrors()) > 0 {
return nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}
}
// verify homogeneous storage
switch len(scanResp.HostStorage) {
case 0:
return nil, errors.New("no host responses")
case 1: // success
default: // more than one means non-homogeneous hardware
log.Info("Heterogeneous storage hardware configurations detected, " +
"cannot proceed. The following sets of hosts have different " +
"storage hardware:")
for _, hss := range scanResp.HostStorage {
log.Info(hss.HostSet.String())
}
return nil, errors.New("storage hardware not consistent across hosts")
}
storageSet := scanResp.HostStorage[scanResp.HostStorage.Keys()[0]]
log.Debugf("Storage hardware is consistent for hosts %s:\n\t%s\n\t%s",
storageSet.HostSet.String(), storageSet.HostStorage.ScmNamespaces.Summary(),
storageSet.HostStorage.NvmeDevices.Summary())
return storageSet, nil
}
// numaPMemsMap is an alias for a map of NUMA node ID to slice of string sorted
// PMem block device paths.
type numaPMemsMap map[int]sort.StringSlice

// mapPMems groups PMem block device paths by the NUMA node they are bound to.
// Each group is sorted so that device selection is deterministic and honors
// any intentional naming order when a node has multiple devices.
func mapPMems(nss storage.ScmNamespaces) numaPMemsMap {
	grouped := make(numaPMemsMap)
	for _, ns := range nss {
		node := int(ns.NumaNode)
		path := fmt.Sprintf("%s/%s", scmBdevDir, ns.BlockDevice)
		grouped[node] = append(grouped[node], path)
	}
	for _, paths := range grouped {
		paths.Sort()
	}

	return grouped
}
// numaSSDsMap is an alias for a map of NUMA node ID to slice of NVMe SSD PCI
// addresses.
type numaSSDsMap map[int]sort.StringSlice

// mapSSDs groups NVMe SSD PCI addresses by the NUMA node (socket) they are
// attached to, sorting each group for deterministic ordering.
func mapSSDs(ssds storage.NvmeControllers) numaSSDsMap {
	grouped := make(numaSSDsMap)
	for _, ctrlr := range ssds {
		node := int(ctrlr.SocketID)
		grouped[node] = append(grouped[node], ctrlr.PciAddr)
	}
	for _, addrs := range grouped {
		addrs.Sort()
	}

	return grouped
}
// storageDetails groups the storage topology needed to generate a server
// config: the hugepage size plus per-NUMA-node PMem and SSD groupings.
type storageDetails struct {
	hugePageSize int          // hugepage size (PageSizeKb) reported by the storage scan
	numaPMems    numaPMemsMap // NUMA node ID -> sorted PMem block device paths
	numaSSDs     numaSSDsMap  // NUMA node ID -> sorted NVMe SSD PCI addresses
}
// validate checks sufficient PMem devices and SSD NUMA groups exist for the
// required number of engines. Minimum thresholds for SSD group size is also
// checked.
//
// A minNrSSDs of zero means NVMe is disabled: per-engine SSD lists are simply
// populated as empty and SSD validation is skipped.
func (sd *storageDetails) validate(log logging.Logger, engineCount int, minNrSSDs int) error {
	log.Debugf("numa to pmem mappings: %v", sd.numaPMems)
	if len(sd.numaPMems) < engineCount {
		return errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, engineCount, len(sd.numaPMems))
	}

	if minNrSSDs == 0 {
		// set empty ssd lists and skip validation
		log.Debug("nvme disabled, skip validation")
		for nn := 0; nn < engineCount; nn++ {
			sd.numaSSDs[nn] = []string{}
		}

		return nil
	}

	for nn := 0; nn < engineCount; nn++ {
		ssds, exists := sd.numaSSDs[nn]
		if !exists {
			sd.numaSSDs[nn] = []string{} // populate empty lists for missing entries
		}
		// Note: when the entry was missing, ssds is nil here, so the debug
		// line logs an empty list and the threshold check below reports 0.
		log.Debugf("ssds bound to numa %d: %v", nn, ssds)
		if len(ssds) < minNrSSDs {
			return errors.Errorf(errInsufNrSSDs, nn, minNrSSDs, len(ssds))
		}
	}

	return nil
}
// getStorageDetails retrieves mappings of NUMA node to PMem and NVMe SSD
// devices.
//
// Returns storage details struct or host error response and outer error.
func getStorageDetails(ctx context.Context, req ConfigGenerateReq, engineCount int) (*storageDetails, error) {
	if engineCount < 1 {
		return nil, errors.Errorf(errInvalNrEngines, 1, engineCount)
	}

	// Scan over all hosts and require homogeneous hardware (see getStorageSet).
	storageSet, err := getStorageSet(ctx, req.Log, req.HostList, req.Client)
	if err != nil {
		return nil, err
	}

	sd := &storageDetails{
		numaPMems:    mapPMems(storageSet.HostStorage.ScmNamespaces),
		numaSSDs:     mapSSDs(storageSet.HostStorage.NvmeDevices),
		hugePageSize: storageSet.HostStorage.HugePageInfo.PageSizeKb,
	}
	// Fail fast if the discovered hardware cannot support the engine count.
	if err := sd.validate(req.Log, engineCount, req.MinNrSSDs); err != nil {
		return nil, err
	}

	return sd, nil
}
// calcHelpers derives the number of offload (helper) threads from the core
// and target counts: the cores left after targets plus one reserved core,
// capped at one fewer than the number of targets.
func calcHelpers(log logging.Logger, targets, cores int) int {
	spare := cores - targets - 1
	switch {
	case spare <= 1:
		return spare
	case spare > targets:
		log.Debugf("adjusting num helpers (%d) to < num targets (%d), new: %d",
			spare, targets, targets-1)
		return targets - 1
	default:
		return spare
	}
}
// coreCounts holds the per-engine thread counts derived from the hardware.
type coreCounts struct {
	nrTgts  int // number of I/O service (target) threads
	nrHlprs int // number of offload (helper) threads
}

// numaCoreCountsMap is an alias for a map of NUMA node ID to calculate target
// and helper core counts.
type numaCoreCountsMap map[int]*coreCounts
// checkCPUs validates and returns recommended values for I/O service and
// offload thread counts.
//
// The target count should be a multiplier of the number of SSDs and typically
// daos gets the best performance with 16x targets per I/O Engine so target
// count will typically be between 12 and 20.
//
// Validate number of targets + 1 cores are available per IO engine, not
// usually a problem as sockets normally have at least 18 cores.
//
// Create helper threads for the remaining available cores, e.g. with 24 cores,
// allocate 7 helper threads. Number of helper threads should never be more than
// number of targets.
func checkCPUs(log logging.Logger, numSSDs, numaCoreCount int) (*coreCounts, error) {
	var numTargets int
	if numSSDs == 0 {
		// No SSDs: use the default target count, capped so at least one
		// core remains free; no helpers when capped.
		numTargets = defaultTargetCount
		if numTargets >= numaCoreCount {
			return &coreCounts{
				nrTgts:  numaCoreCount - 1,
				nrHlprs: 0,
			}, nil
		}

		return &coreCounts{
			nrTgts:  numTargets,
			nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
		}, nil
	}

	if numSSDs >= numaCoreCount {
		return nil, errors.Errorf("need more cores than ssds, got %d want %d",
			numaCoreCount, numSSDs)
	}

	// Pick the largest multiple of numSSDs strictly less than the core
	// count, so targets divide evenly across SSDs.
	for tgts := numSSDs; tgts < numaCoreCount; tgts += numSSDs {
		numTargets = tgts
	}
	log.Debugf("%d targets assigned with %d ssds", numTargets, numSSDs)

	return &coreCounts{
		nrTgts:  numTargets,
		nrHlprs: calcHelpers(log, numTargets, numaCoreCount),
	}, nil
}
// getCPUDetails retrieves recommended values for I/O service and offload
// threads suitable for the server config file, computing one coreCounts
// entry per NUMA node present in numaSSDs.
//
// Returns core counts struct or error.
func getCPUDetails(log logging.Logger, numaSSDs numaSSDsMap, coresPerNuma int) (numaCoreCountsMap, error) {
	if coresPerNuma < 1 {
		return nil, errors.Errorf(errInvalNrCores, coresPerNuma)
	}

	counts := make(numaCoreCountsMap)
	for node, ssds := range numaSSDs {
		cc, err := checkCPUs(log, len(ssds), coresPerNuma)
		if err != nil {
			return nil, err
		}
		counts[node] = cc
	}

	return counts, nil
}
// newEngineCfgFn produces a baseline engine config for a given engine index;
// allows tests to substitute a custom factory.
type newEngineCfgFn func(int) *engine.Config

// defaultEngineCfg returns the default per-engine configuration, with a log
// file name derived from the engine index.
func defaultEngineCfg(idx int) *engine.Config {
	return engine.NewConfig().
		WithTargetCount(defaultTargetCount).
		WithLogFile(fmt.Sprintf("%s.%d.log", defaultEngineLogFile, idx))
}
// genConfig generates server config file from details of network, storage and CPU hardware after
// performing some basic sanity checks.
func genConfig(log logging.Logger, newEngineCfg newEngineCfgFn, accessPoints []string, nd *networkDetails, sd *storageDetails, ccs numaCoreCountsMap) (*config.Server, error) {
	if nd.engineCount == 0 {
		return nil, errors.Errorf(errInvalNrEngines, 1, 0)
	}
	if len(nd.numaIfaces) < nd.engineCount {
		return nil, errors.Errorf(errInsufNrIfaces, "", nd.engineCount,
			len(nd.numaIfaces), nd.numaIfaces)
	}
	if len(sd.numaPMems) < nd.engineCount {
		return nil, errors.Errorf(errInsufNrPMemGroups, sd.numaPMems, nd.engineCount,
			len(sd.numaPMems))
	}

	// enforce consistent ssd count across engine configs
	// NOTE(review): math.MaxUint32 does not fit in a 32-bit int, so this
	// assumes a 64-bit build; it is a sentinel larger than any SSD count.
	minSsds := math.MaxUint32
	numaWithMinSsds := 0
	if len(sd.numaSSDs) > 0 {
		if len(sd.numaSSDs) < nd.engineCount {
			return nil, errors.New("invalid number of ssd groups") // should never happen
		}

		// Find the NUMA node with the fewest SSDs; all engines are sized to it.
		for numa, ssds := range sd.numaSSDs {
			if len(ssds) < minSsds {
				minSsds = len(ssds)
				numaWithMinSsds = numa
			}
		}
	}

	if len(ccs) < nd.engineCount {
		return nil, errors.New("invalid number of core count groups") // should never happen
	}
	// enforce consistent target and helper count across engine configs
	nrTgts := ccs[numaWithMinSsds].nrTgts
	nrHlprs := ccs[numaWithMinSsds].nrHlprs

	engines := make([]*engine.Config, 0, nd.engineCount)
	for nn := 0; nn < nd.engineCount; nn++ {
		engineCfg := newEngineCfg(nn).
			WithTargetCount(nrTgts).
			WithHelperStreamCount(nrHlprs)
		if len(sd.numaPMems) > 0 {
			engineCfg.WithStorage(
				storage.NewTierConfig().
					WithStorageClass(storage.ClassDcpm.String()).
					WithScmMountPoint(fmt.Sprintf("%s%d", scmMountPrefix, nn)).
					WithScmDeviceList(sd.numaPMems[nn][0]),
			)
		}
		if len(sd.numaSSDs) > 0 && len(sd.numaSSDs[nn]) > 0 {
			engineCfg.WithStorage(
				storage.NewTierConfig().
					WithStorageClass(storage.ClassNvme.String()).
					// truncate to minSsds so every engine gets the same count
					WithBdevDeviceList(sd.numaSSDs[nn][:minSsds]...),
			)
		}

		// Pin each engine to its NUMA node and give it a distinct fabric port.
		pnn := uint(nn)
		engineCfg.PinnedNumaNode = &pnn
		engineCfg.Fabric = engine.FabricConfig{
			Provider:      nd.numaIfaces[nn].Provider,
			Interface:     nd.numaIfaces[nn].Device,
			InterfacePort: int(defaultFiPort + (nn * defaultFiPortInterval)),
		}
		engines = append(engines, engineCfg)
	}

	// Size the hugepage allocation to cover all engines' targets.
	numTargets := 0
	for _, e := range engines {
		numTargets += e.TargetCount
	}
	reqHugePages, err := common.CalcMinHugePages(sd.hugePageSize, numTargets)
	if err != nil {
		return nil, errors.Wrap(err, "unable to calculate minimum hugepages")
	}

	cfg := config.DefaultServer().
		WithAccessPoints(accessPoints...).
		WithFabricProvider(engines[0].Fabric.Provider).
		WithEngines(engines...).
		WithControlLogFile(defaultControlLogFile).
		WithNrHugePages(reqHugePages)

	return cfg, cfg.Validate(log, sd.hugePageSize, nil)
}
| {
return cge.Errors().Error()
} | identifier_body |
core-tracing.d.ts | /**
* OpenTelemetry compatible interface for Context
*/
export declare interface Context {
    /**
     * Get a value from the context.
     *
     * @param key - key which identifies a context value
     * @returns the value stored for the key, or `undefined` if unset.
     */
    getValue(key: symbol): unknown;
    /**
     * Create a new context which inherits from this context and has
     * the given key set to the given value.
     *
     * Contexts are immutable; the receiver is not modified.
     *
     * @param key - context key for which to set the value
     * @param value - value to set for the given key
     */
    setValue(key: symbol, value: unknown): Context;
    /**
     * Return a new context which inherits from this context but does
     * not contain a value for the given key.
     *
     * @param key - context key for which to clear a value
     */
    deleteValue(key: symbol): Context;
}

/** Entrypoint for context API */
export declare const context: ContextAPI;

/**
 * Singleton object which represents the entry point to the OpenTelemetry Context API
 */
export declare interface ContextAPI {
    /**
     * Get the currently active context
     *
     * @returns the active {@link Context}.
     */
    active(): Context;
}
/**
 * Creates a function that can be used to create spans using the global tracer.
 *
 * Usage:
 *
 * ```typescript
 * // once
 * const createSpan = createSpanFunction({ packagePrefix: "Azure.Data.AppConfiguration", namespace: "Microsoft.AppConfiguration" });
 *
 * // in each operation
 * const span = createSpan("deleteConfigurationSetting", operationOptions);
 *    // code...
 * span.end();
 * ```
 *
 * @hidden
 * @param args - allows configuration of the prefix for each span as well as the az.namespace field.
 */
export declare function createSpanFunction(args: CreateSpanFunctionArgs): <T extends {
    tracingOptions?: OperationTracingOptions | undefined;
}>(operationName: string, operationOptions: T | undefined) => {
    span: Span;
    updatedOptions: T;
};

/**
 * Arguments for `createSpanFunction` that allow you to specify the
 * prefix for each created span as well as the `az.namespace` attribute.
 *
 * @hidden
 */
export declare interface CreateSpanFunctionArgs {
    /**
     * Package name prefix.
     *
     * NOTE: if this is empty no prefix will be applied to created Span names.
     */
    packagePrefix: string;
    /**
     * Service namespace
     *
     * NOTE: if this is empty no `az.namespace` attribute will be added to created Spans.
     */
    namespace: string;
}

/**
 * An Exception for a Span: either a structured object carrying a code,
 * message, or name, or a plain string message.
 */
export declare type Exception = ExceptionWithCode | ExceptionWithMessage | ExceptionWithName | string;

/**
 * An Exception with a code.
 */
export declare interface ExceptionWithCode {
    /** The code. */
    code: string | number;
    /** The name. */
    name?: string;
    /** The message. */
    message?: string;
    /** The stack. */
    stack?: string;
}

/**
 * An Exception with a message.
 */
export declare interface ExceptionWithMessage {
    /** The code. */
    code?: string | number;
    /** The message. */
    message: string;
    /** The name. */
    name?: string;
    /** The stack. */
    stack?: string;
}

/**
 * An Exception with a name.
 */
export declare interface ExceptionWithName {
    /** The code. */
    code?: string | number;
    /** The message. */
    message?: string;
    /** The name. */
    name: string;
    /** The stack. */
    stack?: string;
}
/**
 * Generates a `SpanContext` given a `traceparent` header value.
 * @param traceParentHeader - Serialized span context data as a `traceparent` header value.
 * @returns The `SpanContext` generated from the `traceparent` value, or
 * `undefined` if the header could not be parsed.
 */
export declare function extractSpanContextFromTraceParentHeader(traceParentHeader: string): SpanContext | undefined;

/**
 * Return the span if one exists
 *
 * @param context - context to get span from
 */
export declare function getSpan(context: Context): Span | undefined;

/**
 * Get the span context of the span if it exists.
 *
 * @param context - context to get values from
 */
export declare function getSpanContext(context: Context): SpanContext | undefined;

/**
 * Generates a `traceparent` value given a span context.
 * @param spanContext - Contains context for a specific span.
 * @returns The `spanContext` represented as a `traceparent` value.
 */
export declare function getTraceParentHeader(spanContext: SpanContext): string | undefined;

/**
 * Retrieves a tracer from the global tracer provider.
 */
export declare function getTracer(): Tracer;

/**
 * Retrieves a tracer from the global tracer provider.
 * @param name - the name of the tracer to retrieve.
 * @param version - the version of the tracer to retrieve.
 */
export declare function getTracer(name: string, version?: string): Tracer;

/**
 * Represents high resolution time as `[seconds, nanoseconds]`.
 */
export declare type HrTime = [number, number];

/**
 * Returns true of the given {@link SpanContext} is valid.
 * A valid {@link SpanContext} is one which has a valid trace ID and span ID as per the spec.
 *
 * @param context - the {@link SpanContext} to validate.
 *
 * @returns true if the {@link SpanContext} is valid, false otherwise.
 */
export declare function isSpanContextValid(context: SpanContext): boolean;

/**
 * Used to specify a span that is linked to another.
 */
export declare interface Link {
    /** The {@link SpanContext} of a linked span. */
    context: SpanContext;
    /** A set of {@link SpanAttributes} on the link. */
    attributes?: SpanAttributes;
}
/**
 * Tracing options to set on an operation.
 */
export declare interface OperationTracingOptions {
    /**
     * OpenTelemetry SpanOptions used to create a span when tracing is enabled.
     */
    spanOptions?: SpanOptions;
    /**
     * OpenTelemetry context to use for created Spans.
     */
    tracingContext?: Context;
}

/**
 * Set the span on a context
 *
 * @param context - context to use as parent
 * @param span - span to set active
 * @returns a new context with the span set as the active span.
 */
export declare function setSpan(context: Context, span: Span): Context;

/**
 * Wrap span context in a NoopSpan and set as span in a new
 * context
 *
 * @param context - context to set active span on
 * @param spanContext - span context to be wrapped
 */
export declare function setSpanContext(context: Context, spanContext: SpanContext): Context;
/**
 * An interface that represents a span. A span represents a single operation
 * within a trace. Examples of span might include remote procedure calls or a
 * in-process function calls to sub-components. A Trace has a single, top-level
 * "root" Span that in turn may have zero or more child Spans, which in turn
 * may have children.
 *
 * Spans are created by the {@link Tracer.startSpan} method.
 */
export declare interface Span {
    /**
     * Returns the {@link SpanContext} object associated with this Span.
     *
     * Get an immutable, serializable identifier for this span that can be used
     * to create new child spans. Returned SpanContext is usable even after the
     * span ends.
     *
     * @returns the SpanContext object associated with this Span.
     */
    spanContext(): SpanContext;
    /**
     * Sets an attribute to the span.
     *
     * Sets a single Attribute with the key and value passed as arguments.
     *
     * @param key - the key for this attribute.
     * @param value - the value for this attribute. Setting a value null or
     *                undefined is invalid and will result in undefined behavior.
     */
    setAttribute(key: string, value: SpanAttributeValue): this;
    /**
     * Sets attributes to the span.
     *
     * @param attributes - the attributes that will be added.
     *                     null or undefined attribute values
     *                     are invalid and will result in undefined behavior.
     */
    setAttributes(attributes: SpanAttributes): this;
    /**
     * Adds an event to the Span.
     *
     * @param name - the name of the event.
     * @param attributesOrStartTime - the attributes that will be added; these are
     *     associated with this event. Can be also a start time
     *     if type is TimeInput and 3rd param is undefined
     * @param startTime - start time of the event.
     */
    addEvent(name: string, attributesOrStartTime?: SpanAttributes | TimeInput, startTime?: TimeInput): this;
    /**
     * Sets a status to the span. If used, this will override the default Span
     * status. Default is {@link SpanStatusCode.UNSET}. SetStatus overrides the value
     * of previous calls to SetStatus on the Span.
     *
     * @param status - the SpanStatus to set.
     */
    setStatus(status: SpanStatus): this;
    /**
     * Marks the end of Span execution.
     *
     * Call to End of a Span MUST not have any effects on child spans. Those may
     * still be running and can be ended later.
     *
     * Do not return `this`. The Span generally should not be used after it
     * is ended so chaining is not desired in this context.
     *
     * @param endTime - the time to set as Span's end time. If not provided,
     *                  use the current time as the span's end time.
     */
    end(endTime?: TimeInput): void;
    /**
     * Returns the flag whether this span will be recorded.
     *
     * @returns true if this Span is active and recording information like events
     *     with the `AddEvent` operation and attributes using `setAttributes`.
     */
    isRecording(): boolean;
    /**
     * Sets exception as a span event
     * @param exception - the exception the only accepted values are string or Error
     * @param time - the time to set as Span's event time. If not provided,
     *               use the current time.
     */
    recordException(exception: Exception, time?: TimeInput): void;
    /**
     * Updates the Span name.
     *
     * This will override the name provided via {@link Tracer.startSpan}.
     *
     * Upon this update, any sampling behavior based on Span name will depend on
     * the implementation.
     *
     * @param name - the Span name.
     */
    updateName(name: string): this;
}
/**
 * Attributes for a Span.
 */
export declare interface SpanAttributes {
    /**
     * Attributes for a Span.
     */
    [attributeKey: string]: SpanAttributeValue | undefined;
}

/**
 * Attribute values may be any non-nullish primitive value except an object.
 *
 * null or undefined attribute values are invalid and will result in undefined behavior.
 */
export declare type SpanAttributeValue = string | number | boolean | Array<null | undefined | string> | Array<null | undefined | number> | Array<null | undefined | boolean>;

/**
 * A light interface that tries to be structurally compatible with OpenTelemetry
 */
export declare interface SpanContext {
    /**
     * UUID of a trace.
     */
    traceId: string;
    /**
     * UUID of a Span.
     */
    spanId: string;
    /**
     * https://www.w3.org/TR/trace-context/#trace-flags
     * See also {@link TraceFlags} for common values.
     */
    traceFlags: number;
    /**
     * Tracing-system-specific info to propagate.
     *
     * The tracestate field value is a `list` as defined below. The `list` is a
     * series of `list-members` separated by commas `,`, and a list-member is a
     * key/value pair separated by an equals sign `=`. Spaces and horizontal tabs
     * surrounding `list-members` are ignored. There can be a maximum of 32
     * `list-members` in a `list`.
     * More Info: https://www.w3.org/TR/trace-context/#tracestate-field
     *
     * Examples:
     *     Single tracing system (generic format):
     *         tracestate: rojo=00f067aa0ba902b7
     *     Multiple tracing systems (with different formatting):
     *         tracestate: rojo=00f067aa0ba902b7,congo=t61rcWkgMzE
     */
    traceState?: TraceState;
}

/**
 * The kind of span.
 */
export declare enum SpanKind {
    /** Default value. Indicates that the span is used internally. */
    INTERNAL = 0,
    /**
     * Indicates that the span covers server-side handling of an RPC or other
     * remote request.
     */
    SERVER = 1,
    /**
     * Indicates that the span covers the client-side wrapper around an RPC or
     * other remote request.
     */
    CLIENT = 2,
    /**
     * Indicates that the span describes producer sending a message to a
     * broker. Unlike client and server, there is no direct critical path latency
     * relationship between producer and consumer spans.
     */
    PRODUCER = 3,
    /**
     * Indicates that the span describes consumer receiving a message from a
     * broker. Unlike client and server, there is no direct critical path latency
     * relationship between producer and consumer spans.
     */
    CONSUMER = 4
}
/**
 * An interface that enables manual propagation of Spans
 */
export declare interface SpanOptions {
    /**
     * Attributes to set on the Span
     */
    attributes?: SpanAttributes;
    /** {@link Link}s span to other spans */
    links?: Link[];
    /**
     * The type of Span. Default to SpanKind.INTERNAL
     */
    kind?: SpanKind;
    /**
     * A manually specified start time for the created `Span` object.
     */
    startTime?: TimeInput;
}

/**
 * The status for a span.
 */
export declare interface SpanStatus {
    /** The status code of this message. */
    code: SpanStatusCode;
    /** A developer-facing error message. */
    message?: string;
}

/** SpanStatusCode */
export declare enum SpanStatusCode {
    /**
     * The default status.
     */
    UNSET = 0,
    /**
     * The operation has been validated by an Application developer or
     * Operator to have completed successfully.
     */
    OK = 1,
    /**
     * The operation contains an error.
     */
    ERROR = 2
}

/**
 * Used to represent a Time: a high-resolution pair, epoch milliseconds,
 * or a Date.
 */
export declare type TimeInput = HrTime | number | Date;

/**
 * Shorthand enum for common traceFlags values inside SpanContext
 */
export declare const enum TraceFlags {
    /** No flag set. */
    NONE = 0,
    /** Caller is collecting trace information. */
    SAMPLED = 1
}
/**
 * A Tracer.
 */
export declare interface Tracer {
    /**
     * Starts a new {@link Span}. Start the span without setting it on context.
     *
     * This method does NOT modify the current Context.
     *
     * @param name - The name of the span
     * @param options - SpanOptions used for span creation
     * @param context - Context to use to extract parent
     * @returns The newly created span
     * @example
     *     const span = tracer.startSpan('op');
     *     span.setAttribute('key', 'value');
     *     span.end();
     */
    startSpan(name: string, options?: SpanOptions, context?: Context): Span;
}

/**
 * TraceState.
 */
export declare interface TraceState {
    /**
     * Create a new TraceState which inherits from this TraceState and has the
     * given key set.
     * The new entry will always be added in the front of the list of states.
     *
     * @param key - key of the TraceState entry.
     * @param value - value of the TraceState entry.
     */
    set(key: string, value: string): TraceState;
    /**
     * Return a new TraceState which inherits from this TraceState but does not
     * contain the given key.
     *
     * @param key - the key for the TraceState entry to be removed.
     */
    unset(key: string): TraceState;
    /**
     * Returns the value to which the specified key is mapped, or `undefined` if
     * this map contains no mapping for the key.
     *
     * @param key - with which the specified value is to be associated.
     * @returns the value to which the specified key is mapped, or `undefined` if
     *     this map contains no mapping for the key.
     */
    get(key: string): string | undefined;
    /**
     * Serializes the TraceState to a `list` as defined below. The `list` is a
     * series of `list-members` separated by commas `,`, and a list-member is a
     * key/value pair separated by an equals sign `=`. Spaces and horizontal tabs
     * surrounding `list-members` are ignored. There can be a maximum of 32
     * `list-members` in a `list`.
     *
     * @returns the serialized string.
     */
    serialize(): string;
}
export { } | * associated with this event. Can be also a start time
| random_line_split |
lex.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go tool yacc cc.y
package cc
import (
"fmt"
"sort"
"strings"
)
// A Syntax represents any syntax element.
// Node types typically satisfy it by embedding SyntaxInfo.
type Syntax interface {
	// GetSpan returns the start and end position of the syntax,
	// excluding leading or trailing comments.
	// The use of a Get prefix is non-standard but avoids a conflict
	// with the field named Span in most implementations.
	GetSpan() Span

	// GetComments returns the comments attached to the syntax.
	// This method would normally be named 'Comments' but that
	// would interfere with embedding a type of the same name.
	// The use of a Get prefix is non-standard but avoids a conflict
	// with the field named Comments in most implementations.
	GetComments() *Comments
}
// SyntaxInfo contains metadata about a piece of syntax.
// Embedding it in a node type provides that node's Syntax implementation.
type SyntaxInfo struct {
	Span     Span // location of syntax in input
	Comments Comments
}

// GetSpan returns the location of the syntax in the input.
func (s *SyntaxInfo) GetSpan() Span {
	return s.Span
}

// GetComments returns a pointer to the attached comments, so callers can
// modify them in place.
func (s *SyntaxInfo) GetComments() *Comments {
	return &s.Comments
}
// Comments collects the comments associated with a syntax element,
// populated by the comment-assignment pass after parsing.
type Comments struct {
	Before []Comment // whole-line comments before this syntax
	Suffix []Comment // end-of-line comments after this syntax

	// For top-level syntax elements only, After lists whole-line
	// comments following the syntax.
	After []Comment
}
// A lexer holds all state for scanning and parsing a single input,
// including the accumulated results of the parse.
type lexer struct {
	// input
	start int // pending token (set outside Lex) delivered once before scanning
	lexInput
	forcePos    Pos  // when set, overrides the computed position (see setSpan)
	c2goComment bool // inside /*c2go ... */ comment
	comments    map[Pos]Comment // comments recorded during scanning, keyed by start position

	// comment assignment
	pre      []Syntax // subexpressions ordered by start location, outer first
	post     []Syntax // subexpressions ordered by end location, outer last
	enumSeen map[interface{}]bool

	// type checking state
	scope     *Scope
	declCache map[string]*Decl

	// output
	errors []string
	prog   *Prog
	expr   *Expr
}
// AddTypeName tells the lexer that name is the name of a type.
// Registering the same name more than once is a no-op.
func AddTypeName(name string) {
	if _, ok := extraTypes[name]; !ok {
		extraTypes[name] = &Type{Kind: TypedefType, Name: name}
	}
}
// parse runs the yacc-generated parser over the lexer's input, defaulting
// wholeInput to the input text and lazily allocating the comment table.
func (lx *lexer) parse() {
	if lx.wholeInput == "" {
		lx.wholeInput = lx.input
	}
	if lx.comments == nil {
		lx.comments = make(map[Pos]Comment)
	}
	yyParse(lx)
}
// lexInput is the portion of lexer state describing the input text and the
// current read position within it.
type lexInput struct {
	wholeInput   string // entire original input, used for comment context
	input        string // unconsumed remainder of the input
	tok          string // text of the most recent token
	lastsym      string // most recent symbol, used in error messages
	file         string // current file name (updated by preprocessor line markers)
	lineno       int    // current line number
	column       int    // current column
	systemHeader bool   // inside a system header file
}
// pos returns the current input position, unless a position has been forced
// via setSpan, in which case the forced position wins.
func (lx *lexer) pos() Pos {
	if lx.forcePos.Line != 0 {
		return lx.forcePos
	}
	return Pos{lx.file, lx.lineno, lx.column}
}

// span returns a zero-width span at the current position.
func (lx *lexer) span() Span {
	p := lx.pos()
	return Span{p, p}
}

// setSpan forces subsequent pos() calls to report the start of s.
func (lx *lexer) setSpan(s Span) {
	lx.forcePos = s.Start
}
// span combines two spans into one covering both, treating a span whose
// start line is zero as "unset" and returning the other unchanged.
func span(l1, l2 Span) Span {
	switch {
	case l1.Start.Line == 0:
		return l2
	case l2.Start.Line == 0:
		return l1
	default:
		return Span{Start: l1.Start, End: l2.End}
	}
}
// skip consumes the first i bytes of the remaining input, updating the line
// and column counters for any newlines consumed.
func (lx *lexer) skip(i int) {
	lx.lineno += strings.Count(lx.input[:i], "\n")
	if nl := strings.LastIndex(lx.input[:i], "\n"); nl != -1 {
		// column restarts after the last newline consumed
		lx.column = i - nl
	} else {
		lx.column += i
	}
	lx.input = lx.input[i:]
}

// token records the next i bytes as the current token text and consumes them.
func (lx *lexer) token(i int) {
	lx.tok = lx.input[:i]
	lx.skip(i)
}

// sym is like token but also remembers the text as the last symbol seen,
// for use in error messages (see Error).
func (lx *lexer) sym(i int) {
	lx.token(i)
	lx.lastsym = lx.tok
}
// comment records the next i bytes of input as a Comment at the current
// position. The indentation preceding the comment is stripped from its
// continuation lines, and the comment is marked as a suffix comment when
// non-blank text precedes it on the same line.
func (lx *lexer) comment(i int) {
	var c Comment
	c.Span.Start = lx.pos()
	c.Text = lx.input[:i]

	// Walk backward over spaces and tabs to see what precedes the comment.
	j := len(lx.wholeInput) - len(lx.input)
	for j > 0 && (lx.wholeInput[j-1] == ' ' || lx.wholeInput[j-1] == '\t') {
		j--
	}
	if j > 0 && lx.wholeInput[j-1] != '\n' {
		// Something other than indentation precedes it: end-of-line comment.
		c.Suffix = true
	}

	// Strip the comment's leading indentation from each of its lines.
	prefix := lx.wholeInput[j : len(lx.wholeInput)-len(lx.input)]
	lines := strings.Split(c.Text, "\n")
	for i, line := range lines {
		if strings.HasPrefix(line, prefix) {
			lines[i] = line[len(prefix):]
		}
	}
	c.Text = strings.Join(lines, "\n")

	lx.skip(i)
	c.Span.End = lx.pos()
	lx.comments[c.Span.Start] = c
}
// isalpha reports whether c may appear in an identifier. Despite the name it
// also accepts digits (so it matches identifier *continuation* characters),
// plus '_' and any byte >= 0x80 (UTF-8 continuation/start bytes).
func isalpha(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
		return true
	case '0' <= c && c <= '9':
		return true
	case c == '_', c >= 0x80:
		return true
	}
	return false
}

// isspace reports whether c is an ASCII whitespace character.
func isspace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', '\v', '\f':
		return true
	}
	return false
}
// setEnd stamps the current position as the end of yy's span; invoked via
// defer in Lex so it runs after the token has been consumed.
func (lx *lexer) setEnd(yy *yySymType) {
	yy.span.End = lx.pos()
}
// Lex scans and returns the next token from the input, filling in yy with
// the token's span and any associated value (string text, declaration, or
// type). It implements the lexer interface expected by the yacc-generated
// parser. Comments, whitespace, most preprocessor lines, and the contents
// of system headers are consumed here and never reach the parser.
func (lx *lexer) Lex(yy *yySymType) (yyt int) {
	//defer func() { println("tok", yy.str, yyt) }()
	// A pending token (set outside Lex) is delivered exactly once first.
	if lx.start != 0 {
		tok := lx.start
		lx.start = 0
		return tok
	}
	*yy = yySymType{}
	defer lx.setEnd(yy)
Restart:
	yy.span.Start = lx.pos()
	in := lx.input
	if len(in) == 0 {
		return tokEOF
	}
	c := in[0]
	if lx.systemHeader && c != '#' {
		// Skip the contents of system header files.
		nl := strings.IndexByte(in, '\n')
		if nl == -1 {
			nl = len(in)
		} else {
			nl++
		}
		lx.skip(nl)
		goto Restart
	}
	if isspace(c) {
		i := 1
		for i < len(in) && isspace(in[i]) {
			i++
		}
		lx.skip(i)
		goto Restart
	}
	i := 0
	switch c {
	case '#':
		// Preprocessor line: consume through end of line, honoring
		// backslash-newline continuations.
		i++
		for in[i] != '\n' {
			if in[i] == '\\' && in[i+1] == '\n' && i+2 < len(in) {
				i++
			}
			i++
		}
		str := in[:i]
		// If this line is defining a constant (not a function-like macro), don't
		// ignore it.
		if f := strings.Fields(str); len(f) > 2 && f[0] == "#define" && !strings.Contains(f[1], "(") {
			lx.sym(len("#define"))
			return tokDefine
		}
		lx.skip(i + 1)
		// The preprocessor inserts lines that indicate what the current line number
		// and filename are. If this is one of those, read it.
		var line int
		var file string
		if n, _ := fmt.Sscanf(str, "# %d %q", &line, &file); n == 2 {
			lx.file, lx.lineno = file, line
			lx.systemHeader = false
			if strings.HasSuffix(file, ".h") {
				for _, p := range systemHeaderPaths {
					if strings.HasPrefix(file, p) {
						lx.systemHeader = true
						break
					}
				}
			}
		}
		goto Restart
	case 'L':
		// L'...' and L"..." wide literals; a bare L is an identifier.
		if in[1] != '\'' && in[1] != '"' {
			break // goes to alpha case after switch
		}
		i = 1
		fallthrough
	case '"', '\'':
		// String or character constant, honoring backslash escapes.
		q := in[i]
		i++ // for the quote
		for ; in[i] != q; i++ {
			if in[i] == '\n' {
				what := "string"
				if q == '\'' {
					what = "character"
				}
				lx.Errorf("unterminated %s constant", what)
			}
			if in[i] == '\\' {
				i++
			}
		}
		i++ // for the quote
		lx.sym(i)
		yy.str = lx.tok
		if q == '"' {
			return tokString
		} else {
			return tokLitChar
		}
	case '.':
		// '.' may start a floating constant, be "..." or a plain dot.
		if in[1] < '0' || '9' < in[1] {
			if in[1] == '.' && in[2] == '.' {
				lx.token(3)
				return tokDotDotDot
			}
			lx.token(1)
			return int(c)
		}
		fallthrough
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		// Numeric constant: digits, letters (hex digits and suffixes),
		// '.', and exponent signs immediately after 'e'/'E'.
		for '0' <= in[i] && in[i] <= '9' || in[i] == '.' || 'A' <= in[i] && in[i] <= 'Z' || 'a' <= in[i] && in[i] <= 'z' || (in[i] == '+' || in[i] == '-') && (in[i-1] == 'e' || in[i-1] == 'E') {
			i++
		}
		lx.sym(i)
		yy.str = lx.tok
		return tokNumber
	case '/':
		switch in[1] {
		case '*':
			// /*c2go ...*/ exposes its contents to the lexer rather
			// than being recorded as a comment.
			if strings.HasPrefix(in, "/*c2go") {
				lx.skip(6)
				lx.c2goComment = true
				goto Restart
			}
			i := 2
			for ; ; i++ {
				if i+2 <= len(in) && in[i] == '*' && in[i+1] == '/' {
					i += 2
					break
				}
				if i >= len(in) {
					lx.Errorf("unterminated /* comment")
					return tokError
				}
			}
			lx.comment(i)
			goto Restart
		case '/':
			for in[i] != '\n' {
				i++
			}
			lx.comment(i)
			// A blank line after a // comment is recorded as an empty
			// comment so blank separation survives comment assignment.
			if len(lx.input) >= 2 && lx.input[0] == '\n' && lx.input[1] == '\n' {
				lx.skip(1)
				lx.comment(0)
			}
			goto Restart
		}
		fallthrough
	case '~', '*', '(', ')', '[', ']', '{', '}', '?', ':', ';', ',', '%', '^', '!', '=', '<', '>', '+', '-', '&', '|':
		// Operators: check closing */ of a c2go comment, then the longest
		// match among <op><op>=, <op><op>, <op>=, and the single byte.
		if lx.c2goComment && in[0] == '*' && in[1] == '/' {
			lx.c2goComment = false
			lx.skip(2)
			goto Restart
		}
		if c == '-' && in[1] == '>' {
			lx.token(2)
			return tokArrow
		}
		if in[1] == '=' && tokEq[c] != 0 {
			lx.token(2)
			return int(tokEq[c])
		}
		if in[1] == c && tokTok[c] != 0 {
			if in[2] == '=' && tokTokEq[c] != 0 {
				lx.token(3)
				return int(tokTokEq[c])
			}
			lx.token(2)
			return int(tokTok[c])
		}
		lx.token(1)
		return int(c)
	}
	if isalpha(c) {
		for isalpha(in[i]) {
			i++
		}
		lx.sym(i)
		// Normalize spellings for the Go translation: union is handled
		// like struct, and NULL becomes nil.
		switch lx.tok {
		case "union":
			lx.tok = "struct"
		case "NULL":
			lx.tok = "nil"
		}
		yy.str = lx.tok
		if t := tokId[lx.tok]; t != 0 {
			return int(t)
		}
		// Not a keyword: decide between a type name and a plain identifier.
		yy.decl = lx.lookupDecl(lx.tok)
		if yy.decl != nil && yy.decl.Storage&Typedef != 0 {
			// Resolve typedef chains to the underlying type.
			t := yy.decl.Type
			for t.Kind == TypedefType && t.Base != nil {
				t = t.Base
			}
			yy.typ = &Type{Kind: TypedefType, Name: yy.str, Base: t, TypeDecl: yy.decl}
			return tokTypeName
		}
		if lx.tok == "EXTERN" {
			goto Restart
		}
		if t, ok := extraTypes[lx.tok]; ok {
			yy.typ = t
			return tokTypeName
		}
		return tokName
	}
	lx.Errorf("unexpected input byte %#02x (%c)", c, c)
	return tokError
}
// systemHeaderPaths lists filename prefixes that identify system header
// files, whose contents are skipped by the lexer (see Lex).
var systemHeaderPaths = []string{
	"/usr/include",
	"/Library/Developer",
	"/Applications/Xcode.app",
}
// extraTypes maps well-known C type names to their translations.
// Additional names can be registered at runtime via AddTypeName.
var extraTypes = map[string]*Type{
	"bool":      BoolType,
	"_Bool":     BoolType,
	"FILE":      &Type{Kind: TypedefType, Name: "os.File"},
	"int8_t":    CharType,
	"int16_t":   ShortType,
	"int32_t":   Int32Type,
	"int64_t":   LonglongType,
	"intmax_t":  LonglongType,
	"intptr_t":  IntType,
	"ptrdiff_t": IntType,
	"size_t":    UintType,
	"ssize_t":   IntType,
	"time_t":    IntType,
	"u_short":   UshortType,
	"u_int":     UintType,
	"u_long":    UlongType,
	"uint":      UintType,
	"uint8_t":   UcharType,
	"uint16_t":  UshortType,
	"uint32_t":  Uint32Type,
	"uint64_t":  UlonglongType,
	"uintptr_t": UintType,
	"va_list":   &Type{Kind: TypedefType, Name: "va_list"},
}
// Error implements the parser's error hook, reporting s at the last symbol.
func (lx *lexer) Error(s string) {
	lx.Errorf("%s near %s", s, lx.lastsym)
}

// Errorf records a formatted error message prefixed with the current span.
func (lx *lexer) Errorf(format string, args ...interface{}) {
	lx.errors = append(lx.errors, fmt.Sprintf("%s: %s", lx.span(), fmt.Sprintf(format, args...)))
}
// A Pos is a position in an input file: file name, line, and column.
type Pos struct {
	File string
	Line int
	Col  int
}

// Less reports whether a sorts before b, ordering by file name,
// then line number, then column.
func (a Pos) Less(b Pos) bool {
	if a.File != b.File {
		return a.File < b.File
	}
	if a.Line != b.Line {
		return a.Line < b.Line
	}
	return a.Col < b.Col
}
// A Span is a source range, from Start up to End.
type Span struct {
	Start Pos
	End   Pos
}

// String formats the span as "file:line" using its start position only.
func (l Span) String() string {
	return fmt.Sprintf("%s:%d", l.Start.File, l.Start.Line)
}
// A Comment is a single source comment with its location.
type Comment struct {
	Span
	Text   string
	Suffix bool // comment follows code on the same line rather than standing alone
}

// GetSpan returns the comment's source range.
func (c Comment) GetSpan() Span {
	return c.Span
}
// tokEq maps an operator byte to the token for its "<op>=" form
// (e.g. '+' -> tokAddEq for "+=").
var tokEq = [256]int32{
	'*': tokMulEq,
	'/': tokDivEq,
	'+': tokAddEq,
	'-': tokSubEq,
	'%': tokModEq,
	'^': tokXorEq,
	'!': tokNotEq,
	'=': tokEqEq,
	'<': tokLtEq,
	'>': tokGtEq,
	'&': tokAndEq,
	'|': tokOrEq,
}

// tokTok maps an operator byte to the token for its doubled form
// (e.g. '<' -> tokLsh for "<<", '+' -> tokInc for "++").
var tokTok = [256]int32{
	'<': tokLsh,
	'>': tokRsh,
	'=': tokEqEq,
	'+': tokInc,
	'-': tokDec,
	'&': tokAndAnd,
	'|': tokOrOr,
}

// tokTokEq maps an operator byte to the token for its doubled-plus-equals
// form (e.g. '<' -> tokLshEq for "<<=").
var tokTokEq = [256]int32{
	'<': tokLshEq,
	'>': tokRshEq,
}
var tokId = map[string]int32{
"auto": tokAuto,
"break": tokBreak,
"case": tokCase,
"char": tokChar,
"const": tokConst,
"continue": tokContinue,
"default": tokDefault,
"do": tokDo,
"double": tokDouble,
"else": tokElse,
"enum": tokEnum,
"extern": tokExtern,
"float": tokFloat,
"for": tokFor,
"goto": tokGoto,
"if": tokIf,
"inline": tokInline,
"int": tokInt,
"long": tokLong,
"offsetof": tokOffsetof,
"register": tokRegister,
"return": tokReturn,
"short": tokShort,
"signed": tokSigned,
"sizeof": tokSizeof,
"static": tokStatic,
"struct": tokStruct,
"switch": tokSwitch,
"typedef": tokTypedef,
"union": tokUnion,
"unsigned": tokUnsigned,
"va_arg": tokVaArg,
"void": tokVoid,
"volatile": tokVolatile,
"while": tokWhile,
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// enum walks the expression adding it and its subexpressions to the pre list.
// The order may not reflect the order in the input.
func (lx *lexer) enum(x Syntax) {
switch x := x.(type) {
default:
panic(fmt.Errorf("order: unexpected type %T", x))
case nil:
return
case *Expr:
if x == nil {
return
}
lx.enum(x.Left)
lx.enum(x.Right)
for _, y := range x.List {
lx.enum(y)
}
case *Init:
if x == nil {
return
}
lx.enum(x.Expr)
for _, y := range x.Braced {
lx.enum(y)
}
case *Prog:
if x == nil {
return
}
for _, y := range x.Decls {
lx.enum(y)
}
case *Stmt:
if x == nil {
return
}
for _, y := range x.Labels {
lx.enum(y)
}
lx.enum(x.Pre)
lx.enum(x.Expr)
lx.enum(x.Post)
lx.enum(x.Body)
lx.enum(x.Else)
lx.enum(x.Decl)
for _, y := range x.Block {
lx.enum(y)
}
case *Label:
// ok
case *Decl:
if x == nil {
return
}
if lx.enumSeen[x] {
return
}
lx.enumSeen[x] = true
lx.enum(x.Type)
lx.enum(x.Init)
lx.enum(x.Body)
case *Type:
if x == nil {
return
}
lx.enum(x.Base)
for _, y := range x.Decls {
lx.enum(y)
}
return // do not record type itself, just inner decls
}
lx.pre = append(lx.pre, x)
}
func (lx *lexer) order(prog *Prog) {
lx.enumSeen = make(map[interface{}]bool)
lx.enum(prog)
sort.Sort(byStart(lx.pre))
lx.post = make([]Syntax, len(lx.pre))
copy(lx.post, lx.pre)
sort.Sort(byEnd(lx.post))
}
type byStart []Syntax
func (x byStart) Len() int { return len(x) }
func (x byStart) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byStart) Less(i, j int) bool {
pi := x[i].GetSpan()
pj := x[j].GetSpan()
// Order by start, leftmost first,
// and break ties by choosing outer before inner.
switch {
case pi.Start.Less(pj.Start):
return true
case pj.Start.Less(pi.Start):
return false
default:
return pj.End.Less(pi.End)
}
}
type byEnd []Syntax
func (x byEnd) Len() int { return len(x) }
func (x byEnd) | (i, j int) { x[i], x[j] = x[j], x[i] }
func (x byEnd) Less(i, j int) bool {
pi := x[i].GetSpan()
pj := x[j].GetSpan()
// Order by end, leftmost first,
// and break ties by choosing inner before outer.
switch {
case pi.End.Less(pj.End):
return true
case pj.End.Less(pi.End):
return false
default:
return pi.Start.Less(pj.Start)
}
}
type commentList []Comment
func (x commentList) Len() int { return len(x) }
func (x commentList) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x commentList) Less(i, j int) bool {
return x[i].Start.Less(x[j].Start)
}
// assignComments attaches comments to nearby syntax.
func (lx *lexer) assignComments() {
// Generate preorder and postorder lists.
lx.order(lx.prog)
// Split into whole-line comments and suffix comments.
var line, suffix commentList
for _, com := range lx.comments {
if com.Suffix {
suffix = append(suffix, com)
} else {
line = append(line, com)
}
}
sort.Sort(line)
sort.Sort(suffix)
currentFile := ""
// Assign line comments to syntax immediately following.
for _, x := range lx.pre {
if _, ok := x.(*Init); ok {
// Don't assign comments to an initializer; skip it and go to the declaration.
continue
}
start := x.GetSpan().Start
xcom := x.GetComments()
if start.File != currentFile {
// Starting a new file. Make sure we catch the comment block at the start of the file,
// even if they aren't close to a declaration.
currentFile = start.File
for len(line) > 0 && line[0].Start.File < currentFile {
line = line[1:]
}
header := 0
for header < len(line) && line[header].End.Less(start) && (header == 0 || line[header-1].End.Line >= line[header].Start.Line-2) {
header++
}
xcom.Before = append(xcom.Before, line[:header]...)
line = line[header:]
}
end := 0
for end < len(line) && line[end].Start.Less(start) {
end++
}
// Now line[0:end] are the comments that come before x.
first := end
if first > 0 && line[first-1].End.File == start.File && line[first-1].End.Line >= start.Line-5 {
first--
for first > 0 && line[first-1].End.File == line[first].Start.File && line[first-1].End.Line >= line[first].Start.Line-2 {
first--
}
// Now line[first:end] are the comments that come before x,
// separated from x by no more than 4 lines, and from each other by no more than one line.
xcom.Before = append(xcom.Before, line[first:end]...)
}
line = line[end:]
}
// Remaining line comments go at end of file.
lx.prog.Comments.After = append(lx.prog.Comments.After, line...)
// Assign suffix comments to syntax immediately before.
for i := len(lx.post) - 1; i >= 0; i-- {
x := lx.post[i]
// Do not assign suffix comments to call, list, end-of-list, or whole file.
// Instead assign them to the last argument, element, or rule.
/*
switch x.(type) {
case *CallExpr, *ListExpr, *End, *File:
continue
}
*/
// Do not assign suffix comments to something that starts
// on an earlier line, so that in
//
// tags = [ "a",
// "b" ], # comment
//
// we assign the comment to "b" and not to tags = [ ... ].
span := x.GetSpan()
start, end := span.Start, span.End
if start.Line != end.Line {
continue
}
xcom := x.GetComments()
for len(suffix) > 0 && end.Less(suffix[len(suffix)-1].Start) {
c := suffix[len(suffix)-1]
if c.Start.Line == end.Line {
xcom.Suffix = append(xcom.Suffix, c)
}
suffix = suffix[:len(suffix)-1]
}
}
// We assigned suffix comments in reverse.
// If multiple suffix comments were appended to the same
// expression node, they are now in reverse. Fix that.
for _, x := range lx.post {
reverseComments(x.GetComments().Suffix)
}
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
}
| Swap | identifier_name |
lex.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go tool yacc cc.y
package cc
import (
"fmt"
"sort"
"strings"
)
// A Syntax represents any syntax element.
type Syntax interface {
// GetSpan returns the start and end position of the syntax,
// excluding leading or trailing comments.
// The use of a Get prefix is non-standard but avoids a conflict
// with the field named Span in most implementations.
GetSpan() Span
// GetComments returns the comments attached to the syntax.
// This method would normally be named 'Comments' but that
// would interfere with embedding a type of the same name.
// The use of a Get prefix is non-standard but avoids a conflict
// with the field named Comments in most implementations.
GetComments() *Comments
}
// SyntaxInfo contains metadata about a piece of syntax.
type SyntaxInfo struct {
Span Span // location of syntax in input
Comments Comments
}
func (s *SyntaxInfo) GetSpan() Span {
return s.Span
}
func (s *SyntaxInfo) GetComments() *Comments {
return &s.Comments
}
// Comments collects the comments associated with a syntax element.
type Comments struct {
Before []Comment // whole-line comments before this syntax
Suffix []Comment // end-of-line comments after this syntax
// For top-level syntax elements only, After lists whole-line
// comments following the syntax.
After []Comment
}
type lexer struct {
// input
start int
lexInput
forcePos Pos
c2goComment bool // inside /*c2go ... */ comment
comments map[Pos]Comment
// comment assignment
pre []Syntax
post []Syntax
enumSeen map[interface{}]bool
// type checking state
scope *Scope
declCache map[string]*Decl
// output
errors []string
prog *Prog
expr *Expr
}
// AddTypeName tells the lexer that name is the name of a type.
func AddTypeName(name string) {
if _, ok := extraTypes[name]; !ok {
extraTypes[name] = &Type{Kind: TypedefType, Name: name}
}
}
func (lx *lexer) parse() {
if lx.wholeInput == "" {
lx.wholeInput = lx.input
}
if lx.comments == nil {
lx.comments = make(map[Pos]Comment)
}
yyParse(lx)
}
type lexInput struct {
wholeInput string
input string
tok string
lastsym string
file string
lineno int
column int
systemHeader bool // inside a system header file
}
func (lx *lexer) pos() Pos {
if lx.forcePos.Line != 0 {
return lx.forcePos
}
return Pos{lx.file, lx.lineno, lx.column}
}
func (lx *lexer) span() Span {
p := lx.pos()
return Span{p, p}
}
func (lx *lexer) setSpan(s Span) {
lx.forcePos = s.Start
}
func span(l1, l2 Span) Span {
if l1.Start.Line == 0 {
return l2
}
if l2.Start.Line == 0 {
return l1
}
return Span{l1.Start, l2.End}
}
func (lx *lexer) skip(i int) {
lx.lineno += strings.Count(lx.input[:i], "\n")
if nl := strings.LastIndex(lx.input[:i], "\n"); nl != -1 {
lx.column = i - nl
} else {
lx.column += i
}
lx.input = lx.input[i:]
}
func (lx *lexer) token(i int) {
lx.tok = lx.input[:i]
lx.skip(i)
}
func (lx *lexer) sym(i int) {
lx.token(i)
lx.lastsym = lx.tok
}
func (lx *lexer) comment(i int) {
var c Comment
c.Span.Start = lx.pos()
c.Text = lx.input[:i]
j := len(lx.wholeInput) - len(lx.input)
for j > 0 && (lx.wholeInput[j-1] == ' ' || lx.wholeInput[j-1] == '\t') {
j--
}
if j > 0 && lx.wholeInput[j-1] != '\n' {
c.Suffix = true
}
prefix := lx.wholeInput[j : len(lx.wholeInput)-len(lx.input)]
lines := strings.Split(c.Text, "\n")
for i, line := range lines {
if strings.HasPrefix(line, prefix) {
lines[i] = line[len(prefix):]
}
}
c.Text = strings.Join(lines, "\n")
lx.skip(i)
c.Span.End = lx.pos()
lx.comments[c.Span.Start] = c
}
func isalpha(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '_' || c >= 0x80 || '0' <= c && c <= '9'
}
func isspace(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\v' || c == '\f'
}
func (lx *lexer) setEnd(yy *yySymType) {
yy.span.End = lx.pos()
}
func (lx *lexer) Lex(yy *yySymType) (yyt int) {
//defer func() { println("tok", yy.str, yyt) }()
if lx.start != 0 {
tok := lx.start
lx.start = 0
return tok
}
*yy = yySymType{}
defer lx.setEnd(yy)
Restart:
yy.span.Start = lx.pos()
in := lx.input
if len(in) == 0 {
return tokEOF
}
c := in[0]
if lx.systemHeader && c != '#' {
// Skip the contents of system header files.
nl := strings.IndexByte(in, '\n')
if nl == -1 {
nl = len(in)
} else {
nl++
}
lx.skip(nl)
goto Restart
}
if isspace(c) {
i := 1
for i < len(in) && isspace(in[i]) {
i++
}
lx.skip(i)
goto Restart
}
i := 0
switch c {
case '#':
i++
for in[i] != '\n' {
if in[i] == '\\' && in[i+1] == '\n' && i+2 < len(in) {
i++
}
i++
}
str := in[:i]
// If this line is defining a constant (not a function-like macro), don't
// ignore it.
if f := strings.Fields(str); len(f) > 2 && f[0] == "#define" && !strings.Contains(f[1], "(") {
lx.sym(len("#define"))
return tokDefine
}
lx.skip(i + 1)
// The preprocessor inserts lines that indicate what the current line number
// and filename are. If this is one of those, read it.
var line int
var file string
if n, _ := fmt.Sscanf(str, "# %d %q", &line, &file); n == 2 {
lx.file, lx.lineno = file, line
lx.systemHeader = false
if strings.HasSuffix(file, ".h") {
for _, p := range systemHeaderPaths {
if strings.HasPrefix(file, p) {
lx.systemHeader = true
break
}
}
}
}
goto Restart
case 'L':
if in[1] != '\'' && in[1] != '"' {
break // goes to alpha case after switch
}
i = 1
fallthrough
case '"', '\'':
q := in[i]
i++ // for the quote
for ; in[i] != q; i++ {
if in[i] == '\n' {
what := "string"
if q == '\'' {
what = "character"
}
lx.Errorf("unterminated %s constant", what)
}
if in[i] == '\\' {
i++
}
}
i++ // for the quote
lx.sym(i)
yy.str = lx.tok
if q == '"' {
return tokString
} else {
return tokLitChar
}
case '.':
if in[1] < '0' || '9' < in[1] {
if in[1] == '.' && in[2] == '.' {
lx.token(3)
return tokDotDotDot
}
lx.token(1)
return int(c)
}
fallthrough
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
for '0' <= in[i] && in[i] <= '9' || in[i] == '.' || 'A' <= in[i] && in[i] <= 'Z' || 'a' <= in[i] && in[i] <= 'z' || (in[i] == '+' || in[i] == '-') && (in[i-1] == 'e' || in[i-1] == 'E') {
i++
}
lx.sym(i)
yy.str = lx.tok
return tokNumber
case '/':
switch in[1] {
case '*':
if strings.HasPrefix(in, "/*c2go") {
lx.skip(6)
lx.c2goComment = true
goto Restart
}
i := 2
for ; ; i++ {
if i+2 <= len(in) && in[i] == '*' && in[i+1] == '/' {
i += 2
break
}
if i >= len(in) {
lx.Errorf("unterminated /* comment")
return tokError
}
}
lx.comment(i)
goto Restart
case '/':
for in[i] != '\n' {
i++
}
lx.comment(i)
if len(lx.input) >= 2 && lx.input[0] == '\n' && lx.input[1] == '\n' {
lx.skip(1)
lx.comment(0)
}
goto Restart
}
fallthrough
case '~', '*', '(', ')', '[', ']', '{', '}', '?', ':', ';', ',', '%', '^', '!', '=', '<', '>', '+', '-', '&', '|':
if lx.c2goComment && in[0] == '*' && in[1] == '/' {
lx.c2goComment = false
lx.skip(2)
goto Restart
}
if c == '-' && in[1] == '>' {
lx.token(2)
return tokArrow
}
if in[1] == '=' && tokEq[c] != 0 {
lx.token(2)
return int(tokEq[c])
}
if in[1] == c && tokTok[c] != 0 {
if in[2] == '=' && tokTokEq[c] != 0 {
lx.token(3)
return int(tokTokEq[c])
}
lx.token(2)
return int(tokTok[c])
}
lx.token(1)
return int(c)
}
if isalpha(c) {
for isalpha(in[i]) {
i++
}
lx.sym(i)
switch lx.tok {
case "union":
lx.tok = "struct"
case "NULL":
lx.tok = "nil"
}
yy.str = lx.tok
if t := tokId[lx.tok]; t != 0 {
return int(t)
}
yy.decl = lx.lookupDecl(lx.tok)
if yy.decl != nil && yy.decl.Storage&Typedef != 0 {
t := yy.decl.Type
for t.Kind == TypedefType && t.Base != nil {
t = t.Base
}
yy.typ = &Type{Kind: TypedefType, Name: yy.str, Base: t, TypeDecl: yy.decl}
return tokTypeName
}
if lx.tok == "EXTERN" {
goto Restart
}
if t, ok := extraTypes[lx.tok]; ok {
yy.typ = t
return tokTypeName
}
return tokName
}
lx.Errorf("unexpected input byte %#02x (%c)", c, c)
return tokError
}
var systemHeaderPaths = []string{
"/usr/include",
"/Library/Developer",
"/Applications/Xcode.app",
}
var extraTypes = map[string]*Type{
"bool": BoolType,
"_Bool": BoolType,
"FILE": &Type{Kind: TypedefType, Name: "os.File"},
"int8_t": CharType,
"int16_t": ShortType,
"int32_t": Int32Type,
"int64_t": LonglongType,
"intmax_t": LonglongType,
"intptr_t": IntType,
"ptrdiff_t": IntType,
"size_t": UintType,
"ssize_t": IntType,
"time_t": IntType,
"u_short": UshortType,
"u_int": UintType,
"u_long": UlongType,
"uint": UintType,
"uint8_t": UcharType,
"uint16_t": UshortType,
"uint32_t": Uint32Type,
"uint64_t": UlonglongType,
"uintptr_t": UintType,
"va_list": &Type{Kind: TypedefType, Name: "va_list"},
}
func (lx *lexer) Error(s string) {
lx.Errorf("%s near %s", s, lx.lastsym)
}
func (lx *lexer) Errorf(format string, args ...interface{}) {
lx.errors = append(lx.errors, fmt.Sprintf("%s: %s", lx.span(), fmt.Sprintf(format, args...)))
}
type Pos struct {
File string
Line int
Col int
}
func (a Pos) Less(b Pos) bool {
switch {
case a.File < b.File:
return true
case a.File > b.File:
return false
case a.Line < b.Line:
return true
case a.Line > b.Line:
return false
case a.Col < b.Col:
return true
default:
return false
}
}
type Span struct {
Start Pos
End Pos
}
func (l Span) String() string {
return fmt.Sprintf("%s:%d", l.Start.File, l.Start.Line)
}
type Comment struct {
Span
Text string
Suffix bool
}
func (c Comment) GetSpan() Span {
return c.Span
}
var tokEq = [256]int32{
'*': tokMulEq,
'/': tokDivEq,
'+': tokAddEq,
'-': tokSubEq,
'%': tokModEq,
'^': tokXorEq,
'!': tokNotEq,
'=': tokEqEq,
'<': tokLtEq,
'>': tokGtEq,
'&': tokAndEq,
'|': tokOrEq,
}
var tokTok = [256]int32{
'<': tokLsh,
'>': tokRsh,
'=': tokEqEq,
'+': tokInc,
'-': tokDec,
'&': tokAndAnd,
'|': tokOrOr,
}
var tokTokEq = [256]int32{
'<': tokLshEq,
'>': tokRshEq,
}
var tokId = map[string]int32{
"auto": tokAuto,
"break": tokBreak,
"case": tokCase,
"char": tokChar,
"const": tokConst,
"continue": tokContinue,
"default": tokDefault,
"do": tokDo,
"double": tokDouble,
"else": tokElse,
"enum": tokEnum,
"extern": tokExtern,
"float": tokFloat,
"for": tokFor,
"goto": tokGoto,
"if": tokIf,
"inline": tokInline,
"int": tokInt,
"long": tokLong,
"offsetof": tokOffsetof,
"register": tokRegister,
"return": tokReturn,
"short": tokShort,
"signed": tokSigned,
"sizeof": tokSizeof,
"static": tokStatic,
"struct": tokStruct,
"switch": tokSwitch,
"typedef": tokTypedef,
"union": tokUnion,
"unsigned": tokUnsigned,
"va_arg": tokVaArg,
"void": tokVoid,
"volatile": tokVolatile,
"while": tokWhile,
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// enum walks the expression adding it and its subexpressions to the pre list.
// The order may not reflect the order in the input.
func (lx *lexer) enum(x Syntax) {
switch x := x.(type) {
default:
panic(fmt.Errorf("order: unexpected type %T", x))
case nil:
return
case *Expr:
if x == nil {
return
}
lx.enum(x.Left)
lx.enum(x.Right)
for _, y := range x.List {
lx.enum(y)
}
case *Init:
if x == nil {
return
}
lx.enum(x.Expr)
for _, y := range x.Braced {
lx.enum(y)
}
case *Prog:
if x == nil {
return
}
for _, y := range x.Decls {
lx.enum(y)
}
case *Stmt:
if x == nil {
return
}
for _, y := range x.Labels {
lx.enum(y)
}
lx.enum(x.Pre)
lx.enum(x.Expr)
lx.enum(x.Post)
lx.enum(x.Body)
lx.enum(x.Else)
lx.enum(x.Decl)
for _, y := range x.Block {
lx.enum(y)
}
case *Label:
// ok
case *Decl:
if x == nil {
return
}
if lx.enumSeen[x] {
return
}
lx.enumSeen[x] = true
lx.enum(x.Type)
lx.enum(x.Init)
lx.enum(x.Body)
case *Type:
if x == nil {
return
}
lx.enum(x.Base)
for _, y := range x.Decls {
lx.enum(y)
}
return // do not record type itself, just inner decls
}
lx.pre = append(lx.pre, x)
}
func (lx *lexer) order(prog *Prog) {
lx.enumSeen = make(map[interface{}]bool)
lx.enum(prog)
sort.Sort(byStart(lx.pre))
lx.post = make([]Syntax, len(lx.pre))
copy(lx.post, lx.pre)
sort.Sort(byEnd(lx.post))
}
type byStart []Syntax
func (x byStart) Len() int { return len(x) }
func (x byStart) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byStart) Less(i, j int) bool {
pi := x[i].GetSpan()
pj := x[j].GetSpan()
// Order by start, leftmost first,
// and break ties by choosing outer before inner.
switch {
case pi.Start.Less(pj.Start):
return true
case pj.Start.Less(pi.Start):
return false
default:
return pj.End.Less(pi.End)
}
}
type byEnd []Syntax
func (x byEnd) Len() int { return len(x) }
func (x byEnd) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byEnd) Less(i, j int) bool {
pi := x[i].GetSpan()
pj := x[j].GetSpan()
// Order by end, leftmost first,
// and break ties by choosing inner before outer.
switch {
case pi.End.Less(pj.End):
return true
case pj.End.Less(pi.End):
return false
default:
return pi.Start.Less(pj.Start)
}
}
type commentList []Comment
func (x commentList) Len() int { return len(x) }
func (x commentList) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x commentList) Less(i, j int) bool {
return x[i].Start.Less(x[j].Start)
}
// assignComments attaches comments to nearby syntax.
func (lx *lexer) assignComments() {
// Generate preorder and postorder lists.
lx.order(lx.prog)
// Split into whole-line comments and suffix comments.
var line, suffix commentList
for _, com := range lx.comments {
if com.Suffix {
suffix = append(suffix, com)
} else {
line = append(line, com)
}
}
sort.Sort(line)
sort.Sort(suffix)
currentFile := ""
// Assign line comments to syntax immediately following.
for _, x := range lx.pre {
if _, ok := x.(*Init); ok {
// Don't assign comments to an initializer; skip it and go to the declaration.
continue
}
start := x.GetSpan().Start
xcom := x.GetComments()
if start.File != currentFile {
// Starting a new file. Make sure we catch the comment block at the start of the file,
// even if they aren't close to a declaration.
currentFile = start.File
for len(line) > 0 && line[0].Start.File < currentFile {
line = line[1:]
}
header := 0
for header < len(line) && line[header].End.Less(start) && (header == 0 || line[header-1].End.Line >= line[header].Start.Line-2) {
header++
}
xcom.Before = append(xcom.Before, line[:header]...)
line = line[header:]
}
end := 0
for end < len(line) && line[end].Start.Less(start) {
end++
}
// Now line[0:end] are the comments that come before x.
first := end
if first > 0 && line[first-1].End.File == start.File && line[first-1].End.Line >= start.Line-5 {
first--
for first > 0 && line[first-1].End.File == line[first].Start.File && line[first-1].End.Line >= line[first].Start.Line-2 {
first--
}
// Now line[first:end] are the comments that come before x,
// separated from x by no more than 4 lines, and from each other by no more than one line.
xcom.Before = append(xcom.Before, line[first:end]...)
}
line = line[end:]
}
// Remaining line comments go at end of file.
lx.prog.Comments.After = append(lx.prog.Comments.After, line...)
// Assign suffix comments to syntax immediately before.
for i := len(lx.post) - 1; i >= 0; i-- {
x := lx.post[i]
// Do not assign suffix comments to call, list, end-of-list, or whole file.
// Instead assign them to the last argument, element, or rule.
/*
switch x.(type) {
case *CallExpr, *ListExpr, *End, *File:
continue
}
*/
// Do not assign suffix comments to something that starts
// on an earlier line, so that in
// | // we assign the comment to "b" and not to tags = [ ... ].
span := x.GetSpan()
start, end := span.Start, span.End
if start.Line != end.Line {
continue
}
xcom := x.GetComments()
for len(suffix) > 0 && end.Less(suffix[len(suffix)-1].Start) {
c := suffix[len(suffix)-1]
if c.Start.Line == end.Line {
xcom.Suffix = append(xcom.Suffix, c)
}
suffix = suffix[:len(suffix)-1]
}
}
// We assigned suffix comments in reverse.
// If multiple suffix comments were appended to the same
// expression node, they are now in reverse. Fix that.
for _, x := range lx.post {
reverseComments(x.GetComments().Suffix)
}
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
} | // tags = [ "a",
// "b" ], # comment
// | random_line_split |
lex.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go tool yacc cc.y
package cc
import (
"fmt"
"sort"
"strings"
)
// A Syntax represents any syntax element.
type Syntax interface {
// GetSpan returns the start and end position of the syntax,
// excluding leading or trailing comments.
// The use of a Get prefix is non-standard but avoids a conflict
// with the field named Span in most implementations.
GetSpan() Span
// GetComments returns the comments attached to the syntax.
// This method would normally be named 'Comments' but that
// would interfere with embedding a type of the same name.
// The use of a Get prefix is non-standard but avoids a conflict
// with the field named Comments in most implementations.
GetComments() *Comments
}
// SyntaxInfo contains metadata about a piece of syntax.
type SyntaxInfo struct {
Span Span // location of syntax in input
Comments Comments
}
func (s *SyntaxInfo) GetSpan() Span {
return s.Span
}
func (s *SyntaxInfo) GetComments() *Comments {
return &s.Comments
}
// Comments collects the comments associated with a syntax element.
type Comments struct {
Before []Comment // whole-line comments before this syntax
Suffix []Comment // end-of-line comments after this syntax
// For top-level syntax elements only, After lists whole-line
// comments following the syntax.
After []Comment
}
type lexer struct {
// input
start int
lexInput
forcePos Pos
c2goComment bool // inside /*c2go ... */ comment
comments map[Pos]Comment
// comment assignment
pre []Syntax
post []Syntax
enumSeen map[interface{}]bool
// type checking state
scope *Scope
declCache map[string]*Decl
// output
errors []string
prog *Prog
expr *Expr
}
// AddTypeName tells the lexer that name is the name of a type.
func AddTypeName(name string) {
if _, ok := extraTypes[name]; !ok {
extraTypes[name] = &Type{Kind: TypedefType, Name: name}
}
}
func (lx *lexer) parse() |
type lexInput struct {
wholeInput string
input string
tok string
lastsym string
file string
lineno int
column int
systemHeader bool // inside a system header file
}
func (lx *lexer) pos() Pos {
if lx.forcePos.Line != 0 {
return lx.forcePos
}
return Pos{lx.file, lx.lineno, lx.column}
}
func (lx *lexer) span() Span {
p := lx.pos()
return Span{p, p}
}
func (lx *lexer) setSpan(s Span) {
lx.forcePos = s.Start
}
func span(l1, l2 Span) Span {
if l1.Start.Line == 0 {
return l2
}
if l2.Start.Line == 0 {
return l1
}
return Span{l1.Start, l2.End}
}
func (lx *lexer) skip(i int) {
lx.lineno += strings.Count(lx.input[:i], "\n")
if nl := strings.LastIndex(lx.input[:i], "\n"); nl != -1 {
lx.column = i - nl
} else {
lx.column += i
}
lx.input = lx.input[i:]
}
func (lx *lexer) token(i int) {
lx.tok = lx.input[:i]
lx.skip(i)
}
func (lx *lexer) sym(i int) {
lx.token(i)
lx.lastsym = lx.tok
}
func (lx *lexer) comment(i int) {
var c Comment
c.Span.Start = lx.pos()
c.Text = lx.input[:i]
j := len(lx.wholeInput) - len(lx.input)
for j > 0 && (lx.wholeInput[j-1] == ' ' || lx.wholeInput[j-1] == '\t') {
j--
}
if j > 0 && lx.wholeInput[j-1] != '\n' {
c.Suffix = true
}
prefix := lx.wholeInput[j : len(lx.wholeInput)-len(lx.input)]
lines := strings.Split(c.Text, "\n")
for i, line := range lines {
if strings.HasPrefix(line, prefix) {
lines[i] = line[len(prefix):]
}
}
c.Text = strings.Join(lines, "\n")
lx.skip(i)
c.Span.End = lx.pos()
lx.comments[c.Span.Start] = c
}
func isalpha(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '_' || c >= 0x80 || '0' <= c && c <= '9'
}
func isspace(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\v' || c == '\f'
}
func (lx *lexer) setEnd(yy *yySymType) {
yy.span.End = lx.pos()
}
func (lx *lexer) Lex(yy *yySymType) (yyt int) {
//defer func() { println("tok", yy.str, yyt) }()
if lx.start != 0 {
tok := lx.start
lx.start = 0
return tok
}
*yy = yySymType{}
defer lx.setEnd(yy)
Restart:
yy.span.Start = lx.pos()
in := lx.input
if len(in) == 0 {
return tokEOF
}
c := in[0]
if lx.systemHeader && c != '#' {
// Skip the contents of system header files.
nl := strings.IndexByte(in, '\n')
if nl == -1 {
nl = len(in)
} else {
nl++
}
lx.skip(nl)
goto Restart
}
if isspace(c) {
i := 1
for i < len(in) && isspace(in[i]) {
i++
}
lx.skip(i)
goto Restart
}
i := 0
switch c {
case '#':
i++
for in[i] != '\n' {
if in[i] == '\\' && in[i+1] == '\n' && i+2 < len(in) {
i++
}
i++
}
str := in[:i]
// If this line is defining a constant (not a function-like macro), don't
// ignore it.
if f := strings.Fields(str); len(f) > 2 && f[0] == "#define" && !strings.Contains(f[1], "(") {
lx.sym(len("#define"))
return tokDefine
}
lx.skip(i + 1)
// The preprocessor inserts lines that indicate what the current line number
// and filename are. If this is one of those, read it.
var line int
var file string
if n, _ := fmt.Sscanf(str, "# %d %q", &line, &file); n == 2 {
lx.file, lx.lineno = file, line
lx.systemHeader = false
if strings.HasSuffix(file, ".h") {
for _, p := range systemHeaderPaths {
if strings.HasPrefix(file, p) {
lx.systemHeader = true
break
}
}
}
}
goto Restart
case 'L':
if in[1] != '\'' && in[1] != '"' {
break // goes to alpha case after switch
}
i = 1
fallthrough
case '"', '\'':
q := in[i]
i++ // for the quote
for ; in[i] != q; i++ {
if in[i] == '\n' {
what := "string"
if q == '\'' {
what = "character"
}
lx.Errorf("unterminated %s constant", what)
}
if in[i] == '\\' {
i++
}
}
i++ // for the quote
lx.sym(i)
yy.str = lx.tok
if q == '"' {
return tokString
} else {
return tokLitChar
}
case '.':
if in[1] < '0' || '9' < in[1] {
if in[1] == '.' && in[2] == '.' {
lx.token(3)
return tokDotDotDot
}
lx.token(1)
return int(c)
}
fallthrough
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
for '0' <= in[i] && in[i] <= '9' || in[i] == '.' || 'A' <= in[i] && in[i] <= 'Z' || 'a' <= in[i] && in[i] <= 'z' || (in[i] == '+' || in[i] == '-') && (in[i-1] == 'e' || in[i-1] == 'E') {
i++
}
lx.sym(i)
yy.str = lx.tok
return tokNumber
case '/':
switch in[1] {
case '*':
if strings.HasPrefix(in, "/*c2go") {
lx.skip(6)
lx.c2goComment = true
goto Restart
}
i := 2
for ; ; i++ {
if i+2 <= len(in) && in[i] == '*' && in[i+1] == '/' {
i += 2
break
}
if i >= len(in) {
lx.Errorf("unterminated /* comment")
return tokError
}
}
lx.comment(i)
goto Restart
case '/':
for in[i] != '\n' {
i++
}
lx.comment(i)
if len(lx.input) >= 2 && lx.input[0] == '\n' && lx.input[1] == '\n' {
lx.skip(1)
lx.comment(0)
}
goto Restart
}
fallthrough
case '~', '*', '(', ')', '[', ']', '{', '}', '?', ':', ';', ',', '%', '^', '!', '=', '<', '>', '+', '-', '&', '|':
if lx.c2goComment && in[0] == '*' && in[1] == '/' {
lx.c2goComment = false
lx.skip(2)
goto Restart
}
if c == '-' && in[1] == '>' {
lx.token(2)
return tokArrow
}
if in[1] == '=' && tokEq[c] != 0 {
lx.token(2)
return int(tokEq[c])
}
if in[1] == c && tokTok[c] != 0 {
if in[2] == '=' && tokTokEq[c] != 0 {
lx.token(3)
return int(tokTokEq[c])
}
lx.token(2)
return int(tokTok[c])
}
lx.token(1)
return int(c)
}
if isalpha(c) {
for isalpha(in[i]) {
i++
}
lx.sym(i)
switch lx.tok {
case "union":
lx.tok = "struct"
case "NULL":
lx.tok = "nil"
}
yy.str = lx.tok
if t := tokId[lx.tok]; t != 0 {
return int(t)
}
yy.decl = lx.lookupDecl(lx.tok)
if yy.decl != nil && yy.decl.Storage&Typedef != 0 {
t := yy.decl.Type
for t.Kind == TypedefType && t.Base != nil {
t = t.Base
}
yy.typ = &Type{Kind: TypedefType, Name: yy.str, Base: t, TypeDecl: yy.decl}
return tokTypeName
}
if lx.tok == "EXTERN" {
goto Restart
}
if t, ok := extraTypes[lx.tok]; ok {
yy.typ = t
return tokTypeName
}
return tokName
}
lx.Errorf("unexpected input byte %#02x (%c)", c, c)
return tokError
}
var systemHeaderPaths = []string{
"/usr/include",
"/Library/Developer",
"/Applications/Xcode.app",
}
var extraTypes = map[string]*Type{
"bool": BoolType,
"_Bool": BoolType,
"FILE": &Type{Kind: TypedefType, Name: "os.File"},
"int8_t": CharType,
"int16_t": ShortType,
"int32_t": Int32Type,
"int64_t": LonglongType,
"intmax_t": LonglongType,
"intptr_t": IntType,
"ptrdiff_t": IntType,
"size_t": UintType,
"ssize_t": IntType,
"time_t": IntType,
"u_short": UshortType,
"u_int": UintType,
"u_long": UlongType,
"uint": UintType,
"uint8_t": UcharType,
"uint16_t": UshortType,
"uint32_t": Uint32Type,
"uint64_t": UlonglongType,
"uintptr_t": UintType,
"va_list": &Type{Kind: TypedefType, Name: "va_list"},
}
func (lx *lexer) Error(s string) {
lx.Errorf("%s near %s", s, lx.lastsym)
}
func (lx *lexer) Errorf(format string, args ...interface{}) {
lx.errors = append(lx.errors, fmt.Sprintf("%s: %s", lx.span(), fmt.Sprintf(format, args...)))
}
type Pos struct {
File string
Line int
Col int
}
func (a Pos) Less(b Pos) bool {
switch {
case a.File < b.File:
return true
case a.File > b.File:
return false
case a.Line < b.Line:
return true
case a.Line > b.Line:
return false
case a.Col < b.Col:
return true
default:
return false
}
}
type Span struct {
Start Pos
End Pos
}
func (l Span) String() string {
return fmt.Sprintf("%s:%d", l.Start.File, l.Start.Line)
}
type Comment struct {
Span
Text string
Suffix bool
}
func (c Comment) GetSpan() Span {
return c.Span
}
var tokEq = [256]int32{
'*': tokMulEq,
'/': tokDivEq,
'+': tokAddEq,
'-': tokSubEq,
'%': tokModEq,
'^': tokXorEq,
'!': tokNotEq,
'=': tokEqEq,
'<': tokLtEq,
'>': tokGtEq,
'&': tokAndEq,
'|': tokOrEq,
}
var tokTok = [256]int32{
'<': tokLsh,
'>': tokRsh,
'=': tokEqEq,
'+': tokInc,
'-': tokDec,
'&': tokAndAnd,
'|': tokOrOr,
}
var tokTokEq = [256]int32{
'<': tokLshEq,
'>': tokRshEq,
}
var tokId = map[string]int32{
"auto": tokAuto,
"break": tokBreak,
"case": tokCase,
"char": tokChar,
"const": tokConst,
"continue": tokContinue,
"default": tokDefault,
"do": tokDo,
"double": tokDouble,
"else": tokElse,
"enum": tokEnum,
"extern": tokExtern,
"float": tokFloat,
"for": tokFor,
"goto": tokGoto,
"if": tokIf,
"inline": tokInline,
"int": tokInt,
"long": tokLong,
"offsetof": tokOffsetof,
"register": tokRegister,
"return": tokReturn,
"short": tokShort,
"signed": tokSigned,
"sizeof": tokSizeof,
"static": tokStatic,
"struct": tokStruct,
"switch": tokSwitch,
"typedef": tokTypedef,
"union": tokUnion,
"unsigned": tokUnsigned,
"va_arg": tokVaArg,
"void": tokVoid,
"volatile": tokVolatile,
"while": tokWhile,
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// enum walks the expression adding it and its subexpressions to the pre list.
// The order may not reflect the order in the input.
// Types themselves are not recorded (only the declarations nested inside
// them), and each *Decl is visited at most once via lx.enumSeen, since a
// declaration can be reachable along several paths.
func (lx *lexer) enum(x Syntax) {
	switch x := x.(type) {
	default:
		panic(fmt.Errorf("order: unexpected type %T", x))
	case nil:
		return
	case *Expr:
		// A typed nil pointer can arrive inside the Syntax interface,
		// so every case re-checks for nil after the type switch.
		if x == nil {
			return
		}
		lx.enum(x.Left)
		lx.enum(x.Right)
		for _, y := range x.List {
			lx.enum(y)
		}
	case *Init:
		if x == nil {
			return
		}
		lx.enum(x.Expr)
		for _, y := range x.Braced {
			lx.enum(y)
		}
	case *Prog:
		if x == nil {
			return
		}
		for _, y := range x.Decls {
			lx.enum(y)
		}
	case *Stmt:
		if x == nil {
			return
		}
		for _, y := range x.Labels {
			lx.enum(y)
		}
		lx.enum(x.Pre)
		lx.enum(x.Expr)
		lx.enum(x.Post)
		lx.enum(x.Body)
		lx.enum(x.Else)
		lx.enum(x.Decl)
		for _, y := range x.Block {
			lx.enum(y)
		}
	case *Label:
		// ok
	case *Decl:
		if x == nil {
			return
		}
		if lx.enumSeen[x] {
			return
		}
		lx.enumSeen[x] = true
		lx.enum(x.Type)
		lx.enum(x.Init)
		lx.enum(x.Body)
	case *Type:
		if x == nil {
			return
		}
		lx.enum(x.Base)
		for _, y := range x.Decls {
			lx.enum(y)
		}
		return // do not record type itself, just inner decls
	}
	lx.pre = append(lx.pre, x)
}
func (lx *lexer) order(prog *Prog) {
lx.enumSeen = make(map[interface{}]bool)
lx.enum(prog)
sort.Sort(byStart(lx.pre))
lx.post = make([]Syntax, len(lx.pre))
copy(lx.post, lx.pre)
sort.Sort(byEnd(lx.post))
}
type byStart []Syntax
func (x byStart) Len() int { return len(x) }
func (x byStart) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byStart) Less(i, j int) bool {
pi := x[i].GetSpan()
pj := x[j].GetSpan()
// Order by start, leftmost first,
// and break ties by choosing outer before inner.
switch {
case pi.Start.Less(pj.Start):
return true
case pj.Start.Less(pi.Start):
return false
default:
return pj.End.Less(pi.End)
}
}
type byEnd []Syntax
func (x byEnd) Len() int { return len(x) }
func (x byEnd) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byEnd) Less(i, j int) bool {
pi := x[i].GetSpan()
pj := x[j].GetSpan()
// Order by end, leftmost first,
// and break ties by choosing inner before outer.
switch {
case pi.End.Less(pj.End):
return true
case pj.End.Less(pi.End):
return false
default:
return pi.Start.Less(pj.Start)
}
}
type commentList []Comment
func (x commentList) Len() int { return len(x) }
func (x commentList) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x commentList) Less(i, j int) bool {
return x[i].Start.Less(x[j].Start)
}
// assignComments attaches comments to nearby syntax.
func (lx *lexer) assignComments() {
// Generate preorder and postorder lists.
lx.order(lx.prog)
// Split into whole-line comments and suffix comments.
var line, suffix commentList
for _, com := range lx.comments {
if com.Suffix {
suffix = append(suffix, com)
} else {
line = append(line, com)
}
}
sort.Sort(line)
sort.Sort(suffix)
currentFile := ""
// Assign line comments to syntax immediately following.
for _, x := range lx.pre {
if _, ok := x.(*Init); ok {
// Don't assign comments to an initializer; skip it and go to the declaration.
continue
}
start := x.GetSpan().Start
xcom := x.GetComments()
if start.File != currentFile {
// Starting a new file. Make sure we catch the comment block at the start of the file,
// even if they aren't close to a declaration.
currentFile = start.File
for len(line) > 0 && line[0].Start.File < currentFile {
line = line[1:]
}
header := 0
for header < len(line) && line[header].End.Less(start) && (header == 0 || line[header-1].End.Line >= line[header].Start.Line-2) {
header++
}
xcom.Before = append(xcom.Before, line[:header]...)
line = line[header:]
}
end := 0
for end < len(line) && line[end].Start.Less(start) {
end++
}
// Now line[0:end] are the comments that come before x.
first := end
if first > 0 && line[first-1].End.File == start.File && line[first-1].End.Line >= start.Line-5 {
first--
for first > 0 && line[first-1].End.File == line[first].Start.File && line[first-1].End.Line >= line[first].Start.Line-2 {
first--
}
// Now line[first:end] are the comments that come before x,
// separated from x by no more than 4 lines, and from each other by no more than one line.
xcom.Before = append(xcom.Before, line[first:end]...)
}
line = line[end:]
}
// Remaining line comments go at end of file.
lx.prog.Comments.After = append(lx.prog.Comments.After, line...)
// Assign suffix comments to syntax immediately before.
for i := len(lx.post) - 1; i >= 0; i-- {
x := lx.post[i]
// Do not assign suffix comments to call, list, end-of-list, or whole file.
// Instead assign them to the last argument, element, or rule.
/*
switch x.(type) {
case *CallExpr, *ListExpr, *End, *File:
continue
}
*/
// Do not assign suffix comments to something that starts
// on an earlier line, so that in
//
// tags = [ "a",
// "b" ], # comment
//
// we assign the comment to "b" and not to tags = [ ... ].
span := x.GetSpan()
start, end := span.Start, span.End
if start.Line != end.Line {
continue
}
xcom := x.GetComments()
for len(suffix) > 0 && end.Less(suffix[len(suffix)-1].Start) {
c := suffix[len(suffix)-1]
if c.Start.Line == end.Line {
xcom.Suffix = append(xcom.Suffix, c)
}
suffix = suffix[:len(suffix)-1]
}
}
// We assigned suffix comments in reverse.
// If multiple suffix comments were appended to the same
// expression node, they are now in reverse. Fix that.
for _, x := range lx.post {
reverseComments(x.GetComments().Suffix)
}
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
}
| {
if lx.wholeInput == "" {
lx.wholeInput = lx.input
}
if lx.comments == nil {
lx.comments = make(map[Pos]Comment)
}
yyParse(lx)
} | identifier_body |
lex.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go tool yacc cc.y
package cc
import (
"fmt"
"sort"
"strings"
)
// A Syntax represents any syntax element.
type Syntax interface {
// GetSpan returns the start and end position of the syntax,
// excluding leading or trailing comments.
// The use of a Get prefix is non-standard but avoids a conflict
// with the field named Span in most implementations.
GetSpan() Span
// GetComments returns the comments attached to the syntax.
// This method would normally be named 'Comments' but that
// would interfere with embedding a type of the same name.
// The use of a Get prefix is non-standard but avoids a conflict
// with the field named Comments in most implementations.
GetComments() *Comments
}
// SyntaxInfo contains metadata about a piece of syntax.
type SyntaxInfo struct {
Span Span // location of syntax in input
Comments Comments
}
func (s *SyntaxInfo) GetSpan() Span {
return s.Span
}
func (s *SyntaxInfo) GetComments() *Comments {
return &s.Comments
}
// Comments collects the comments associated with a syntax element.
type Comments struct {
Before []Comment // whole-line comments before this syntax
Suffix []Comment // end-of-line comments after this syntax
// For top-level syntax elements only, After lists whole-line
// comments following the syntax.
After []Comment
}
type lexer struct {
// input
start int
lexInput
forcePos Pos
c2goComment bool // inside /*c2go ... */ comment
comments map[Pos]Comment
// comment assignment
pre []Syntax
post []Syntax
enumSeen map[interface{}]bool
// type checking state
scope *Scope
declCache map[string]*Decl
// output
errors []string
prog *Prog
expr *Expr
}
// AddTypeName registers name with the lexer as a known type name,
// so that subsequent lexing treats it like a typedef. Names that are
// already registered are left untouched.
func AddTypeName(name string) {
	_, known := extraTypes[name]
	if !known {
		extraTypes[name] = &Type{Kind: TypedefType, Name: name}
	}
}
// parse runs the yacc-generated parser over the lexer's input,
// lazily initializing state the caller may not have set up.
func (lx *lexer) parse() {
	// wholeInput retains the full original text (comment() uses it to
	// recover the indentation before a comment); default it to the
	// not-yet-consumed input.
	if lx.wholeInput == "" {
		lx.wholeInput = lx.input
	}
	if lx.comments == nil {
		lx.comments = make(map[Pos]Comment)
	}
	yyParse(lx)
}
type lexInput struct {
wholeInput string
input string
tok string
lastsym string
file string
lineno int
column int
systemHeader bool // inside a system header file
}
// pos reports the lexer's current source position. A non-zero forcePos
// (installed via setSpan) overrides the real scan position.
func (lx *lexer) pos() Pos {
	if p := lx.forcePos; p.Line != 0 {
		return p
	}
	return Pos{File: lx.file, Line: lx.lineno, Col: lx.column}
}
func (lx *lexer) span() Span {
p := lx.pos()
return Span{p, p}
}
func (lx *lexer) setSpan(s Span) {
lx.forcePos = s.Start
}
// span merges two spans into one running from l1's start to l2's end.
// A span whose start line is zero is treated as empty and the other
// span is returned unchanged.
func span(l1, l2 Span) Span {
	switch {
	case l1.Start.Line == 0:
		return l2
	case l2.Start.Line == 0:
		return l1
	default:
		return Span{Start: l1.Start, End: l2.End}
	}
}
// skip consumes the first i bytes of the remaining input, keeping the
// line and column counters in sync with what was consumed.
func (lx *lexer) skip(i int) {
	lx.lineno += strings.Count(lx.input[:i], "\n")
	if nl := strings.LastIndex(lx.input[:i], "\n"); nl != -1 {
		// Crossed at least one newline: the column restarts counting
		// from the character after the last newline consumed.
		lx.column = i - nl
	} else {
		lx.column += i
	}
	lx.input = lx.input[i:]
}
// token consumes the first i bytes of input as the current token lx.tok.
func (lx *lexer) token(i int) {
	lx.tok = lx.input[:i]
	lx.skip(i)
}

// sym is token, additionally remembering the text as lx.lastsym,
// which Error uses to report where a parse error occurred.
func (lx *lexer) sym(i int) {
	lx.token(i)
	lx.lastsym = lx.tok
}
// comment records the next i bytes of input as a Comment, classifying it
// as a suffix (end-of-line) comment when non-blank text precedes it on
// the same line, and stripping the comment's leading indentation from
// each of its lines.
func (lx *lexer) comment(i int) {
	var c Comment
	c.Span.Start = lx.pos()
	c.Text = lx.input[:i]
	// j is the offset in wholeInput where the comment begins; back up
	// over spaces and tabs to see what precedes it on the line.
	j := len(lx.wholeInput) - len(lx.input)
	for j > 0 && (lx.wholeInput[j-1] == ' ' || lx.wholeInput[j-1] == '\t') {
		j--
	}
	if j > 0 && lx.wholeInput[j-1] != '\n' {
		c.Suffix = true
	}
	// The whitespace run between line start and comment is the
	// indentation prefix; drop it from continuation lines too.
	prefix := lx.wholeInput[j : len(lx.wholeInput)-len(lx.input)]
	lines := strings.Split(c.Text, "\n")
	for i, line := range lines {
		if strings.HasPrefix(line, prefix) {
			lines[i] = line[len(prefix):]
		}
	}
	c.Text = strings.Join(lines, "\n")
	lx.skip(i)
	c.Span.End = lx.pos()
	lx.comments[c.Span.Start] = c
}
// isalpha reports whether c may appear in an identifier. Despite the
// name it also accepts digits and all non-ASCII bytes (>= 0x80); the
// lexer only reaches it for a first character after digits have been
// handled elsewhere.
func isalpha(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
		return true
	case '0' <= c && c <= '9':
		return true
	case c == '_':
		return true
	default:
		return c >= 0x80
	}
}
// isspace reports whether c is an ASCII whitespace byte
// (the same set C's isspace accepts in the C locale).
func isspace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', '\v', '\f':
		return true
	}
	return false
}
func (lx *lexer) setEnd(yy *yySymType) {
yy.span.End = lx.pos()
}
// Lex returns the next token for the parser, filling yy with the
// token's string/declaration/type value and its span. It implements
// the yyLexer interface expected by the yacc-generated parser.
func (lx *lexer) Lex(yy *yySymType) (yyt int) {
	//defer func() { println("tok", yy.str, yyt) }()
	// A pending start token (used to pick the grammar entry point)
	// is returned exactly once.
	if lx.start != 0 {
		tok := lx.start
		lx.start = 0
		return tok
	}
	*yy = yySymType{}
	defer lx.setEnd(yy)

Restart:
	yy.span.Start = lx.pos()
	in := lx.input
	if len(in) == 0 {
		return tokEOF
	}
	c := in[0]
	if lx.systemHeader && c != '#' {
		// Skip the contents of system header files.
		nl := strings.IndexByte(in, '\n')
		if nl == -1 {
			nl = len(in)
		} else {
			nl++
		}
		lx.skip(nl)
		goto Restart
	}
	if isspace(c) {
		i := 1
		for i < len(in) && isspace(in[i]) {
			i++
		}
		lx.skip(i)
		goto Restart
	}
	i := 0
	switch c {
	case '#':
		// Preprocessor line: scan to end of (possibly backslash-continued) line.
		i++
		for in[i] != '\n' {
			if in[i] == '\\' && in[i+1] == '\n' && i+2 < len(in) {
				i++
			}
			i++
		}
		str := in[:i]
		// If this line is defining a constant (not a function-like macro), don't
		// ignore it.
		if f := strings.Fields(str); len(f) > 2 && f[0] == "#define" && !strings.Contains(f[1], "(") {
			lx.sym(len("#define"))
			return tokDefine
		}
		lx.skip(i + 1)
		// The preprocessor inserts lines that indicate what the current line number
		// and filename are. If this is one of those, read it.
		var line int
		var file string
		if n, _ := fmt.Sscanf(str, "# %d %q", &line, &file); n == 2 {
			lx.file, lx.lineno = file, line
			lx.systemHeader = false
			if strings.HasSuffix(file, ".h") {
				for _, p := range systemHeaderPaths {
					if strings.HasPrefix(file, p) {
						lx.systemHeader = true
						break
					}
				}
			}
		}
		goto Restart
	case 'L':
		// L'x' and L"..." are wide character/string constants; a bare L
		// falls through to the identifier handling below the switch.
		if in[1] != '\'' && in[1] != '"' {
			break // goes to alpha case after switch
		}
		i = 1
		fallthrough
	case '"', '\'':
		q := in[i]
		i++ // for the quote
		for ; in[i] != q; i++ {
			if in[i] == '\n' {
				what := "string"
				if q == '\'' {
					what = "character"
				}
				lx.Errorf("unterminated %s constant", what)
			}
			if in[i] == '\\' {
				i++
			}
		}
		i++ // for the quote
		lx.sym(i)
		yy.str = lx.tok
		if q == '"' {
			return tokString
		} else {
			return tokLitChar
		}
	case '.':
		// A dot may start a floating-point literal, be "...", or stand alone.
		if in[1] < '0' || '9' < in[1] {
			if in[1] == '.' && in[2] == '.' {
				lx.token(3)
				return tokDotDotDot
			}
			lx.token(1)
			return int(c)
		}
		fallthrough
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		// Deliberately permissive number scan (hex digits, suffixes,
		// exponents with signs); the parser/semantics reject bad forms.
		for '0' <= in[i] && in[i] <= '9' || in[i] == '.' || 'A' <= in[i] && in[i] <= 'Z' || 'a' <= in[i] && in[i] <= 'z' || (in[i] == '+' || in[i] == '-') && (in[i-1] == 'e' || in[i-1] == 'E') {
			i++
		}
		lx.sym(i)
		yy.str = lx.tok
		return tokNumber
	case '/':
		switch in[1] {
		case '*':
			// A /*c2go ... */ comment exposes its contents to the lexer.
			if strings.HasPrefix(in, "/*c2go") {
				lx.skip(6)
				lx.c2goComment = true
				goto Restart
			}
			i := 2
			for ; ; i++ {
				if i+2 <= len(in) && in[i] == '*' && in[i+1] == '/' {
					i += 2
					break
				}
				if i >= len(in) {
					lx.Errorf("unterminated /* comment")
					return tokError
				}
			}
			lx.comment(i)
			goto Restart
		case '/':
			for in[i] != '\n' {
				i++
			}
			lx.comment(i)
			// A blank line after a // comment is recorded as an empty
			// comment so the gap survives comment assignment.
			if len(lx.input) >= 2 && lx.input[0] == '\n' && lx.input[1] == '\n' {
				lx.skip(1)
				lx.comment(0)
			}
			goto Restart
		}
		fallthrough
	case '~', '*', '(', ')', '[', ']', '{', '}', '?', ':', ';', ',', '%', '^', '!', '=', '<', '>', '+', '-', '&', '|':
		if lx.c2goComment && in[0] == '*' && in[1] == '/' {
			lx.c2goComment = false
			lx.skip(2)
			goto Restart
		}
		if c == '-' && in[1] == '>' {
			lx.token(2)
			return tokArrow
		}
		// Multi-character operator tables: "x=", doubled "xx", and "xx=".
		if in[1] == '=' && tokEq[c] != 0 {
			lx.token(2)
			return int(tokEq[c])
		}
		if in[1] == c && tokTok[c] != 0 {
			if in[2] == '=' && tokTokEq[c] != 0 {
				lx.token(3)
				return int(tokTokEq[c])
			}
			lx.token(2)
			return int(tokTok[c])
		}
		lx.token(1)
		return int(c)
	}
	if isalpha(c) {
		for isalpha(in[i]) {
			i++
		}
		lx.sym(i)
		// Canonicalize tokens whose Go translation differs from C.
		switch lx.tok {
		case "union":
			lx.tok = "struct"
		case "NULL":
			lx.tok = "nil"
		}
		yy.str = lx.tok
		if t := tokId[lx.tok]; t != 0 {
			return int(t)
		}
		// Not a keyword: look the name up; typedef names lex as type names,
		// following the underlying type chain for the base.
		yy.decl = lx.lookupDecl(lx.tok)
		if yy.decl != nil && yy.decl.Storage&Typedef != 0 {
			t := yy.decl.Type
			for t.Kind == TypedefType && t.Base != nil {
				t = t.Base
			}
			yy.typ = &Type{Kind: TypedefType, Name: yy.str, Base: t, TypeDecl: yy.decl}
			return tokTypeName
		}
		if lx.tok == "EXTERN" {
			goto Restart
		}
		if t, ok := extraTypes[lx.tok]; ok {
			yy.typ = t
			return tokTypeName
		}
		return tokName
	}
	lx.Errorf("unexpected input byte %#02x (%c)", c, c)
	return tokError
}
var systemHeaderPaths = []string{
"/usr/include",
"/Library/Developer",
"/Applications/Xcode.app",
}
var extraTypes = map[string]*Type{
"bool": BoolType,
"_Bool": BoolType,
"FILE": &Type{Kind: TypedefType, Name: "os.File"},
"int8_t": CharType,
"int16_t": ShortType,
"int32_t": Int32Type,
"int64_t": LonglongType,
"intmax_t": LonglongType,
"intptr_t": IntType,
"ptrdiff_t": IntType,
"size_t": UintType,
"ssize_t": IntType,
"time_t": IntType,
"u_short": UshortType,
"u_int": UintType,
"u_long": UlongType,
"uint": UintType,
"uint8_t": UcharType,
"uint16_t": UshortType,
"uint32_t": Uint32Type,
"uint64_t": UlonglongType,
"uintptr_t": UintType,
"va_list": &Type{Kind: TypedefType, Name: "va_list"},
}
// Error implements the yyLexer error hook, reporting s near the most
// recently scanned symbol.
func (lx *lexer) Error(s string) {
	lx.Errorf("%s near %s", s, lx.lastsym)
}

// Errorf records a formatted error message tagged with the current
// source span; errors accumulate in lx.errors rather than aborting.
func (lx *lexer) Errorf(format string, args ...interface{}) {
	lx.errors = append(lx.errors, fmt.Sprintf("%s: %s", lx.span(), fmt.Sprintf(format, args...)))
}
// Pos identifies a location in the input: file name, line, and column.
type Pos struct {
	File string
	Line int
	Col  int
}

// Less reports whether a sorts before b, ordering by file name,
// then line, then column.
func (a Pos) Less(b Pos) bool {
	if a.File != b.File {
		return a.File < b.File
	}
	if a.Line != b.Line {
		return a.Line < b.Line
	}
	return a.Col < b.Col
}
type Span struct {
Start Pos
End Pos
}
func (l Span) String() string {
return fmt.Sprintf("%s:%d", l.Start.File, l.Start.Line)
}
type Comment struct {
Span
Text string
Suffix bool
}
func (c Comment) GetSpan() Span {
return c.Span
}
var tokEq = [256]int32{
'*': tokMulEq,
'/': tokDivEq,
'+': tokAddEq,
'-': tokSubEq,
'%': tokModEq,
'^': tokXorEq,
'!': tokNotEq,
'=': tokEqEq,
'<': tokLtEq,
'>': tokGtEq,
'&': tokAndEq,
'|': tokOrEq,
}
var tokTok = [256]int32{
'<': tokLsh,
'>': tokRsh,
'=': tokEqEq,
'+': tokInc,
'-': tokDec,
'&': tokAndAnd,
'|': tokOrOr,
}
var tokTokEq = [256]int32{
'<': tokLshEq,
'>': tokRshEq,
}
var tokId = map[string]int32{
"auto": tokAuto,
"break": tokBreak,
"case": tokCase,
"char": tokChar,
"const": tokConst,
"continue": tokContinue,
"default": tokDefault,
"do": tokDo,
"double": tokDouble,
"else": tokElse,
"enum": tokEnum,
"extern": tokExtern,
"float": tokFloat,
"for": tokFor,
"goto": tokGoto,
"if": tokIf,
"inline": tokInline,
"int": tokInt,
"long": tokLong,
"offsetof": tokOffsetof,
"register": tokRegister,
"return": tokReturn,
"short": tokShort,
"signed": tokSigned,
"sizeof": tokSizeof,
"static": tokStatic,
"struct": tokStruct,
"switch": tokSwitch,
"typedef": tokTypedef,
"union": tokUnion,
"unsigned": tokUnsigned,
"va_arg": tokVaArg,
"void": tokVoid,
"volatile": tokVolatile,
"while": tokWhile,
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// enum walks the expression adding it and its subexpressions to the pre list.
// The order may not reflect the order in the input.
func (lx *lexer) enum(x Syntax) {
switch x := x.(type) {
default:
panic(fmt.Errorf("order: unexpected type %T", x))
case nil:
return
case *Expr:
if x == nil {
return
}
lx.enum(x.Left)
lx.enum(x.Right)
for _, y := range x.List |
case *Init:
if x == nil {
return
}
lx.enum(x.Expr)
for _, y := range x.Braced {
lx.enum(y)
}
case *Prog:
if x == nil {
return
}
for _, y := range x.Decls {
lx.enum(y)
}
case *Stmt:
if x == nil {
return
}
for _, y := range x.Labels {
lx.enum(y)
}
lx.enum(x.Pre)
lx.enum(x.Expr)
lx.enum(x.Post)
lx.enum(x.Body)
lx.enum(x.Else)
lx.enum(x.Decl)
for _, y := range x.Block {
lx.enum(y)
}
case *Label:
// ok
case *Decl:
if x == nil {
return
}
if lx.enumSeen[x] {
return
}
lx.enumSeen[x] = true
lx.enum(x.Type)
lx.enum(x.Init)
lx.enum(x.Body)
case *Type:
if x == nil {
return
}
lx.enum(x.Base)
for _, y := range x.Decls {
lx.enum(y)
}
return // do not record type itself, just inner decls
}
lx.pre = append(lx.pre, x)
}
// order fills lx.pre (sorted by start position, outermost first) and
// lx.post (sorted by end position, outermost last) with every syntax
// element reachable from prog.
func (lx *lexer) order(prog *Prog) {
	lx.enumSeen = make(map[interface{}]bool)
	lx.enum(prog)
	sort.Sort(byStart(lx.pre))
	lx.post = append([]Syntax(nil), lx.pre...)
	sort.Sort(byEnd(lx.post))
}
// byStart sorts syntax by start position, breaking ties so that an
// enclosing (outer) element precedes anything it contains.
type byStart []Syntax

func (x byStart) Len() int      { return len(x) }
func (x byStart) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byStart) Less(i, j int) bool {
	si, sj := x[i].GetSpan(), x[j].GetSpan()
	if si.Start.Less(sj.Start) {
		return true
	}
	if sj.Start.Less(si.Start) {
		return false
	}
	// Same start: the span that ends later (the outer one) comes first.
	return sj.End.Less(si.End)
}
// byEnd sorts syntax by end position, breaking ties so that a contained
// (inner) element precedes anything that encloses it, matching the
// postorder contract ("outer expressions last") described above.
type byEnd []Syntax

func (x byEnd) Len() int      { return len(x) }
func (x byEnd) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byEnd) Less(i, j int) bool {
	pi := x[i].GetSpan()
	pj := x[j].GetSpan()
	// Order by end, leftmost first,
	// and break ties by choosing inner before outer.
	switch {
	case pi.End.Less(pj.End):
		return true
	case pj.End.Less(pi.End):
		return false
	default:
		// On an end tie the inner span starts later, so it must sort
		// first for outer elements to land last in the postorder list.
		return pj.Start.Less(pi.Start)
	}
}
type commentList []Comment
func (x commentList) Len() int { return len(x) }
func (x commentList) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x commentList) Less(i, j int) bool {
return x[i].Start.Less(x[j].Start)
}
// assignComments attaches comments to nearby syntax.
// Whole-line comments become Comments.Before on the syntax element that
// immediately follows them; end-of-line comments become Comments.Suffix
// on the element that immediately precedes them on the same line.
// Whole-line comments after the last element go to lx.prog.Comments.After.
func (lx *lexer) assignComments() {
	// Generate preorder and postorder lists.
	lx.order(lx.prog)
	// Split into whole-line comments and suffix comments.
	var line, suffix commentList
	for _, com := range lx.comments {
		if com.Suffix {
			suffix = append(suffix, com)
		} else {
			line = append(line, com)
		}
	}
	sort.Sort(line)
	sort.Sort(suffix)
	currentFile := ""
	// Assign line comments to syntax immediately following.
	for _, x := range lx.pre {
		if _, ok := x.(*Init); ok {
			// Don't assign comments to an initializer; skip it and go to the declaration.
			continue
		}
		start := x.GetSpan().Start
		xcom := x.GetComments()
		if start.File != currentFile {
			// Starting a new file. Make sure we catch the comment block at the start of the file,
			// even if they aren't close to a declaration.
			currentFile = start.File
			// Discard comments from earlier files that produced no syntax.
			for len(line) > 0 && line[0].Start.File < currentFile {
				line = line[1:]
			}
			// A header is a run of comments before the first declaration
			// with gaps of at most two lines between consecutive comments.
			header := 0
			for header < len(line) && line[header].End.Less(start) && (header == 0 || line[header-1].End.Line >= line[header].Start.Line-2) {
				header++
			}
			xcom.Before = append(xcom.Before, line[:header]...)
			line = line[header:]
		}
		end := 0
		for end < len(line) && line[end].Start.Less(start) {
			end++
		}
		// Now line[0:end] are the comments that come before x.
		first := end
		if first > 0 && line[first-1].End.File == start.File && line[first-1].End.Line >= start.Line-5 {
			first--
			for first > 0 && line[first-1].End.File == line[first].Start.File && line[first-1].End.Line >= line[first].Start.Line-2 {
				first--
			}
			// Now line[first:end] are the comments that come before x,
			// separated from x by no more than 4 lines, and from each other by no more than one line.
			xcom.Before = append(xcom.Before, line[first:end]...)
		}
		// Comments in line[:first] attach to nothing and are dropped here.
		line = line[end:]
	}
	// Remaining line comments go at end of file.
	lx.prog.Comments.After = append(lx.prog.Comments.After, line...)
	// Assign suffix comments to syntax immediately before.
	for i := len(lx.post) - 1; i >= 0; i-- {
		x := lx.post[i]
		// Do not assign suffix comments to call, list, end-of-list, or whole file.
		// Instead assign them to the last argument, element, or rule.
		/*
			switch x.(type) {
			case *CallExpr, *ListExpr, *End, *File:
				continue
			}
		*/
		// Do not assign suffix comments to something that starts
		// on an earlier line, so that in
		//
		//	tags = [ "a",
		//		"b" ],   # comment
		//
		// we assign the comment to "b" and not to tags = [ ... ].
		span := x.GetSpan()
		start, end := span.Start, span.End
		if start.Line != end.Line {
			continue
		}
		xcom := x.GetComments()
		for len(suffix) > 0 && end.Less(suffix[len(suffix)-1].Start) {
			c := suffix[len(suffix)-1]
			if c.Start.Line == end.Line {
				xcom.Suffix = append(xcom.Suffix, c)
			}
			suffix = suffix[:len(suffix)-1]
		}
	}
	// We assigned suffix comments in reverse.
	// If multiple suffix comments were appended to the same
	// expression node, they are now in reverse. Fix that.
	for _, x := range lx.post {
		reverseComments(x.GetComments().Suffix)
	}
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
}
| {
lx.enum(y)
} | conditional_block |
stream.rs | use crate::{JsonRpcClient, Middleware, PinBoxFut, Provider, ProviderError};
use ethers_core::types::{Transaction, TxHash, U256};
use futures_core::stream::Stream;
use futures_core::Future;
use futures_util::stream::FuturesUnordered;
use futures_util::{stream, FutureExt, StreamExt};
use pin_project::pin_project;
use serde::{de::DeserializeOwned, Serialize};
use std::collections::VecDeque;
use std::{
fmt::Debug,
pin::Pin,
task::{Context, Poll},
time::Duration,
vec::IntoIter,
};
#[cfg(not(target_arch = "wasm32"))]
use futures_timer::Delay;
#[cfg(target_arch = "wasm32")]
use wasm_timer::Delay;
// https://github.com/tomusdrw/rust-web3/blob/befcb2fb8f3ca0a43e3081f68886fa327e64c8e6/src/api/eth_filter.rs#L20
/// Returns a never-ending stream that yields `()` every `duration`.
///
/// Used as the polling ticker driving [`FilterWatcher`].
pub fn interval(duration: Duration) -> impl Stream<Item = ()> + Send + Unpin {
    stream::unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop)
}
/// The default polling interval for filters and pending transactions
pub const DEFAULT_POLL_INTERVAL: Duration = Duration::from_millis(7000);
/// Internal state machine for [`FilterWatcher`]'s `Stream` implementation.
enum FilterWatcherState<'a, R> {
    /// Waiting for the polling interval to elapse.
    WaitForInterval,
    /// An `eth_getFilterChanges` request is in flight.
    GetFilterChanges(PinBoxFut<'a, Vec<R>>),
    /// Draining the batch returned by the last request, one item per poll.
    NextItem(IntoIter<R>),
}

#[must_use = "filters do nothing unless you stream them"]
#[pin_project]
/// Streams data from an installed filter via `eth_getFilterChanges`
pub struct FilterWatcher<'a, P, R> {
    /// The filter's installed id on the ethereum node
    pub id: U256,

    /// Provider used to issue the `eth_getFilterChanges` calls.
    provider: &'a Provider<P>,

    // The polling interval
    interval: Box<dyn Stream<Item = ()> + Send + Unpin>,

    // Current position in the wait/fetch/drain cycle.
    state: FilterWatcherState<'a, R>,
}
impl<'a, P, R> FilterWatcher<'a, P, R>
where
    P: JsonRpcClient,
    R: Send + Sync + DeserializeOwned,
{
    /// Creates a new watcher with the provided factory and filter id.
    ///
    /// Polling starts at [`DEFAULT_POLL_INTERVAL`]; override it with
    /// [`Self::interval`].
    pub fn new<T: Into<U256>>(id: T, provider: &'a Provider<P>) -> Self {
        Self {
            id: id.into(),
            interval: Box::new(interval(DEFAULT_POLL_INTERVAL)),
            state: FilterWatcherState::WaitForInterval,
            provider,
        }
    }

    /// Sets the stream's polling interval
    pub fn interval(mut self, duration: Duration) -> Self {
        self.interval = Box::new(interval(duration));
        self
    }

    /// Alias for Box::pin, must be called in order to pin the stream and be able
    /// to call `next` on it.
    pub fn stream(self) -> Pin<Box<Self>> {
        Box::pin(self)
    }
}
// Pattern for flattening the returned Vec of filter changes taken from
// https://github.com/tomusdrw/rust-web3/blob/f043b222744580bf4be043da757ab0b300c3b2da/src/api/eth_filter.rs#L50-L67
impl<'a, P, R> Stream for FilterWatcher<'a, P, R>
where
    P: JsonRpcClient,
    R: Serialize + Send + Sync + DeserializeOwned + Debug + 'a,
{
    type Item = R;

    // State machine: WaitForInterval -> GetFilterChanges -> NextItem, then
    // back to WaitForInterval once the fetched batch is drained. Each arm
    // calls wake_by_ref so the task is polled again to advance the new state.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        let this = self.project();
        let id = *this.id;

        *this.state = match this.state {
            FilterWatcherState::WaitForInterval => {
                // Wait the polling period
                let _ready = futures_util::ready!(this.interval.poll_next_unpin(cx));

                // create a new instance of the future
                cx.waker().wake_by_ref();
                let fut = Box::pin(this.provider.get_filter_changes(id));
                FilterWatcherState::GetFilterChanges(fut)
            }
            FilterWatcherState::GetFilterChanges(fut) => {
                // NOTE: If the provider returns an error, this will return an empty
                // vector. Should we make this return a Result instead? Ideally if we're
                // in a streamed loop we wouldn't want the loop to terminate if an error
                // is encountered (since it might be a temporary error).
                let items: Vec<R> = futures_util::ready!(fut.as_mut().poll(cx)).unwrap_or_default();
                cx.waker().wake_by_ref();
                FilterWatcherState::NextItem(items.into_iter())
            }
            // Consume 1 element from the vector. If more elements are in the vector,
            // the next call will immediately go to this branch instead of trying to get
            // filter changes again. Once the whole vector is consumed, it will poll again
            // for new logs
            FilterWatcherState::NextItem(iter) => {
                cx.waker().wake_by_ref();
                match iter.next() {
                    Some(item) => return Poll::Ready(Some(item)),
                    None => FilterWatcherState::WaitForInterval,
                }
            }
        };

        Poll::Pending
    }
}
impl<'a, P> FilterWatcher<'a, P, TxHash>
where
    P: JsonRpcClient,
{
    /// Returns a stream that yields the `Transaction`s for the transaction hashes this stream yields.
    ///
    /// This internally calls `Provider::get_transaction` with every new transaction.
    /// No more than n futures will be buffered at any point in time, and less than n may also be
    /// buffered depending on the state of each future.
    ///
    /// Because lookups run concurrently, transactions are yielded in
    /// completion order, which may differ from the order of the hashes.
    pub fn transactions_unordered(self, n: usize) -> TransactionStream<'a, P, Self> {
        TransactionStream::new(self.provider, self, n)
    }
}
/// Errors `TransactionStream` can throw
#[derive(Debug, thiserror::Error)]
pub enum GetTransactionError {
    /// The underlying `get_transaction` call for this hash failed.
    #[error("Failed to get transaction `{0}`: {1}")]
    ProviderError(TxHash, ProviderError),

    /// `get_transaction` resulted in a `None`
    #[error("Transaction `{0}` not found")]
    NotFound(TxHash),
}
impl From<GetTransactionError> for ProviderError {
    fn from(err: GetTransactionError) -> Self {
        match err {
            // Unwrap and forward the underlying provider failure.
            GetTransactionError::ProviderError(_, e) => e,
            // A missing transaction carries no inner error; surface its
            // Display message instead.
            not_found @ GetTransactionError::NotFound(_) => {
                ProviderError::CustomError(not_found.to_string())
            }
        }
    }
}
/// Boxed future resolving to a fetched `Transaction` (or a lookup error).
type TransactionFut<'a> = Pin<Box<dyn Future<Output = TransactionResult> + 'a>>;

/// Result alias produced by every transaction lookup.
type TransactionResult = Result<Transaction, GetTransactionError>;

/// Drains a stream of transaction hashes and yields entire `Transaction`.
///
/// Transactions are yielded as their lookups complete, not necessarily in
/// the order their hashes arrived.
#[must_use = "streams do nothing unless polled"]
pub struct TransactionStream<'a, P, St> {
    /// Currently running futures pending completion.
    pending: FuturesUnordered<TransactionFut<'a>>,
    /// Temporary buffered transaction that get started as soon as another future finishes.
    buffered: VecDeque<TxHash>,
    /// The provider that gets the transaction
    provider: &'a Provider<P>,
    /// A stream of transaction hashes.
    stream: St,
    /// max allowed futures to execute at once.
    max_concurrent: usize,
}
impl<'a, P: JsonRpcClient, St> TransactionStream<'a, P, St> {
    /// Create a new `TransactionStream` instance
    pub fn new(provider: &'a Provider<P>, stream: St, max_concurrent: usize) -> Self {
        Self {
            pending: Default::default(),
            buffered: Default::default(),
            provider,
            stream,
            max_concurrent,
        }
    }

    /// Push a future into the set
    ///
    /// Maps the provider's `Option`-returning lookup into
    /// [`GetTransactionError`]: a missing transaction becomes `NotFound`,
    /// a transport failure becomes `ProviderError`.
    fn push_tx(&mut self, tx: TxHash) {
        let fut = self
            .provider
            .get_transaction(tx)
            .then(move |res| match res {
                Ok(Some(tx)) => futures_util::future::ok(tx),
                Ok(None) => futures_util::future::err(GetTransactionError::NotFound(tx)),
                Err(err) => futures_util::future::err(GetTransactionError::ProviderError(tx, err)),
            });
        self.pending.push(Box::pin(fut));
    }
}
impl<'a, P, St> Stream for TransactionStream<'a, P, St>
where
    P: JsonRpcClient,
    St: Stream<Item = TxHash> + Unpin + 'a,
{
    type Item = TransactionResult;

    // Each poll proceeds in three phases:
    //   1. start buffered hashes until `max_concurrent` lookups are in flight,
    //   2. drain the inner hash stream, starting or buffering each hash,
    //   3. poll the in-flight lookups and yield the first completion.
    // The stream terminates only once the hash stream is exhausted and no
    // lookups remain in flight.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();

        // drain buffered transactions first
        while this.pending.len() < this.max_concurrent {
            if let Some(tx) = this.buffered.pop_front() {
                this.push_tx(tx);
            } else {
                break;
            }
        }

        let mut stream_done = false;
        loop {
            match Stream::poll_next(Pin::new(&mut this.stream), cx) {
                Poll::Ready(Some(tx)) => {
                    if this.pending.len() < this.max_concurrent {
                        this.push_tx(tx);
                    } else {
                        // At capacity: park the hash until a lookup finishes.
                        this.buffered.push_back(tx);
                    }
                }
                Poll::Ready(None) => {
                    stream_done = true;
                    break;
                }
                _ => break,
            }
        }

        // poll running futures
        if let tx @ Poll::Ready(Some(_)) = this.pending.poll_next_unpin(cx) {
            return tx;
        }

        if stream_done && this.pending.is_empty() {
            // all done
            return Poll::Ready(None);
        }

        Poll::Pending
    }
}
#[cfg(test)]
#[cfg(not(target_arch = "wasm32"))]
mod tests {
use super::*;
use crate::{Http, Ws};
use ethers_core::{
types::{TransactionReceipt, TransactionRequest},
utils::{Ganache, Geth},
};
use futures_util::{FutureExt, StreamExt};
use std::collections::HashSet;
use std::convert::TryFrom;
#[tokio::test]
async fn can_stream_pending_transactions() {
let num_txs = 5;
let geth = Geth::new().block_time(2u64).spawn();
let provider = Provider::<Http>::try_from(geth.endpoint())
.unwrap()
.interval(Duration::from_millis(1000));
let ws = Ws::connect(geth.ws_endpoint()).await.unwrap();
let ws_provider = Provider::new(ws);
let accounts = provider.get_accounts().await.unwrap();
let tx = TransactionRequest::new()
.from(accounts[0])
.to(accounts[0])
.value(1e18 as u64);
let mut sending = futures_util::future::join_all(
std::iter::repeat(tx.clone()).take(num_txs).map(|tx| async {
provider
.send_transaction(tx, None)
.await
.unwrap()
.await
.unwrap()
.unwrap()
}),
)
.fuse();
let mut watch_tx_stream = provider
.watch_pending_transactions()
.await
.unwrap()
.transactions_unordered(num_txs)
.fuse();
let mut sub_tx_stream = ws_provider
.subscribe_pending_txs()
.await
.unwrap()
.transactions_unordered(2)
.fuse();
let mut sent: Option<Vec<TransactionReceipt>> = None;
let mut watch_received: Vec<Transaction> = Vec::with_capacity(num_txs);
let mut sub_received: Vec<Transaction> = Vec::with_capacity(num_txs);
loop {
futures_util::select! {
txs = sending => {
sent = Some(txs)
},
tx = watch_tx_stream.next() => watch_received.push(tx.unwrap().unwrap()),
tx = sub_tx_stream.next() => sub_received.push(tx.unwrap().unwrap()),
}; | if let Some(ref sent) = sent {
assert_eq!(sent.len(), watch_received.len());
let sent_txs = sent
.iter()
.map(|tx| tx.transaction_hash)
.collect::<HashSet<_>>();
assert_eq!(sent_txs, watch_received.iter().map(|tx| tx.hash).collect());
assert_eq!(sent_txs, sub_received.iter().map(|tx| tx.hash).collect());
break;
}
}
}
}
#[tokio::test]
async fn can_stream_transactions() {
let ganache = Ganache::new().block_time(2u64).spawn();
let provider = Provider::<Http>::try_from(ganache.endpoint())
.unwrap()
.with_sender(ganache.addresses()[0]);
let accounts = provider.get_accounts().await.unwrap();
let tx = TransactionRequest::new()
.from(accounts[0])
.to(accounts[0])
.value(1e18 as u64);
let txs =
futures_util::future::join_all(std::iter::repeat(tx.clone()).take(3).map(|tx| async {
provider
.send_transaction(tx, None)
.await
.unwrap()
.await
.unwrap()
}))
.await;
let stream = TransactionStream::new(
&provider,
stream::iter(txs.iter().cloned().map(|tx| tx.unwrap().transaction_hash)),
10,
);
let res = stream
.collect::<Vec<_>>()
.await
.into_iter()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert_eq!(res.len(), txs.len());
assert_eq!(
res.into_iter().map(|tx| tx.hash).collect::<HashSet<_>>(),
txs.into_iter()
.map(|tx| tx.unwrap().transaction_hash)
.collect()
);
}
} | if watch_received.len() == num_txs && sub_received.len() == num_txs { | random_line_split |
stream.rs | use crate::{JsonRpcClient, Middleware, PinBoxFut, Provider, ProviderError};
use ethers_core::types::{Transaction, TxHash, U256};
use futures_core::stream::Stream;
use futures_core::Future;
use futures_util::stream::FuturesUnordered;
use futures_util::{stream, FutureExt, StreamExt};
use pin_project::pin_project;
use serde::{de::DeserializeOwned, Serialize};
use std::collections::VecDeque;
use std::{
fmt::Debug,
pin::Pin,
task::{Context, Poll},
time::Duration,
vec::IntoIter,
};
#[cfg(not(target_arch = "wasm32"))]
use futures_timer::Delay;
#[cfg(target_arch = "wasm32")]
use wasm_timer::Delay;
// https://github.com/tomusdrw/rust-web3/blob/befcb2fb8f3ca0a43e3081f68886fa327e64c8e6/src/api/eth_filter.rs#L20
pub fn interval(duration: Duration) -> impl Stream<Item = ()> + Send + Unpin {
stream::unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop)
}
/// The default polling interval for filters and pending transactions
pub const DEFAULT_POLL_INTERVAL: Duration = Duration::from_millis(7000);
enum FilterWatcherState<'a, R> {
WaitForInterval,
GetFilterChanges(PinBoxFut<'a, Vec<R>>),
NextItem(IntoIter<R>),
}
#[must_use = "filters do nothing unless you stream them"]
#[pin_project]
/// Streams data from an installed filter via `eth_getFilterChanges`
pub struct | <'a, P, R> {
/// The filter's installed id on the ethereum node
pub id: U256,
provider: &'a Provider<P>,
// The polling interval
interval: Box<dyn Stream<Item = ()> + Send + Unpin>,
state: FilterWatcherState<'a, R>,
}
impl<'a, P, R> FilterWatcher<'a, P, R>
where
P: JsonRpcClient,
R: Send + Sync + DeserializeOwned,
{
/// Creates a new watcher with the provided factory and filter id.
pub fn new<T: Into<U256>>(id: T, provider: &'a Provider<P>) -> Self {
Self {
id: id.into(),
interval: Box::new(interval(DEFAULT_POLL_INTERVAL)),
state: FilterWatcherState::WaitForInterval,
provider,
}
}
/// Sets the stream's polling interval
pub fn interval(mut self, duration: Duration) -> Self {
self.interval = Box::new(interval(duration));
self
}
/// Alias for Box::pin, must be called in order to pin the stream and be able
/// to call `next` on it.
pub fn stream(self) -> Pin<Box<Self>> {
Box::pin(self)
}
}
// Pattern for flattening the returned Vec of filter changes taken from
// https://github.com/tomusdrw/rust-web3/blob/f043b222744580bf4be043da757ab0b300c3b2da/src/api/eth_filter.rs#L50-L67
impl<'a, P, R> Stream for FilterWatcher<'a, P, R>
where
P: JsonRpcClient,
R: Serialize + Send + Sync + DeserializeOwned + Debug + 'a,
{
type Item = R;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.project();
let id = *this.id;
*this.state = match this.state {
FilterWatcherState::WaitForInterval => {
// Wait the polling period
let _ready = futures_util::ready!(this.interval.poll_next_unpin(cx));
// create a new instance of the future
cx.waker().wake_by_ref();
let fut = Box::pin(this.provider.get_filter_changes(id));
FilterWatcherState::GetFilterChanges(fut)
}
FilterWatcherState::GetFilterChanges(fut) => {
// NOTE: If the provider returns an error, this will return an empty
// vector. Should we make this return a Result instead? Ideally if we're
// in a streamed loop we wouldn't want the loop to terminate if an error
// is encountered (since it might be a temporary error).
let items: Vec<R> = futures_util::ready!(fut.as_mut().poll(cx)).unwrap_or_default();
cx.waker().wake_by_ref();
FilterWatcherState::NextItem(items.into_iter())
}
// Consume 1 element from the vector. If more elements are in the vector,
// the next call will immediately go to this branch instead of trying to get
// filter changes again. Once the whole vector is consumed, it will poll again
// for new logs
FilterWatcherState::NextItem(iter) => {
cx.waker().wake_by_ref();
match iter.next() {
Some(item) => return Poll::Ready(Some(item)),
None => FilterWatcherState::WaitForInterval,
}
}
};
Poll::Pending
}
}
impl<'a, P> FilterWatcher<'a, P, TxHash>
where
P: JsonRpcClient,
{
/// Returns a stream that yields the `Transaction`s for the transaction hashes this stream yields.
///
/// This internally calls `Provider::get_transaction` with every new transaction.
/// No more than n futures will be buffered at any point in time, and less than n may also be
/// buffered depending on the state of each future.
pub fn transactions_unordered(self, n: usize) -> TransactionStream<'a, P, Self> {
TransactionStream::new(self.provider, self, n)
}
}
/// Errors `TransactionStream` can throw
#[derive(Debug, thiserror::Error)]
pub enum GetTransactionError {
#[error("Failed to get transaction `{0}`: {1}")]
ProviderError(TxHash, ProviderError),
/// `get_transaction` resulted in a `None`
#[error("Transaction `{0}` not found")]
NotFound(TxHash),
}
impl From<GetTransactionError> for ProviderError {
fn from(err: GetTransactionError) -> Self {
match err {
GetTransactionError::ProviderError(_, err) => err,
err @ GetTransactionError::NotFound(_) => ProviderError::CustomError(err.to_string()),
}
}
}
type TransactionFut<'a> = Pin<Box<dyn Future<Output = TransactionResult> + 'a>>;
type TransactionResult = Result<Transaction, GetTransactionError>;
/// Drains a stream of transaction hashes and yields entire `Transaction`.
#[must_use = "streams do nothing unless polled"]
pub struct TransactionStream<'a, P, St> {
/// Currently running futures pending completion.
pending: FuturesUnordered<TransactionFut<'a>>,
/// Temporary buffered transaction that get started as soon as another future finishes.
buffered: VecDeque<TxHash>,
/// The provider that gets the transaction
provider: &'a Provider<P>,
/// A stream of transaction hashes.
stream: St,
/// max allowed futures to execute at once.
max_concurrent: usize,
}
impl<'a, P: JsonRpcClient, St> TransactionStream<'a, P, St> {
/// Create a new `TransactionStream` instance
pub fn new(provider: &'a Provider<P>, stream: St, max_concurrent: usize) -> Self {
Self {
pending: Default::default(),
buffered: Default::default(),
provider,
stream,
max_concurrent,
}
}
/// Push a future into the set
fn push_tx(&mut self, tx: TxHash) {
let fut = self
.provider
.get_transaction(tx)
.then(move |res| match res {
Ok(Some(tx)) => futures_util::future::ok(tx),
Ok(None) => futures_util::future::err(GetTransactionError::NotFound(tx)),
Err(err) => futures_util::future::err(GetTransactionError::ProviderError(tx, err)),
});
self.pending.push(Box::pin(fut));
}
}
impl<'a, P, St> Stream for TransactionStream<'a, P, St>
where
P: JsonRpcClient,
St: Stream<Item = TxHash> + Unpin + 'a,
{
type Item = TransactionResult;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
// drain buffered transactions first
while this.pending.len() < this.max_concurrent {
if let Some(tx) = this.buffered.pop_front() {
this.push_tx(tx);
} else {
break;
}
}
let mut stream_done = false;
loop {
match Stream::poll_next(Pin::new(&mut this.stream), cx) {
Poll::Ready(Some(tx)) => {
if this.pending.len() < this.max_concurrent {
this.push_tx(tx);
} else {
this.buffered.push_back(tx);
}
}
Poll::Ready(None) => {
stream_done = true;
break;
}
_ => break,
}
}
// poll running futures
if let tx @ Poll::Ready(Some(_)) = this.pending.poll_next_unpin(cx) {
return tx;
}
if stream_done && this.pending.is_empty() {
// all done
return Poll::Ready(None);
}
Poll::Pending
}
}
#[cfg(test)]
#[cfg(not(target_arch = "wasm32"))]
mod tests {
use super::*;
use crate::{Http, Ws};
use ethers_core::{
types::{TransactionReceipt, TransactionRequest},
utils::{Ganache, Geth},
};
use futures_util::{FutureExt, StreamExt};
use std::collections::HashSet;
use std::convert::TryFrom;
#[tokio::test]
async fn can_stream_pending_transactions() {
let num_txs = 5;
let geth = Geth::new().block_time(2u64).spawn();
let provider = Provider::<Http>::try_from(geth.endpoint())
.unwrap()
.interval(Duration::from_millis(1000));
let ws = Ws::connect(geth.ws_endpoint()).await.unwrap();
let ws_provider = Provider::new(ws);
let accounts = provider.get_accounts().await.unwrap();
let tx = TransactionRequest::new()
.from(accounts[0])
.to(accounts[0])
.value(1e18 as u64);
let mut sending = futures_util::future::join_all(
std::iter::repeat(tx.clone()).take(num_txs).map(|tx| async {
provider
.send_transaction(tx, None)
.await
.unwrap()
.await
.unwrap()
.unwrap()
}),
)
.fuse();
let mut watch_tx_stream = provider
.watch_pending_transactions()
.await
.unwrap()
.transactions_unordered(num_txs)
.fuse();
let mut sub_tx_stream = ws_provider
.subscribe_pending_txs()
.await
.unwrap()
.transactions_unordered(2)
.fuse();
let mut sent: Option<Vec<TransactionReceipt>> = None;
let mut watch_received: Vec<Transaction> = Vec::with_capacity(num_txs);
let mut sub_received: Vec<Transaction> = Vec::with_capacity(num_txs);
loop {
futures_util::select! {
txs = sending => {
sent = Some(txs)
},
tx = watch_tx_stream.next() => watch_received.push(tx.unwrap().unwrap()),
tx = sub_tx_stream.next() => sub_received.push(tx.unwrap().unwrap()),
};
if watch_received.len() == num_txs && sub_received.len() == num_txs {
if let Some(ref sent) = sent {
assert_eq!(sent.len(), watch_received.len());
let sent_txs = sent
.iter()
.map(|tx| tx.transaction_hash)
.collect::<HashSet<_>>();
assert_eq!(sent_txs, watch_received.iter().map(|tx| tx.hash).collect());
assert_eq!(sent_txs, sub_received.iter().map(|tx| tx.hash).collect());
break;
}
}
}
}
#[tokio::test]
async fn can_stream_transactions() {
let ganache = Ganache::new().block_time(2u64).spawn();
let provider = Provider::<Http>::try_from(ganache.endpoint())
.unwrap()
.with_sender(ganache.addresses()[0]);
let accounts = provider.get_accounts().await.unwrap();
let tx = TransactionRequest::new()
.from(accounts[0])
.to(accounts[0])
.value(1e18 as u64);
let txs =
futures_util::future::join_all(std::iter::repeat(tx.clone()).take(3).map(|tx| async {
provider
.send_transaction(tx, None)
.await
.unwrap()
.await
.unwrap()
}))
.await;
let stream = TransactionStream::new(
&provider,
stream::iter(txs.iter().cloned().map(|tx| tx.unwrap().transaction_hash)),
10,
);
let res = stream
.collect::<Vec<_>>()
.await
.into_iter()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert_eq!(res.len(), txs.len());
assert_eq!(
res.into_iter().map(|tx| tx.hash).collect::<HashSet<_>>(),
txs.into_iter()
.map(|tx| tx.unwrap().transaction_hash)
.collect()
);
}
}
| FilterWatcher | identifier_name |
HttpHelper.ts |
import {URL, DNS} from "../Config/config"
import { G_UserControl } from '../Controller/UserControl';
//import { context } from "../../../packVersion/ver_1_0.0.0/src/cocos2d-jsb";
import CryptoJS = require('../Common/CryptoJS')
import { JSEncrypt } from "../Common/jsencrypt";
import { G_RequestControl } from "../Controller/RequestControl";
class HttpHelper {
private lastTime = null;
private cd(path){
if(this.lastTime == null)
{
this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",0);
}else
{
let tt = new Date().getTime() - this.lastTime;
//this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",tt/1000);
}
}
public static readonly Instance : HttpHelper = new HttpHelper();
/**
* get请求
* @param {string} url
* @param {function} callback
*/
httpGet(path, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
// cc.myGame.gameUi.onShowLockScreen();
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
else if (xhr.readyState === 4 && xhr.status == 401) {
callback({status:401});
}
}.bind(this);
let data = this.doEncode(url);
// xhr.withCredentials = true;
console.log("[HTTP>GET]:URL>>>>>>>>>>>>>>>>>",URL+url)
xhr.open('GET', URL + url, true);
// if (cc.sys.isNative) {
xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.setRequestHeader('Access-Control-Allow-Headers', 'Content-Type');
xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET,POST,PUT,OPTIONS');
xhr.setRequestHeader('Access-Control-Allow-Credentials','true')
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
// xhr.setRequestHeader("Content-Type", "application/json;charset=utf-8");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer '+G_UserControl.getUser().accessToken);
// xhr.setRequestHeader('Authorization', 'Bearer ' + "");
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
console.log("xhr "+xhr.status)
xhr.timeout = 5000;
xhr.send();
}
/**
* post请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPost(path, params, callback) {
console.log("path ",path);
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
console.log("return ",path);
return;
}
console.log("nocdddddddddddddd ",path);
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}
//console.log('respone '+xhr.responseText);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('POST', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
//xhr.setRequestHeader("Accept", "application/json");
// xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
xhr.setRequestHeader('JHReferer', DNS);
if(G_UserControl.getUser().accessToken)
{
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
}
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
/**
* put请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPut(path, params, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
console.log("xhr.readyState "+xhr.readyState + " xhr.status == 200 "+xhr.status)
if (xhr.readyState === 4 && xhr.status == 200) {
| readyState === 3 && xhr.status != 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
//console.log('respone '+xhr.status);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('PUT', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.open('PUT', URL+url, true);
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
doEncode(data)
{
// //偏移量 由前端每次请求随机生成 16位
var IV = this.randomString(16);
// //AES加密KEY 由前端自己每次请求随机生成
var KEY = this.randomString(16);
var public_key = "-----BEGIN PUBLIC KEY-----MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCgy6JOupuDqE9itVQvGSBDJotBEJFASuklIwvcMNtXUH99PdihJ+TJN2AjNphzCdgL9KlguDG+u4C719DZOC3YrGn7Ps9vWOFtQYLzh69cGd+nlqOR4LKVSAYRn2NtrV9elAzBjie/Y7ITMsU9+ZTsccRqb+qd+OlBsYdg9dhvVQIDAQAB-----END PUBLIC KEY-----";
//加密后的数据 json 直接传递给后端
var encrypt_data = this.AES_encrypt(data,KEY,IV,public_key);
return encrypt_data;
}
//随机串
randomString(len) {
len = len || 32;
var $chars = 'ABCDEFGHJKMNPQRSTWXYZabcdefhijkmnprstwxyz2345678'; //默认去掉了容易混淆的字符oOLl,9gq,Vv,Uu,I1
var maxPos = $chars.length;
var pwd = '';
for (var i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
}
/**
* AES加密数组 传入参数为需要传递的数组JSON
*/
AES_encrypt(data,KEY,IV,pkcs8_public) {
console.log("typeof data ",typeof(data));
var key_utf8 = CryptoJS.enc.Utf8.parse(KEY);// 秘钥
var iv_utf8= CryptoJS.enc.Utf8.parse(IV);//向量iv
let srcs = ''
switch (typeof (data)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(data)
break;
case 'object':
srcs = CryptoJS.enc.Utf8.parse(JSON.stringify(data))
break;
default:
srcs = CryptoJS.enc.Utf8.parse(data.toString())
}
//AES 加密
var encrypted = CryptoJS.AES.encrypt(srcs, key_utf8, { iv: iv_utf8, mode: CryptoJS.mode.CBC, padding: CryptoJS.pad.Pkcs7}).toString();
//RSA 加密 组包
return this.pack(encrypted,IV,KEY,pkcs8_public)
}
//组包
pack(encrypted,iv,key,pub_key) {
var jsencrypt = new JSEncrypt();
jsencrypt.setKey(pub_key);
var rsa_iv = jsencrypt.encrypt(iv);
var rsa_key = jsencrypt.encrypt(key);
var splitFlag = 'aesrsastart';
var res_data = encrypted+splitFlag+rsa_iv+splitFlag+rsa_key
return res_data
}
//解密
doDecode(responseData)
{
//console.log("解密 responseData "+responseData);
if(responseData == null)
{
return null;
}
var data = JSON.parse(responseData);
var cryptData = data.data;
if (cryptData != undefined) {
var cryptDataArr = cryptData.split("hDdoAPaXI3S");
if (cryptDataArr.length == 3) {
var cryptDataStr = cryptDataArr[0];
var privateKey = "-----BEGIN PRIVATE KEY-----MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAPTYUA2oNnnEwCM+firQEh3qtvhzy2sPcCCPBuk1ALN98ThFtwbsAIXn4iflC8cL74OxsW5LhVLqRaNJwrj19nUWRNg2V0UG0qiSMDoFQzcf14Tl3YEMVhHmhT60KEc/mcOkGp7BGFneNRkUrnAedUPaI18hHfwlOXCTBOXjsLEHAgMBAAECgYAOsZCUUTz7r8gMFWsC7Lu5meVjIafag/GpsouqoSiqnOtGAkEKpE0fvBvBYyiCyH+WOqq4QMX+hNqrAvkxmmkw3Zj6pqGIGBm8qP0sC7kV9l3+1GyNweBaPqnZs02Kb3WCZnw8h1NaJRR9uqXFITzLkNgxEOuq9oiQqmI9UmP7sQJBAP1qL2O32RS/i08lCHR1r/XQTF/0pkSPX+a6SEf25iewzKm5do8hOtSG7+zjOlOQwsGwCPuNovz5g8BPMv2juQ8CQQD3V78skMtTp+0c6WjVh5ORIkkYAyOnSfl3nigkQKCfGyiTwX1cm3GLTHkDHZBVJjFyz8U/ngZZbG8ScHZCMtiJAkEAroiApQxNXaXiu5rE7PjVPNa+k2P7U8LviQiJmc7pizKQcuDCUCfRzeg1vJBvbniIOkAUn7RYKiVrYXrqopgtbwJAd+zzpIgQDd+99+a0DdROmHAnQJ1FDDex3W2xyOIM/xgL9Jg8UEqOIxxREFGlSaPbFe/nk5DrQzBwKmCc9jvxAQJALe9ZaKqPeZywh2aUa8huotTe5lj/iDeGdHOgxx4xkDK9ddzuSks1dbJQ/gHl8lA7MjOI6TvtgeLB9FOOvsi5EQ==-----END PRIVATE KEY-----";
var jsencrypt = new JSEncrypt();
jsencrypt.setPrivateKey(privateKey);
var iValue = jsencrypt.decrypt(cryptDataArr[1]);
var iKey = jsencrypt.decrypt(cryptDataArr[2]);
//console.log("cryptDataArr",cryptDataArr[0])
//console.log('iValue', iValue);
//console.log('iKey',iKey);
var options = {
iv: CryptoJS.enc.Utf8.parse(iValue),
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.Pkcs7
};
var tt = CryptoJS.AES.decrypt(cryptDataStr,CryptoJS.enc.Utf8.parse(iKey),options);
let srcs = ''
switch (typeof (tt)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(tt)
break;
case 'object':
srcs = tt.toString(CryptoJS.enc.Utf8);
//srcs = CryptoJS.enc.Utf8.parse(tt.toString());
break;
default:
srcs = CryptoJS.enc.Utf8.parse(tt.toString())
}
var jsonData = JSON.parse(srcs);
console.log("typeof (tt) "+typeof (tt) +" srcs "+srcs)
return jsonData;
}
}else
{
console.log("cryptData underfined")
}
return null;
}
}
export const G_HttpHelper = HttpHelper.Instance;
| let data = this.doDecode(xhr.responseText);
callback(data);
}else if(xhr. | conditional_block |
HttpHelper.ts |
import {URL, DNS} from "../Config/config"
import { G_UserControl } from '../Controller/UserControl';
//import { context } from "../../../packVersion/ver_1_0.0.0/src/cocos2d-jsb";
import CryptoJS = require('../Common/CryptoJS')
import { JSEncrypt } from "../Common/jsencrypt";
import { G_RequestControl } from "../Controller/RequestControl";
class HttpHelper {
private lastTime = null;
private cd(path){
if(this.lastTime == null)
{
this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",0);
}else
{
let tt = new Date().getTime() - this.lastTime;
//this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",tt/1000);
}
}
public static readonly Instance : HttpHelper = new HttpHelper();
/**
* get请求
* @param {string} url
* @param {function} callback
*/
httpGet(path, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
// cc.myGame.gameUi.onShowLockScreen();
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
else if (xhr.readyState === 4 && xhr.status == 401) {
callback({status:401});
}
}.bind(this);
let data = this.doEncode(url);
// xhr.withCredentials = true;
console.log("[HTTP>GET]:URL>>>>>>>>>>>>>>>>>",URL+url)
xhr.open('GET', URL + url, true);
// if (cc.sys.isNative) {
xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.setRequestHeader('Access-Control-Allow-Headers', 'Content-Type');
xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET,POST,PUT,OPTIONS');
xhr.setRequestHeader('Access-Control-Allow-Credentials','true')
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
// xhr.setRequestHeader("Content-Type", "application/json;charset=utf-8");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer '+G_UserControl.getUser().accessToken);
// xhr.setRequestHeader('Authorization', 'Bearer ' + "");
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
console.log("xhr "+xhr.status)
xhr.timeout = 5000;
xhr.send();
}
/**
* post请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPost(path, params, callback) {
console.log("path ",path);
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
console.log("return ",path);
return;
}
console.log("nocdddddddddddddd ",path);
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}
//console.log('respone '+xhr.responseText);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('POST', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
//xhr.setRequestHeader("Accept", "application/json");
// xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
xhr.setRequestHeader('JHReferer', DNS);
if(G_UserControl.getUser().accessToken)
{
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
}
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
/**
* put请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPut(path, params, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
console.log("xhr.readyState "+xhr.readyState + " xhr.status == 200 "+xhr.status)
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
//console.log('respone '+xhr.status);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('PUT', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.open('PUT', URL+url, true);
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
doEncode(data)
{
// //偏移量 由前端每次请求随机生成 16位
var IV = this.randomString(16);
// //AES加密KEY 由前端自己每次请求随机生成
var KEY = this.randomString(16);
var public_key = "-----BEGIN PUBLIC KEY-----MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCgy6JOupuDqE9itVQvGSBDJotBEJFASuklIwvcMNtXUH99PdihJ+TJN2AjNphzCdgL9KlguDG+u4C719DZOC3YrGn7Ps9vWOFtQYLzh69cGd+nlqOR4LKVSAYRn2NtrV9elAzBjie/Y7ITMsU9+ZTsccRqb+qd+OlBsYdg9dhvVQIDAQAB-----END PUBLIC KEY-----";
//加密后的数据 json 直接传递给后端
var encrypt_data = this.AES_encrypt(data,KEY,IV,public_key);
return encrypt_data;
}
//随机串
randomString(len) {
len = len || 32;
var $chars = 'ABCDEFGHJKMNPQRSTWXYZabcdefhijkmnprstwxyz2345678'; //默认去掉了容易混淆的字符oOLl,9gq,Vv,Uu,I1
var maxPos = $chars.length;
var pwd = '';
for (var i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
}
/**
* AES加密数组 传入参数为需要传递的数组JSON
*/
AES_encrypt(data,KEY,IV,pkcs8_public) {
console.log("typeof data ",typeof(data));
var key_utf8 = CryptoJS.enc.Utf8.parse(KEY);// 秘钥
var iv_utf8= CryptoJS.enc.Utf8.parse(IV);//向量iv
let srcs = ''
switch (typeof (data)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(data)
break;
case 'object':
srcs = CryptoJS.enc.Utf8.parse(JSON.stringify(data))
break;
default:
srcs = CryptoJS.enc.Utf8.parse(data.toString())
}
//AES 加密
var encrypted = CryptoJS.AES.encrypt(srcs, key_utf8, { iv: iv_utf8, mode: CryptoJS.mode.CBC, padding: CryptoJS.pad.Pkcs7}).toString();
//RSA 加密 组包
return this.pack(encrypted,IV,KEY,pkcs8_public)
}
//组包
pack(encrypted,iv,key,pub_key) {
var jsencrypt = new JSEncrypt();
jsencrypt.setKey(pub_key);
var rsa_iv = jsencrypt.encrypt(iv);
var rsa_key = jsencrypt.encrypt(key);
var splitFlag = 'aesrsastart';
var res_data = encrypted+splitFlag+rsa_iv+splitFlag+rsa_key
return res_data
}
//解密
doDecode(responseData)
{
//console.log("解密 responseData "+responseData);
if(responseData == null)
{
return null;
}
var data = JSON.parse(responseData);
var cryptData = data.data;
| if (cryptData != undefined) {
var cryptDataArr = cryptData.split("hDdoAPaXI3S");
if (cryptDataArr.length == 3) {
var cryptDataStr = cryptDataArr[0];
var privateKey = "-----BEGIN PRIVATE KEY-----MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAPTYUA2oNnnEwCM+firQEh3qtvhzy2sPcCCPBuk1ALN98ThFtwbsAIXn4iflC8cL74OxsW5LhVLqRaNJwrj19nUWRNg2V0UG0qiSMDoFQzcf14Tl3YEMVhHmhT60KEc/mcOkGp7BGFneNRkUrnAedUPaI18hHfwlOXCTBOXjsLEHAgMBAAECgYAOsZCUUTz7r8gMFWsC7Lu5meVjIafag/GpsouqoSiqnOtGAkEKpE0fvBvBYyiCyH+WOqq4QMX+hNqrAvkxmmkw3Zj6pqGIGBm8qP0sC7kV9l3+1GyNweBaPqnZs02Kb3WCZnw8h1NaJRR9uqXFITzLkNgxEOuq9oiQqmI9UmP7sQJBAP1qL2O32RS/i08lCHR1r/XQTF/0pkSPX+a6SEf25iewzKm5do8hOtSG7+zjOlOQwsGwCPuNovz5g8BPMv2juQ8CQQD3V78skMtTp+0c6WjVh5ORIkkYAyOnSfl3nigkQKCfGyiTwX1cm3GLTHkDHZBVJjFyz8U/ngZZbG8ScHZCMtiJAkEAroiApQxNXaXiu5rE7PjVPNa+k2P7U8LviQiJmc7pizKQcuDCUCfRzeg1vJBvbniIOkAUn7RYKiVrYXrqopgtbwJAd+zzpIgQDd+99+a0DdROmHAnQJ1FDDex3W2xyOIM/xgL9Jg8UEqOIxxREFGlSaPbFe/nk5DrQzBwKmCc9jvxAQJALe9ZaKqPeZywh2aUa8huotTe5lj/iDeGdHOgxx4xkDK9ddzuSks1dbJQ/gHl8lA7MjOI6TvtgeLB9FOOvsi5EQ==-----END PRIVATE KEY-----";
var jsencrypt = new JSEncrypt();
jsencrypt.setPrivateKey(privateKey);
var iValue = jsencrypt.decrypt(cryptDataArr[1]);
var iKey = jsencrypt.decrypt(cryptDataArr[2]);
//console.log("cryptDataArr",cryptDataArr[0])
//console.log('iValue', iValue);
//console.log('iKey',iKey);
var options = {
iv: CryptoJS.enc.Utf8.parse(iValue),
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.Pkcs7
};
var tt = CryptoJS.AES.decrypt(cryptDataStr,CryptoJS.enc.Utf8.parse(iKey),options);
let srcs = ''
switch (typeof (tt)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(tt)
break;
case 'object':
srcs = tt.toString(CryptoJS.enc.Utf8);
//srcs = CryptoJS.enc.Utf8.parse(tt.toString());
break;
default:
srcs = CryptoJS.enc.Utf8.parse(tt.toString())
}
var jsonData = JSON.parse(srcs);
console.log("typeof (tt) "+typeof (tt) +" srcs "+srcs)
return jsonData;
}
}else
{
console.log("cryptData underfined")
}
return null;
}
}
export const G_HttpHelper = HttpHelper.Instance;
| identifier_body | |
HttpHelper.ts | import {URL, DNS} from "../Config/config"
import { G_UserControl } from '../Controller/UserControl';
//import { context } from "../../../packVersion/ver_1_0.0.0/src/cocos2d-jsb";
import CryptoJS = require('../Common/CryptoJS')
import { JSEncrypt } from "../Common/jsencrypt";
import { G_RequestControl } from "../Controller/RequestControl";
class HttpHelper {
private lastTime = null;
private cd(path){
if(this.lastTime == null)
{
this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",0);
}else
{
let tt = new Date().getTime() - this.lastTime;
//this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",tt/1000);
}
}
public static readonly Instance : HttpHelper = new HttpHelper();
/**
* get请求
* @param {string} url
* @param {function} callback
*/
httpGet(path, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
// cc.myGame.gameUi.onShowLockScreen();
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
else if (xhr.readyState === 4 && xhr.status == 401) {
callback({status:401});
}
}.bind(this);
let data = this.doEncode(url);
// xhr.withCredentials = true;
console.log("[HTTP>GET]:URL>>>>>>>>>>>>>>>>>",URL+url)
xhr.open('GET', URL + url, true);
// if (cc.sys.isNative) {
xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.setRequestHeader('Access-Control-Allow-Headers', 'Content-Type');
xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET,POST,PUT,OPTIONS');
xhr.setRequestHeader('Access-Control-Allow-Credentials','true')
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
// xhr.setRequestHeader("Content-Type", "application/json;charset=utf-8");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer '+G_UserControl.getUser().accessToken);
// xhr.setRequestHeader('Authorization', 'Bearer ' + "");
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
console.log("xhr "+xhr.status)
xhr.timeout = 5000;
xhr.send();
}
/**
* post请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPost(path, params, callback) {
console.log("path ",path);
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
console.log("return ",path);
return;
}
console.log("nocdddddddddddddd ",path);
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}
//console.log('respone '+xhr.responseText);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('POST', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
//xhr.setRequestHeader("Accept", "application/json");
// xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
xhr.setRequestHeader('JHReferer', DNS);
if(G_UserControl.getUser().accessToken)
{
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
}
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
/**
* put请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPut(path, params, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
console.log("xhr.readyState "+xhr.readyState + " xhr.status == 200 "+xhr.status)
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
//console.log('respone '+xhr.status);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('PUT', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.open('PUT', URL+url, true);
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
doEncode(data)
{
// //偏移量 由前端每次请求随机生成 16位
var IV = this.randomString(16);
// //AES加密KEY 由前端自己每次请求随机生成
var KEY = this.randomString(16);
var public_key = "-----BEGIN PUBLIC KEY-----MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCgy6JOupuDqE9itVQvGSBDJotBEJFASuklIwvcMNtXUH99PdihJ+TJN2AjNphzCdgL9KlguDG+u4C719DZOC3YrGn7Ps9vWOFtQYLzh69cGd+nlqOR4LKVSAYRn2NtrV9elAzBjie/Y7ITMsU9+ZTsccRqb+qd+OlBsYdg9dhvVQIDAQAB-----END PUBLIC KEY-----";
//加密后的数据 json 直接传递给后端
var encrypt_data = this.AES_encrypt(data,KEY,IV,public_key);
return encrypt_data;
}
//随机串
randomString(len) {
len = len || 32;
var $chars = 'ABCDEFGHJKMNPQRSTWXYZabcdefhijkmnprstwxyz2345678'; //默认去掉了容易混淆的字符oOLl,9gq,Vv,Uu,I1
var maxPos = $chars.length;
var pwd = '';
for (var i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos)); | }
/**
* AES加密数组 传入参数为需要传递的数组JSON
*/
AES_encrypt(data,KEY,IV,pkcs8_public) {
console.log("typeof data ",typeof(data));
var key_utf8 = CryptoJS.enc.Utf8.parse(KEY);// 秘钥
var iv_utf8= CryptoJS.enc.Utf8.parse(IV);//向量iv
let srcs = ''
switch (typeof (data)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(data)
break;
case 'object':
srcs = CryptoJS.enc.Utf8.parse(JSON.stringify(data))
break;
default:
srcs = CryptoJS.enc.Utf8.parse(data.toString())
}
//AES 加密
var encrypted = CryptoJS.AES.encrypt(srcs, key_utf8, { iv: iv_utf8, mode: CryptoJS.mode.CBC, padding: CryptoJS.pad.Pkcs7}).toString();
//RSA 加密 组包
return this.pack(encrypted,IV,KEY,pkcs8_public)
}
//组包
pack(encrypted,iv,key,pub_key) {
var jsencrypt = new JSEncrypt();
jsencrypt.setKey(pub_key);
var rsa_iv = jsencrypt.encrypt(iv);
var rsa_key = jsencrypt.encrypt(key);
var splitFlag = 'aesrsastart';
var res_data = encrypted+splitFlag+rsa_iv+splitFlag+rsa_key
return res_data
}
//解密
doDecode(responseData)
{
//console.log("解密 responseData "+responseData);
if(responseData == null)
{
return null;
}
var data = JSON.parse(responseData);
var cryptData = data.data;
if (cryptData != undefined) {
var cryptDataArr = cryptData.split("hDdoAPaXI3S");
if (cryptDataArr.length == 3) {
var cryptDataStr = cryptDataArr[0];
var privateKey = "-----BEGIN PRIVATE KEY-----MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAPTYUA2oNnnEwCM+firQEh3qtvhzy2sPcCCPBuk1ALN98ThFtwbsAIXn4iflC8cL74OxsW5LhVLqRaNJwrj19nUWRNg2V0UG0qiSMDoFQzcf14Tl3YEMVhHmhT60KEc/mcOkGp7BGFneNRkUrnAedUPaI18hHfwlOXCTBOXjsLEHAgMBAAECgYAOsZCUUTz7r8gMFWsC7Lu5meVjIafag/GpsouqoSiqnOtGAkEKpE0fvBvBYyiCyH+WOqq4QMX+hNqrAvkxmmkw3Zj6pqGIGBm8qP0sC7kV9l3+1GyNweBaPqnZs02Kb3WCZnw8h1NaJRR9uqXFITzLkNgxEOuq9oiQqmI9UmP7sQJBAP1qL2O32RS/i08lCHR1r/XQTF/0pkSPX+a6SEf25iewzKm5do8hOtSG7+zjOlOQwsGwCPuNovz5g8BPMv2juQ8CQQD3V78skMtTp+0c6WjVh5ORIkkYAyOnSfl3nigkQKCfGyiTwX1cm3GLTHkDHZBVJjFyz8U/ngZZbG8ScHZCMtiJAkEAroiApQxNXaXiu5rE7PjVPNa+k2P7U8LviQiJmc7pizKQcuDCUCfRzeg1vJBvbniIOkAUn7RYKiVrYXrqopgtbwJAd+zzpIgQDd+99+a0DdROmHAnQJ1FDDex3W2xyOIM/xgL9Jg8UEqOIxxREFGlSaPbFe/nk5DrQzBwKmCc9jvxAQJALe9ZaKqPeZywh2aUa8huotTe5lj/iDeGdHOgxx4xkDK9ddzuSks1dbJQ/gHl8lA7MjOI6TvtgeLB9FOOvsi5EQ==-----END PRIVATE KEY-----";
var jsencrypt = new JSEncrypt();
jsencrypt.setPrivateKey(privateKey);
var iValue = jsencrypt.decrypt(cryptDataArr[1]);
var iKey = jsencrypt.decrypt(cryptDataArr[2]);
//console.log("cryptDataArr",cryptDataArr[0])
//console.log('iValue', iValue);
//console.log('iKey',iKey);
var options = {
iv: CryptoJS.enc.Utf8.parse(iValue),
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.Pkcs7
};
var tt = CryptoJS.AES.decrypt(cryptDataStr,CryptoJS.enc.Utf8.parse(iKey),options);
let srcs = ''
switch (typeof (tt)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(tt)
break;
case 'object':
srcs = tt.toString(CryptoJS.enc.Utf8);
//srcs = CryptoJS.enc.Utf8.parse(tt.toString());
break;
default:
srcs = CryptoJS.enc.Utf8.parse(tt.toString())
}
var jsonData = JSON.parse(srcs);
console.log("typeof (tt) "+typeof (tt) +" srcs "+srcs)
return jsonData;
}
}else
{
console.log("cryptData underfined")
}
return null;
}
}
export const G_HttpHelper = HttpHelper.Instance; | }
return pwd; | random_line_split |
HttpHelper.ts |
import {URL, DNS} from "../Config/config"
import { G_UserControl } from '../Controller/UserControl';
//import { context } from "../../../packVersion/ver_1_0.0.0/src/cocos2d-jsb";
import CryptoJS = require('../Common/CryptoJS')
import { JSEncrypt } from "../Common/jsencrypt";
import { G_RequestControl } from "../Controller/RequestControl";
class HttpHelper {
private lastTime = null;
private | (path){
if(this.lastTime == null)
{
this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",0);
}else
{
let tt = new Date().getTime() - this.lastTime;
//this.lastTime = new Date().getTime();
console.log("Time--------------------------->",path," ",tt/1000);
}
}
public static readonly Instance : HttpHelper = new HttpHelper();
/**
* get请求
* @param {string} url
* @param {function} callback
*/
httpGet(path, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
// cc.myGame.gameUi.onShowLockScreen();
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
else if (xhr.readyState === 4 && xhr.status == 401) {
callback({status:401});
}
}.bind(this);
let data = this.doEncode(url);
// xhr.withCredentials = true;
console.log("[HTTP>GET]:URL>>>>>>>>>>>>>>>>>",URL+url)
xhr.open('GET', URL + url, true);
// if (cc.sys.isNative) {
xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.setRequestHeader('Access-Control-Allow-Headers', 'Content-Type');
xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET,POST,PUT,OPTIONS');
xhr.setRequestHeader('Access-Control-Allow-Credentials','true')
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
// xhr.setRequestHeader("Content-Type", "application/json;charset=utf-8");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer '+G_UserControl.getUser().accessToken);
// xhr.setRequestHeader('Authorization', 'Bearer ' + "");
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
console.log("xhr "+xhr.status)
xhr.timeout = 5000;
xhr.send();
}
/**
* post请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPost(path, params, callback) {
console.log("path ",path);
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
console.log("return ",path);
return;
}
console.log("nocdddddddddddddd ",path);
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
if (xhr.readyState === 4 && xhr.status == 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let respone = xhr.responseText;
let data = this.doDecode(xhr.responseText);
// let rsp = JSON.parse(data);
callback(data);
}
//console.log('respone '+xhr.responseText);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('POST', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
//xhr.setRequestHeader("Accept", "application/json");
// xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
xhr.setRequestHeader('JHReferer', DNS);
if(G_UserControl.getUser().accessToken)
{
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
}
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
/**
* put请求
* @param {string} url
* @param {object} params
* @param {function} callback
*/
httpPut(path, params, callback) {
this.cd(path);
let isCD = G_RequestControl.getConfig().isCD(path)
if(isCD)
{
return;
}
let url = G_RequestControl.getConfig().getURL(path)
let xhr = cc.loader.getXMLHttpRequest();
xhr.onreadystatechange = function () {
console.log("xhr.readyState "+xhr.readyState + " xhr.status == 200 "+xhr.status)
if (xhr.readyState === 4 && xhr.status == 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}else if(xhr.readyState === 3 && xhr.status != 200) {
let data = this.doDecode(xhr.responseText);
callback(data);
}
//console.log('respone '+xhr.status);
}.bind(this);
console.log("[HTTP>POST]:URL>>>>>>>>>>>>>>>>>",URL+url," params "+JSON.stringify(params) )
xhr.open('PUT', URL+url, true);
// if (cc.sys.isNative) {
// xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
xhr.open('PUT', URL+url, true);
//xhr.setRequestHeader('Access-Control-Allow-Methods', 'PUT,GET, POST');
//xhr.setRequestHeader('Access-Control-Allow-Headers', 'x-requested-with,content-type');
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader('X-Requested-With','XMLHttpRequest');
xhr.setRequestHeader('JHReferer', DNS);
xhr.setRequestHeader('Authorization', 'Bearer ' + G_UserControl.getUser().accessToken);
// }
// note: In Internet Explorer, the timeout property may be set only after calling the open()
// method and before calling the send() method.
xhr.timeout = 5000;// 8 seconds for timeout
//xhr.send(JSON.stringify(params));
if(params == null)
{
xhr.send();
}else
{
let data = {"data":this.doEncode(params)};
xhr.send(JSON.stringify(data));
}
}
doEncode(data)
{
// //偏移量 由前端每次请求随机生成 16位
var IV = this.randomString(16);
// //AES加密KEY 由前端自己每次请求随机生成
var KEY = this.randomString(16);
var public_key = "-----BEGIN PUBLIC KEY-----MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCgy6JOupuDqE9itVQvGSBDJotBEJFASuklIwvcMNtXUH99PdihJ+TJN2AjNphzCdgL9KlguDG+u4C719DZOC3YrGn7Ps9vWOFtQYLzh69cGd+nlqOR4LKVSAYRn2NtrV9elAzBjie/Y7ITMsU9+ZTsccRqb+qd+OlBsYdg9dhvVQIDAQAB-----END PUBLIC KEY-----";
//加密后的数据 json 直接传递给后端
var encrypt_data = this.AES_encrypt(data,KEY,IV,public_key);
return encrypt_data;
}
//随机串
randomString(len) {
len = len || 32;
var $chars = 'ABCDEFGHJKMNPQRSTWXYZabcdefhijkmnprstwxyz2345678'; //默认去掉了容易混淆的字符oOLl,9gq,Vv,Uu,I1
var maxPos = $chars.length;
var pwd = '';
for (var i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
}
/**
* AES加密数组 传入参数为需要传递的数组JSON
*/
AES_encrypt(data,KEY,IV,pkcs8_public) {
console.log("typeof data ",typeof(data));
var key_utf8 = CryptoJS.enc.Utf8.parse(KEY);// 秘钥
var iv_utf8= CryptoJS.enc.Utf8.parse(IV);//向量iv
let srcs = ''
switch (typeof (data)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(data)
break;
case 'object':
srcs = CryptoJS.enc.Utf8.parse(JSON.stringify(data))
break;
default:
srcs = CryptoJS.enc.Utf8.parse(data.toString())
}
//AES 加密
var encrypted = CryptoJS.AES.encrypt(srcs, key_utf8, { iv: iv_utf8, mode: CryptoJS.mode.CBC, padding: CryptoJS.pad.Pkcs7}).toString();
//RSA 加密 组包
return this.pack(encrypted,IV,KEY,pkcs8_public)
}
//组包
pack(encrypted,iv,key,pub_key) {
var jsencrypt = new JSEncrypt();
jsencrypt.setKey(pub_key);
var rsa_iv = jsencrypt.encrypt(iv);
var rsa_key = jsencrypt.encrypt(key);
var splitFlag = 'aesrsastart';
var res_data = encrypted+splitFlag+rsa_iv+splitFlag+rsa_key
return res_data
}
//解密
doDecode(responseData)
{
//console.log("解密 responseData "+responseData);
if(responseData == null)
{
return null;
}
var data = JSON.parse(responseData);
var cryptData = data.data;
if (cryptData != undefined) {
var cryptDataArr = cryptData.split("hDdoAPaXI3S");
if (cryptDataArr.length == 3) {
var cryptDataStr = cryptDataArr[0];
var privateKey = "-----BEGIN PRIVATE KEY-----MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAPTYUA2oNnnEwCM+firQEh3qtvhzy2sPcCCPBuk1ALN98ThFtwbsAIXn4iflC8cL74OxsW5LhVLqRaNJwrj19nUWRNg2V0UG0qiSMDoFQzcf14Tl3YEMVhHmhT60KEc/mcOkGp7BGFneNRkUrnAedUPaI18hHfwlOXCTBOXjsLEHAgMBAAECgYAOsZCUUTz7r8gMFWsC7Lu5meVjIafag/GpsouqoSiqnOtGAkEKpE0fvBvBYyiCyH+WOqq4QMX+hNqrAvkxmmkw3Zj6pqGIGBm8qP0sC7kV9l3+1GyNweBaPqnZs02Kb3WCZnw8h1NaJRR9uqXFITzLkNgxEOuq9oiQqmI9UmP7sQJBAP1qL2O32RS/i08lCHR1r/XQTF/0pkSPX+a6SEf25iewzKm5do8hOtSG7+zjOlOQwsGwCPuNovz5g8BPMv2juQ8CQQD3V78skMtTp+0c6WjVh5ORIkkYAyOnSfl3nigkQKCfGyiTwX1cm3GLTHkDHZBVJjFyz8U/ngZZbG8ScHZCMtiJAkEAroiApQxNXaXiu5rE7PjVPNa+k2P7U8LviQiJmc7pizKQcuDCUCfRzeg1vJBvbniIOkAUn7RYKiVrYXrqopgtbwJAd+zzpIgQDd+99+a0DdROmHAnQJ1FDDex3W2xyOIM/xgL9Jg8UEqOIxxREFGlSaPbFe/nk5DrQzBwKmCc9jvxAQJALe9ZaKqPeZywh2aUa8huotTe5lj/iDeGdHOgxx4xkDK9ddzuSks1dbJQ/gHl8lA7MjOI6TvtgeLB9FOOvsi5EQ==-----END PRIVATE KEY-----";
var jsencrypt = new JSEncrypt();
jsencrypt.setPrivateKey(privateKey);
var iValue = jsencrypt.decrypt(cryptDataArr[1]);
var iKey = jsencrypt.decrypt(cryptDataArr[2]);
//console.log("cryptDataArr",cryptDataArr[0])
//console.log('iValue', iValue);
//console.log('iKey',iKey);
var options = {
iv: CryptoJS.enc.Utf8.parse(iValue),
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.Pkcs7
};
var tt = CryptoJS.AES.decrypt(cryptDataStr,CryptoJS.enc.Utf8.parse(iKey),options);
let srcs = ''
switch (typeof (tt)) {
case 'string':
srcs = CryptoJS.enc.Utf8.parse(tt)
break;
case 'object':
srcs = tt.toString(CryptoJS.enc.Utf8);
//srcs = CryptoJS.enc.Utf8.parse(tt.toString());
break;
default:
srcs = CryptoJS.enc.Utf8.parse(tt.toString())
}
var jsonData = JSON.parse(srcs);
console.log("typeof (tt) "+typeof (tt) +" srcs "+srcs)
return jsonData;
}
}else
{
console.log("cryptData underfined")
}
return null;
}
}
export const G_HttpHelper = HttpHelper.Instance;
| cd | identifier_name |
run_squad.py | """
Question Answering with XLNet
"""
# pylint:disable=redefined-outer-name,logging-format-interpolation
import os
import time
import argparse
import random
import logging
import warnings
import json
import collections
import pickle
import sys
import itertools
import subprocess
import multiprocessing as mp
from functools import partial
import numpy as np
import mxnet as mx
import gluonnlp as nlp
from gluonnlp.data import SQuAD
from gluonnlp.data.bert.glue import concat_sequences
from gluonnlp.data.bert.squad import get_doc_spans, \
check_is_max_context, convert_squad_examples, align_position2doc_spans
from gluonnlp.data.xlnet.squad import lcs_match, convert_index
from model.qa import XLNetForQA
from transformer import model
from xlnet_qa_evaluate import predict_extended
parser = argparse.ArgumentParser(description='XLNet QA example.'
'We fine-tune the XLNet model on SQuAD dataset.')
# I/O configuration
parser.add_argument('--sentencepiece', type=str, default=None,
help='Path to the sentencepiece .model file for both tokenization and vocab.')
parser.add_argument('--pretrained_xlnet_parameters', type=str, default=None,
help='Pre-trained bert model parameter file. default is None')
parser.add_argument('--load_pickle', action='store_true',
help='Whether do data preprocessing or load from pickled file')
parser.add_argument('--dev_dataset_file', default='./output_dir/out.dev', type=str,
help='Path to dev data features')
parser.add_argument('--train_dataset_file', default='./output_dir/out.train', type=str,
help='Path to train data features')
parser.add_argument('--model_parameters', type=str, default=None, help='Model parameter file')
parser.add_argument(
'--output_dir', type=str, default='./output_dir',
help='The output directory where the model params will be written.'
' default is ./output_dir')
# Training configuration
parser.add_argument('--seed', type=int, default=3, help='Random seed')
parser.add_argument('--version_2', action='store_true', help='Whether use SQuAD v2.0 dataset')
parser.add_argument('--model', type=str, default='xlnet_cased_l12_h768_a12',
choices=['xlnet_cased_l24_h1024_a16', 'xlnet_cased_l12_h768_a12'],
help='The name of pre-trained XLNet model to fine-tune')
parser.add_argument('--dataset', type=str, default='126gb', choices=['126gb'],
help='The dataset BERT pre-trained with. Currently only 126gb is available')
parser.add_argument(
'--uncased', action='store_true', help=
'if set, inputs are converted to lower case. Up to 01/04/2020, all released models are cased')
parser.add_argument('--gpu', type=int, default=None,
help='Number of gpus to use for finetuning. CPU is used if not set.')
parser.add_argument('--log_interval', type=int, default=10, help='report interval. default is 10')
parser.add_argument('--debug', action='store_true',
help='Run the example in test mode for sanity checks')
parser.add_argument('--only_predict', action='store_true', help='Whether to predict only.')
# Hyperparameters
parser.add_argument('--epochs', type=int, default=3, help='number of epochs, default is 3')
parser.add_argument(
'--training_steps', type=int, help='training steps. Note that epochs will be ignored '
'if training steps are set')
parser.add_argument('--batch_size', type=int, default=32,
help='Batch size. Number of examples per gpu in a minibatch. default is 32')
parser.add_argument('--test_batch_size', type=int, default=24,
help='Test batch size. default is 24')
parser.add_argument('--optimizer', type=str, default='bertadam',
help='optimization algorithm. default is bertadam')
parser.add_argument(
'--accumulate', type=int, default=None, help='The number of batches for '
'gradients accumulation to simulate large batch size. Default is None')
parser.add_argument('--lr', type=float, default=3e-5,
help='Initial learning rate. default is 5e-5')
parser.add_argument(
'--warmup_ratio', type=float, default=0,
help='ratio of warmup steps that linearly increase learning rate from '
'0 to target learning rate. default is 0')
parser.add_argument('--layerwise_decay', type=float, default=0.75, help='Layer-wise lr decay')
parser.add_argument('--wd', type=float, default=0.01, help='weight decay')
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--attention_dropout', type=float, default=0.1, help='attention dropout')
# Data pre/post processing
parser.add_argument(
'--max_seq_length', type=int, default=512,
help='The maximum total input sequence length after WordPiece tokenization.'
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded. default is 512')
parser.add_argument(
'--doc_stride', type=int, default=128,
help='When splitting up a long document into chunks, how much stride to '
'take between chunks. default is 128')
parser.add_argument(
'--max_query_length', type=int, default=64,
help='The maximum number of tokens for the question. Questions longer than '
'this will be truncated to this length. default is 64')
parser.add_argument(
'--round_to', type=int, default=None,
help='The length of padded sequences will be rounded up to be multiple of this argument.'
'When round to is set to 8, training throughput may increase for mixed precision'
'training on GPUs with tensorcores.')
parser.add_argument('--start_top_n', type=int, default=5,
help='Number of start-position candidates')
parser.add_argument('--end_top_n', type=int, default=5,
help='Number of end-position candidates corresponding '
'to a start position')
parser.add_argument('--n_best_size', type=int, default=5, help='top N results written to file')
parser.add_argument(
'--max_answer_length', type=int, default=64,
help='The maximum length of an answer that can be generated. This is needed '
'because the start and end predictions are not conditioned on one another.'
' default is 64')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of workers used for data preprocessing')
parser.add_argument(
'--null_score_diff_threshold', type=float, default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.'
'Typical values are between -1.0 and -5.0. default is 0.0. '
'Note that a best value can be automatically found by the evaluation script')
args = parser.parse_args()
# random seed
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
# set the logger
log = logging.getLogger('gluonnlp')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(levelname)s:%(name)s:%(asctime)s %(message)s',
datefmt='%H:%M:%S')
fh = logging.FileHandler(os.path.join(args.output_dir, 'finetune_squad.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(fh)
log.info(args)
pretrained_xlnet_parameters = args.pretrained_xlnet_parameters
if pretrained_xlnet_parameters and args.model_parameters:
raise ValueError('Cannot provide both pre-trained BERT parameters and '
'BertForQA model parameters.')
ctx = [mx.cpu(0)] if not args.gpu else [mx.gpu(i) for i in range(args.gpu)]
log_interval = args.log_interval * args.accumulate if args.accumulate else args.log_interval
if args.accumulate:
log.info('Using gradient accumulation. Effective batch size = %d',
args.accumulate * args.batch_size)
if args.max_seq_length <= args.max_query_length + 3:
raise ValueError('The max_seq_length (%d) must be greater than max_query_length '
'(%d) + 3' % (args.max_seq_length, args.max_query_length))
get_pretrained = True
get_model_params = {
'name': args.model,
'dataset_name': args.dataset,
'pretrained': get_pretrained,
'ctx': ctx,
'use_decoder': False,
'dropout': args.dropout,
'attention_dropout': args.attention_dropout
}
# model, vocabulary and tokenizer
xlnet_base, vocab, tokenizer = model.get_model(**get_model_params)
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Stack('int32'), # example_id
nlp.data.batchify.Pad(axis=0, pad_val=vocab[vocab.padding_token], dtype='int32',
round_to=args.round_to), # input_ids
nlp.data.batchify.Pad(axis=0, pad_val=3, dtype='int32', round_to=args.round_to), # segment_ids
nlp.data.batchify.Stack('float32'), # valid_length
nlp.data.batchify.Pad(axis=0, pad_val=1, round_to=args.round_to), # p_mask
nlp.data.batchify.Stack('float32'), # start_position
nlp.data.batchify.Stack('float32'), # end_position
nlp.data.batchify.Stack('float32')) # is_impossible
if pretrained_xlnet_parameters:
# only load XLnetModel parameters
nlp.utils.load_parameters(xlnet_base, pretrained_xlnet_parameters, ctx=ctx, ignore_extra=True,
cast_dtype=True)
units = xlnet_base._net._units
net = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n, end_top_n=args.end_top_n,
units=units)
net_eval = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n,
end_top_n=args.end_top_n, units=units, is_eval=True,
params=net.collect_params())
initializer = mx.init.Normal(0.02)
if args.model_parameters:
# load complete XLNetForQA parameters
nlp.utils.load_parameters(net, args.model_parameters, ctx=ctx, cast_dtype=True)
else:
net.start_logits.initialize(init=initializer, ctx=ctx)
net.end_logits.initialize(init=initializer, ctx=ctx)
net.answer_class.initialize(init=initializer, ctx=ctx)
net.hybridize(static_alloc=True)
net_eval.hybridize(static_alloc=True)
SquadXLNetFeautre = collections.namedtuple('SquadXLNetFeautre', [
'example_id', 'qas_id', 'valid_length', 'tokens', 'tok_start_to_orig_index',
'tok_end_to_orig_index', 'token_is_max_context', 'input_ids', 'p_mask', 'segment_ids',
'start_position', 'end_position', 'paragraph_text', 'paragraph_len', 'is_impossible'
])
def convert_examples_to_features(example, tokenizer=None, cls_token=None, sep_token=None,
vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, is_training=True):
"""convert the examples to the XLNet features"""
query_tokenized = tokenizer(example.question_text)[:max_query_length]
#tokenize paragraph and get start/end position of the answer in tokenized paragraph
paragraph_tokenized = tokenizer(example.paragraph_text)
chartok_to_tok_index = [] # char to its corresponding token's index
tok_start_to_chartok_index = [] # token index to its first character's index
tok_end_to_chartok_index = [] # token index to its last character's index
char_cnt = 0
for i, token in enumerate(paragraph_tokenized):
chartok_to_tok_index.extend([i] * len(token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = ''.join(paragraph_tokenized).replace(u'▁', ' ')
# XLNet takes a more complicated strategy to match the origin text
# and the tokenized tokens
# Get the LCS matching between origin text and token-concatenated text.
n, m = len(example.paragraph_text), len(tok_cat_text)
max_dist = abs(n - m) + 5
for _ in range(2):
f, g = lcs_match(max_dist, example.paragraph_text, tok_cat_text)
if f[n - 1, m - 1] > 0.8 * n:
break
max_dist *= 2
# Get the mapping from orgin text/tokenized text to tokenized text/origin text
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g:
break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
# get start/end mapping
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(paragraph_tokenized)): # for each token in the tokenized paragraph
start_chartok_pos = tok_start_to_chartok_index[i] # first character's index in origin text
end_chartok_pos = tok_end_to_chartok_index[i] # last character's index in origin text
start_orig_pos = convert_index(chartok_to_orig_index, start_chartok_pos, n, is_start=True)
end_orig_pos = convert_index(chartok_to_orig_index, end_chartok_pos, m, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
tok_start_position, tok_end_position = -1, -1
# get mapped start/end position
if is_training and not example.is_impossible:
start_chartok_pos = convert_index(orig_to_chartok_index, example.start_offset,
is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = convert_index(orig_to_chartok_index, example.end_offset, is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
# get doc spans using sliding window
doc_spans, doc_spans_indices = get_doc_spans(paragraph_tokenized,
max_seq_length - len(query_tokenized) - 3,
doc_stride)
# record whether the tokens in a docspan have max context
token_is_max_context = [{
p: check_is_max_context(doc_spans_indices, i, p + doc_spans_indices[i][0])
for p in range(len(doc_span))
} for (i, doc_span) in enumerate(doc_spans)]
# get token -> origin text mapping
cur_tok_start_to_orig_index = [[tok_start_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
cur_tok_end_to_orig_index = [[tok_end_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
# get sequence features: tokens, segment_ids, p_masks
seq_features = [
concat_sequences([doc_span, query_tokenized], [[sep_token]] * 2 + [[cls_token]],
[[0] * len(doc_span), [1] * len(query_tokenized)], [[1], [1], [0]])
for doc_span in doc_spans
]
# get the start/end positions aligned to doc spans. If is_impossible or position out of span
# set position to cls_index, i.e., last token in the sequence.
if not example.is_impossible:
positions = [
align_position2doc_spans([tok_start_position, tok_end_position], doc_idx, offset=0,
default_value=len(seq[0]) - 1)
for (doc_idx, seq) in zip(doc_spans_indices, seq_features)
]
else:
positions = [(len(seq_feature[0]) - 1, len(seq_feature[0]) - 1)
for seq_feature in seq_features]
features = [
SquadXLNetFeautre(example_id=example.example_id, qas_id=example.qas_id,
tok_start_to_orig_index=t2st, tok_end_to_orig_index=t2ed,
valid_length=len(tokens), tokens=tokens, token_is_max_context=is_max,
input_ids=vocab[tokens], p_mask=p_mask, segment_ids=segment_ids,
start_position=start, end_position=end,
paragraph_text=example.paragraph_text, paragraph_len=len(tokens),
is_impossible=(start == len(tokens) - 1))
for (tokens, segment_ids, p_mask), (
start,
end), is_max, t2st, t2ed in zip(seq_features, positions, token_is_max_context,
cur_tok_start_to_orig_index, cur_tok_end_to_orig_index)
]
return features
def preprocess_dataset(tokenizer, dataset, vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, num_workers=16, load_from_pickle=False,
feature_file=None, is_training=True):
"""Loads a dataset into features"""
vocab = tokenizer.vocab if vocab is None else vocab
trans = partial(convert_examples_to_features, tokenizer=tokenizer, cls_token=vocab.cls_token,
sep_token=vocab.sep_token, vocab=vocab, max_seq_length=max_seq_length,
doc_stride=doc_stride, max_query_length=max_query_length)
pool = mp.Pool(num_workers)
start = time.time()
if not load_from_pickle:
example_trans = partial(convert_squad_examples, is_training=is_training)
# convert the raw dataset into raw features
examples = pool.map(example_trans, dataset)
raw_features = list(map(trans, examples)) #pool.map(trans, examples)
if feature_file:
with open(feature_file, 'wb') as file:
pickle.dump(raw_features, file)
else:
assert feature_file, 'feature file should be provided.'
with open(feature_file, 'rb') as file:
raw_features = pickle.load(file)
end = time.time()
pool.close()
log.info('Done! Transform dataset costs %.2f seconds.', (end - start))
return raw_features
def convert_full_features_to_input_features(raw_features):
"""convert the full features into the input features"""
data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))
data_features = data_features.transform(lambda *example: (
example[0], # example_id
example[7], # inputs_id
example[9], # segment_ids
example[2], # valid_length,
example[8], # p_mask
example[10], # start_position,
example[11], # end_position
example[14])) # is_impossible
return data_features
def split_array(arr, num_of_splits):
"""split an array into equal pieces"""
# TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7
size = arr.shape[0]
if size < num_of_splits:
return [arr[i:i + 1] for i in range(size)]
slice_len, rest = divmod(size, num_of_splits)
div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))
for index in range(num_of_splits)]
slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]
return slices
def split_and_load(arrs, _ctxs):
"""split and load arrays to a list of contexts"""
# TODO Replace split_array() with gluon.utils.split_data() once targeting MXNet 1.7
assert isinstance(arrs, (list, tuple))
# split and load
loaded_arrs = [[i.as_in_context(ctx) for i, ctx in zip(split_array(arr, len(_ctxs)), _ctxs)]
for arr in arrs]
return zip(*loaded_arrs)
def _apply_gradient_decay():
"""apply layer-wise gradient decay.
Note that the description in origin paper about layer-wise learning rate decay
is inaccurate. According to their implementation, they are actually performing
layer-wise gradient decay. Gradient decay and learning rate decay could be the
same by using standard SGD, but different by using Adaptive optimizer(e.g., Adam).
"""
parameter_not_included = ['seg_emb', 'query_key_bias', 'query_emb_bias', 'query_seg_bias']
num_layers = len(xlnet_base._net.transformer_cells)
for (i, layer_parameters) in enumerate(xlnet_base._net.transformer_cells):
layer_params = layer_parameters.collect_params()
for key, value in layer_params.items():
skip = False
for pn in parameter_not_included:
if pn in key:
skip = True
if skip:
continue
if value.grad_req != 'null':
for arr in value.list_grad():
arr *= args.layerwise_decay**(num_layers - i - 1)
def tr | :
"""Training function."""
segment = 'train'
log.info('Loading %s data...', segment)
# Note that for XLNet, the authors always use squad2 dataset for training
train_data = SQuAD(segment, version='2.0')
if args.debug:
sampled_data = [train_data[i] for i in range(100)]
train_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in Train data: %s', len(train_data))
train_data_features = preprocess_dataset(
tokenizer, train_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.train_dataset_file)
train_data_input = convert_full_features_to_input_features(train_data_features)
log.info('The number of examples after preprocessing: %s', len(train_data_input))
train_dataloader = mx.gluon.data.DataLoader(train_data_input, batchify_fn=batchify_fn,
batch_size=args.batch_size, num_workers=4,
shuffle=True)
optimizer_params = {'learning_rate': args.lr, 'wd': args.wd}
try:
trainer = mx.gluon.Trainer(net.collect_params(), args.optimizer, optimizer_params,
update_on_kvstore=False)
except ValueError as _:
warnings.warn('AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = mx.gluon.Trainer(net.collect_params(), 'bertadam', optimizer_params,
update_on_kvstore=False)
num_train_examples = len(train_data_input)
step_size = args.batch_size * args.accumulate if args.accumulate else args.batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
epoch_number = args.epochs
if args.training_steps:
num_train_steps = args.training_steps
epoch_number = 100000
log.info('training steps=%d', num_train_steps)
num_warmup_steps = int(num_train_steps * args.warmup_ratio)
step_num = 0
def set_new_lr(step_num, batch_id):
"""set new learning rate"""
# set grad to zero for gradient accumulation
if args.accumulate:
if batch_id % args.accumulate == 0:
net.collect_params().zero_grad()
step_num += 1
else:
step_num += 1
# learning rate schedule
# Notice that this learning rate scheduler is adapted from traditional linear learning
# rate scheduler where step_num >= num_warmup_steps, new_lr = 1 - step_num/num_train_steps
if step_num < num_warmup_steps:
new_lr = args.lr * step_num / num_warmup_steps
else:
offset = (step_num - num_warmup_steps) * args.lr / \
(num_train_steps - num_warmup_steps)
new_lr = args.lr - offset
trainer.set_learning_rate(new_lr)
return step_num
# Do not apply weight decay on LayerNorm and bias terms
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
# Collect differentiable parameters
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
# Set grad_req if gradient accumulation is required
if args.accumulate:
for p in params:
p.grad_req = 'add'
epoch_tic = time.time()
total_num = 0
log_num = 0
finish_flag = False
for epoch_id in range(epoch_number):
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
tic = time.time()
if finish_flag:
break
for batch_id, data in enumerate(train_dataloader):
# set new lr
step_num = set_new_lr(step_num, batch_id)
data_list = list(split_and_load(data, ctx))
# forward and backward
batch_loss = []
batch_loss_sep = []
with mx.autograd.record():
for splited_data in data_list:
_, inputs, token_types, valid_length, p_mask, start_label, end_label, is_impossible = splited_data # pylint: disable=line-too-long
valid_length = valid_length.astype('float32')
log_num += len(inputs)
total_num += len(inputs)
out_sep, out = net(
inputs,
token_types,
valid_length,
[start_label, end_label],
p_mask=p_mask, # pylint: disable=line-too-long
is_impossible=is_impossible)
ls = out.mean() / len(ctx)
batch_loss_sep.append(out_sep)
batch_loss.append(ls)
if args.accumulate:
ls = ls / args.accumulate
ls.backward()
# update
if not args.accumulate or (batch_id + 1) % args.accumulate == 0:
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(params, 1)
_apply_gradient_decay()
trainer.update(1, ignore_stale_grad=True)
step_loss_sep_tmp = np.array(
[[span_ls.mean().asscalar(),
cls_ls.mean().asscalar()] for span_ls, cls_ls in batch_loss_sep])
step_loss_sep_tmp = list(np.sum(step_loss_sep_tmp, axis=0))
step_loss_span += step_loss_sep_tmp[0] / len(ctx)
step_loss_cls += step_loss_sep_tmp[1] / len(ctx)
step_loss += sum([ls.asscalar() for ls in batch_loss])
if (batch_id + 1) % log_interval == 0:
toc = time.time()
log.info(
'Epoch: %d, Batch: %d/%d, Loss=%.4f, lr=%.7f '
'Time cost=%.1f Thoughput=%.2f samples/s', epoch_id + 1, batch_id + 1,
len(train_dataloader), step_loss / log_interval, trainer.learning_rate,
toc - tic, log_num / (toc - tic))
log.info('span_loss: %.4f, cls_loss: %.4f', step_loss_span / log_interval,
step_loss_cls / log_interval)
tic = time.time()
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
log_num = 0
if step_num >= num_train_steps:
logging.info('Finish training step: %d', step_num)
finish_flag = True
break
epoch_toc = time.time()
log.info('Time cost=%.2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
version_prefix = 'squad2' if args.version_2 else 'squad1'
ckpt_name = 'model_{}_{}_{}.params'.format(args.model, version_prefix, epoch_id + 1)
params_saved = os.path.join(args.output_dir, ckpt_name)
nlp.utils.save_parameters(net, params_saved)
log.info('params saved in: %s', params_saved)
RawResultExtended = collections.namedtuple(
'RawResultExtended',
['start_top_log_probs', 'start_top_index', 'end_top_log_probs', 'end_top_index', 'cls_logits'])
def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loading dev data...')
if args.version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
(_, _), (data_file_name, _) \
= dev_data._data_file[dev_data._version][dev_data._segment]
dev_data_path = os.path.join(dev_data._root, data_file_name)
if args.debug:
sampled_data = [dev_data[0], dev_data[1], dev_data[2]]
dev_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in dev data: %d', len(dev_data))
dev_data_features = preprocess_dataset(
tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.dev_dataset_file)
dev_data_input = convert_full_features_to_input_features(dev_data_features)
log.info('The number of examples after preprocessing: %d', len(dev_data_input))
dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,
num_workers=4, batch_size=args.test_batch_size,
shuffle=False, last_batch='keep')
log.info('start prediction')
all_results = collections.defaultdict(list)
epoch_tic = time.time()
total_num = 0
for (batch_id, data) in enumerate(dev_dataloader):
data_list = list(split_and_load(data, ctx))
for splited_data in data_list:
example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data
total_num += len(inputs)
outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)
example_ids = example_ids.asnumpy().tolist()
for c, example_ids in enumerate(example_ids):
result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),
start_top_index=outputs[1][c].asnumpy().tolist(),
end_top_log_probs=outputs[2][c].asnumpy().tolist(),
end_top_index=outputs[3][c].asnumpy().tolist(),
cls_logits=outputs[4][c].asnumpy().tolist())
all_results[example_ids].append(result)
if batch_id % args.log_interval == 0:
log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))
epoch_toc = time.time()
log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
log.info('Get prediction results...')
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for features in dev_data_features:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
score_diff, best_non_null_entry, nbest_json = predict_extended(
features=features, results=results, n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,
end_n_top=args.end_top_n)
scores_diff_json[example_qas_id] = score_diff
all_predictions[example_qas_id] = best_non_null_entry
all_nbest_json[example_qas_id] = nbest_json
output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')
with open(output_prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
with open(output_null_log_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):
arguments = [
dev_data_path, output_prediction_file, '--na-prob-thresh',
str(args.null_score_diff_threshold)
]
if args.version_2:
arguments += ['--na-prob-file', output_null_log_odds_file]
subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)
else:
log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '
'Check index.rst for the detail.')
if __name__ == '__main__':
if not args.only_predict:
train()
evaluate()
else:
evaluate()
| ain() | identifier_name |
run_squad.py | """
Question Answering with XLNet
"""
# pylint:disable=redefined-outer-name,logging-format-interpolation
import os
import time
import argparse
import random
import logging
import warnings
import json
import collections
import pickle
import sys
import itertools
import subprocess
import multiprocessing as mp
from functools import partial
import numpy as np
import mxnet as mx
import gluonnlp as nlp
from gluonnlp.data import SQuAD
from gluonnlp.data.bert.glue import concat_sequences
from gluonnlp.data.bert.squad import get_doc_spans, \
check_is_max_context, convert_squad_examples, align_position2doc_spans
from gluonnlp.data.xlnet.squad import lcs_match, convert_index
from model.qa import XLNetForQA
from transformer import model
from xlnet_qa_evaluate import predict_extended
parser = argparse.ArgumentParser(description='XLNet QA example.'
'We fine-tune the XLNet model on SQuAD dataset.')
# I/O configuration
parser.add_argument('--sentencepiece', type=str, default=None,
help='Path to the sentencepiece .model file for both tokenization and vocab.')
parser.add_argument('--pretrained_xlnet_parameters', type=str, default=None,
help='Pre-trained bert model parameter file. default is None')
parser.add_argument('--load_pickle', action='store_true',
help='Whether do data preprocessing or load from pickled file')
parser.add_argument('--dev_dataset_file', default='./output_dir/out.dev', type=str,
help='Path to dev data features')
parser.add_argument('--train_dataset_file', default='./output_dir/out.train', type=str,
help='Path to train data features')
parser.add_argument('--model_parameters', type=str, default=None, help='Model parameter file')
parser.add_argument(
'--output_dir', type=str, default='./output_dir',
help='The output directory where the model params will be written.'
' default is ./output_dir')
# Training configuration
parser.add_argument('--seed', type=int, default=3, help='Random seed')
parser.add_argument('--version_2', action='store_true', help='Whether use SQuAD v2.0 dataset')
parser.add_argument('--model', type=str, default='xlnet_cased_l12_h768_a12',
choices=['xlnet_cased_l24_h1024_a16', 'xlnet_cased_l12_h768_a12'],
help='The name of pre-trained XLNet model to fine-tune')
parser.add_argument('--dataset', type=str, default='126gb', choices=['126gb'],
help='The dataset BERT pre-trained with. Currently only 126gb is available')
parser.add_argument(
'--uncased', action='store_true', help=
'if set, inputs are converted to lower case. Up to 01/04/2020, all released models are cased')
parser.add_argument('--gpu', type=int, default=None,
help='Number of gpus to use for finetuning. CPU is used if not set.')
parser.add_argument('--log_interval', type=int, default=10, help='report interval. default is 10')
parser.add_argument('--debug', action='store_true',
help='Run the example in test mode for sanity checks')
parser.add_argument('--only_predict', action='store_true', help='Whether to predict only.')
# Hyperparameters
parser.add_argument('--epochs', type=int, default=3, help='number of epochs, default is 3')
parser.add_argument(
'--training_steps', type=int, help='training steps. Note that epochs will be ignored '
'if training steps are set')
parser.add_argument('--batch_size', type=int, default=32,
help='Batch size. Number of examples per gpu in a minibatch. default is 32')
parser.add_argument('--test_batch_size', type=int, default=24,
help='Test batch size. default is 24')
parser.add_argument('--optimizer', type=str, default='bertadam',
help='optimization algorithm. default is bertadam')
parser.add_argument(
'--accumulate', type=int, default=None, help='The number of batches for '
'gradients accumulation to simulate large batch size. Default is None')
parser.add_argument('--lr', type=float, default=3e-5,
help='Initial learning rate. default is 5e-5')
parser.add_argument(
'--warmup_ratio', type=float, default=0,
help='ratio of warmup steps that linearly increase learning rate from '
'0 to target learning rate. default is 0')
parser.add_argument('--layerwise_decay', type=float, default=0.75, help='Layer-wise lr decay')
parser.add_argument('--wd', type=float, default=0.01, help='weight decay')
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--attention_dropout', type=float, default=0.1, help='attention dropout')
# Data pre/post processing
parser.add_argument(
'--max_seq_length', type=int, default=512,
help='The maximum total input sequence length after WordPiece tokenization.'
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded. default is 512')
parser.add_argument(
'--doc_stride', type=int, default=128,
help='When splitting up a long document into chunks, how much stride to '
'take between chunks. default is 128')
parser.add_argument(
'--max_query_length', type=int, default=64,
help='The maximum number of tokens for the question. Questions longer than '
'this will be truncated to this length. default is 64')
parser.add_argument(
'--round_to', type=int, default=None,
help='The length of padded sequences will be rounded up to be multiple of this argument.'
'When round to is set to 8, training throughput may increase for mixed precision'
'training on GPUs with tensorcores.')
parser.add_argument('--start_top_n', type=int, default=5,
help='Number of start-position candidates')
parser.add_argument('--end_top_n', type=int, default=5,
help='Number of end-position candidates corresponding '
'to a start position')
parser.add_argument('--n_best_size', type=int, default=5, help='top N results written to file')
parser.add_argument(
'--max_answer_length', type=int, default=64,
help='The maximum length of an answer that can be generated. This is needed '
'because the start and end predictions are not conditioned on one another.'
' default is 64')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of workers used for data preprocessing')
parser.add_argument(
'--null_score_diff_threshold', type=float, default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.'
'Typical values are between -1.0 and -5.0. default is 0.0. '
'Note that a best value can be automatically found by the evaluation script')
args = parser.parse_args()
# random seed
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
# set the logger
log = logging.getLogger('gluonnlp')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(levelname)s:%(name)s:%(asctime)s %(message)s',
datefmt='%H:%M:%S')
fh = logging.FileHandler(os.path.join(args.output_dir, 'finetune_squad.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(fh)
log.info(args)
pretrained_xlnet_parameters = args.pretrained_xlnet_parameters
if pretrained_xlnet_parameters and args.model_parameters:
raise ValueError('Cannot provide both pre-trained BERT parameters and '
'BertForQA model parameters.')
ctx = [mx.cpu(0)] if not args.gpu else [mx.gpu(i) for i in range(args.gpu)]
log_interval = args.log_interval * args.accumulate if args.accumulate else args.log_interval
if args.accumulate:
log.info('Using gradient accumulation. Effective batch size = %d',
args.accumulate * args.batch_size)
if args.max_seq_length <= args.max_query_length + 3:
raise ValueError('The max_seq_length (%d) must be greater than max_query_length '
'(%d) + 3' % (args.max_seq_length, args.max_query_length))
get_pretrained = True
get_model_params = {
'name': args.model,
'dataset_name': args.dataset,
'pretrained': get_pretrained,
'ctx': ctx,
'use_decoder': False,
'dropout': args.dropout,
'attention_dropout': args.attention_dropout
}
# model, vocabulary and tokenizer
xlnet_base, vocab, tokenizer = model.get_model(**get_model_params)
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Stack('int32'), # example_id
nlp.data.batchify.Pad(axis=0, pad_val=vocab[vocab.padding_token], dtype='int32',
round_to=args.round_to), # input_ids
nlp.data.batchify.Pad(axis=0, pad_val=3, dtype='int32', round_to=args.round_to), # segment_ids
nlp.data.batchify.Stack('float32'), # valid_length
nlp.data.batchify.Pad(axis=0, pad_val=1, round_to=args.round_to), # p_mask
nlp.data.batchify.Stack('float32'), # start_position
nlp.data.batchify.Stack('float32'), # end_position
nlp.data.batchify.Stack('float32')) # is_impossible
if pretrained_xlnet_parameters:
# only load XLnetModel parameters
nlp.utils.load_parameters(xlnet_base, pretrained_xlnet_parameters, ctx=ctx, ignore_extra=True,
cast_dtype=True)
units = xlnet_base._net._units
net = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n, end_top_n=args.end_top_n,
units=units)
net_eval = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n,
end_top_n=args.end_top_n, units=units, is_eval=True,
params=net.collect_params())
initializer = mx.init.Normal(0.02)
if args.model_parameters:
# load complete XLNetForQA parameters
nlp.utils.load_parameters(net, args.model_parameters, ctx=ctx, cast_dtype=True)
else:
net.start_logits.initialize(init=initializer, ctx=ctx)
net.end_logits.initialize(init=initializer, ctx=ctx)
net.answer_class.initialize(init=initializer, ctx=ctx)
net.hybridize(static_alloc=True)
net_eval.hybridize(static_alloc=True)
SquadXLNetFeautre = collections.namedtuple('SquadXLNetFeautre', [
'example_id', 'qas_id', 'valid_length', 'tokens', 'tok_start_to_orig_index',
'tok_end_to_orig_index', 'token_is_max_context', 'input_ids', 'p_mask', 'segment_ids',
'start_position', 'end_position', 'paragraph_text', 'paragraph_len', 'is_impossible'
])
def convert_examples_to_features(example, tokenizer=None, cls_token=None, sep_token=None,
vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, is_training=True):
"""convert the examples to the XLNet features"""
query_tokenized = tokenizer(example.question_text)[:max_query_length]
#tokenize paragraph and get start/end position of the answer in tokenized paragraph
paragraph_tokenized = tokenizer(example.paragraph_text)
chartok_to_tok_index = [] # char to its corresponding token's index
tok_start_to_chartok_index = [] # token index to its first character's index
tok_end_to_chartok_index = [] # token index to its last character's index
char_cnt = 0
for i, token in enumerate(paragraph_tokenized):
chartok_to_tok_index.extend([i] * len(token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = ''.join(paragraph_tokenized).replace(u'▁', ' ')
# XLNet takes a more complicated strategy to match the origin text
# and the tokenized tokens
# Get the LCS matching between origin text and token-concatenated text.
n, m = len(example.paragraph_text), len(tok_cat_text)
max_dist = abs(n - m) + 5
for _ in range(2):
f, g = lcs_match(max_dist, example.paragraph_text, tok_cat_text)
if f[n - 1, m - 1] > 0.8 * n:
break
max_dist *= 2
# Get the mapping from orgin text/tokenized text to tokenized text/origin text
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g:
break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
# get start/end mapping
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(paragraph_tokenized)): # for each token in the tokenized paragraph
start_chartok_pos = tok_start_to_chartok_index[i] # first character's index in origin text
end_chartok_pos = tok_end_to_chartok_index[i] # last character's index in origin text
start_orig_pos = convert_index(chartok_to_orig_index, start_chartok_pos, n, is_start=True)
end_orig_pos = convert_index(chartok_to_orig_index, end_chartok_pos, m, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
tok_start_position, tok_end_position = -1, -1
# get mapped start/end position
if is_training and not example.is_impossible:
start_chartok_pos = convert_index(orig_to_chartok_index, example.start_offset,
is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = convert_index(orig_to_chartok_index, example.end_offset, is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
# get doc spans using sliding window
doc_spans, doc_spans_indices = get_doc_spans(paragraph_tokenized,
max_seq_length - len(query_tokenized) - 3,
doc_stride)
# record whether the tokens in a docspan have max context
token_is_max_context = [{
p: check_is_max_context(doc_spans_indices, i, p + doc_spans_indices[i][0])
for p in range(len(doc_span))
} for (i, doc_span) in enumerate(doc_spans)]
# get token -> origin text mapping
cur_tok_start_to_orig_index = [[tok_start_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
cur_tok_end_to_orig_index = [[tok_end_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
# get sequence features: tokens, segment_ids, p_masks
seq_features = [
concat_sequences([doc_span, query_tokenized], [[sep_token]] * 2 + [[cls_token]],
[[0] * len(doc_span), [1] * len(query_tokenized)], [[1], [1], [0]])
for doc_span in doc_spans
]
# get the start/end positions aligned to doc spans. If is_impossible or position out of span
# set position to cls_index, i.e., last token in the sequence.
if not example.is_impossible:
positions = [
align_position2doc_spans([tok_start_position, tok_end_position], doc_idx, offset=0,
default_value=len(seq[0]) - 1)
for (doc_idx, seq) in zip(doc_spans_indices, seq_features)
]
else:
positions = [(len(seq_feature[0]) - 1, len(seq_feature[0]) - 1)
for seq_feature in seq_features]
features = [
SquadXLNetFeautre(example_id=example.example_id, qas_id=example.qas_id,
tok_start_to_orig_index=t2st, tok_end_to_orig_index=t2ed,
valid_length=len(tokens), tokens=tokens, token_is_max_context=is_max,
input_ids=vocab[tokens], p_mask=p_mask, segment_ids=segment_ids,
start_position=start, end_position=end,
paragraph_text=example.paragraph_text, paragraph_len=len(tokens),
is_impossible=(start == len(tokens) - 1))
for (tokens, segment_ids, p_mask), (
start,
end), is_max, t2st, t2ed in zip(seq_features, positions, token_is_max_context,
cur_tok_start_to_orig_index, cur_tok_end_to_orig_index)
]
return features
def preprocess_dataset(tokenizer, dataset, vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, num_workers=16, load_from_pickle=False,
feature_file=None, is_training=True):
"""Loads a dataset into features"""
vocab = tokenizer.vocab if vocab is None else vocab
trans = partial(convert_examples_to_features, tokenizer=tokenizer, cls_token=vocab.cls_token,
sep_token=vocab.sep_token, vocab=vocab, max_seq_length=max_seq_length,
doc_stride=doc_stride, max_query_length=max_query_length)
pool = mp.Pool(num_workers)
start = time.time()
if not load_from_pickle:
example_trans = partial(convert_squad_examples, is_training=is_training)
# convert the raw dataset into raw features
examples = pool.map(example_trans, dataset)
raw_features = list(map(trans, examples)) #pool.map(trans, examples)
if feature_file:
with open(feature_file, 'wb') as file:
pickle.dump(raw_features, file)
else:
assert feature_file, 'feature file should be provided.'
with open(feature_file, 'rb') as file:
raw_features = pickle.load(file)
end = time.time()
pool.close()
log.info('Done! Transform dataset costs %.2f seconds.', (end - start))
return raw_features
def convert_full_features_to_input_features(raw_features):
"""convert the full features into the input features"""
data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))
data_features = data_features.transform(lambda *example: (
example[0], # example_id
example[7], # inputs_id
example[9], # segment_ids
example[2], # valid_length,
example[8], # p_mask
example[10], # start_position,
example[11], # end_position
example[14])) # is_impossible
return data_features
def split_array(arr, num_of_splits):
"" |
def split_and_load(arrs, _ctxs):
"""split and load arrays to a list of contexts"""
# TODO Replace split_array() with gluon.utils.split_data() once targeting MXNet 1.7
assert isinstance(arrs, (list, tuple))
# split and load
loaded_arrs = [[i.as_in_context(ctx) for i, ctx in zip(split_array(arr, len(_ctxs)), _ctxs)]
for arr in arrs]
return zip(*loaded_arrs)
def _apply_gradient_decay():
"""apply layer-wise gradient decay.
Note that the description in origin paper about layer-wise learning rate decay
is inaccurate. According to their implementation, they are actually performing
layer-wise gradient decay. Gradient decay and learning rate decay could be the
same by using standard SGD, but different by using Adaptive optimizer(e.g., Adam).
"""
parameter_not_included = ['seg_emb', 'query_key_bias', 'query_emb_bias', 'query_seg_bias']
num_layers = len(xlnet_base._net.transformer_cells)
for (i, layer_parameters) in enumerate(xlnet_base._net.transformer_cells):
layer_params = layer_parameters.collect_params()
for key, value in layer_params.items():
skip = False
for pn in parameter_not_included:
if pn in key:
skip = True
if skip:
continue
if value.grad_req != 'null':
for arr in value.list_grad():
arr *= args.layerwise_decay**(num_layers - i - 1)
def train():
"""Training function."""
segment = 'train'
log.info('Loading %s data...', segment)
# Note that for XLNet, the authors always use squad2 dataset for training
train_data = SQuAD(segment, version='2.0')
if args.debug:
sampled_data = [train_data[i] for i in range(100)]
train_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in Train data: %s', len(train_data))
train_data_features = preprocess_dataset(
tokenizer, train_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.train_dataset_file)
train_data_input = convert_full_features_to_input_features(train_data_features)
log.info('The number of examples after preprocessing: %s', len(train_data_input))
train_dataloader = mx.gluon.data.DataLoader(train_data_input, batchify_fn=batchify_fn,
batch_size=args.batch_size, num_workers=4,
shuffle=True)
optimizer_params = {'learning_rate': args.lr, 'wd': args.wd}
try:
trainer = mx.gluon.Trainer(net.collect_params(), args.optimizer, optimizer_params,
update_on_kvstore=False)
except ValueError as _:
warnings.warn('AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = mx.gluon.Trainer(net.collect_params(), 'bertadam', optimizer_params,
update_on_kvstore=False)
num_train_examples = len(train_data_input)
step_size = args.batch_size * args.accumulate if args.accumulate else args.batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
epoch_number = args.epochs
if args.training_steps:
num_train_steps = args.training_steps
epoch_number = 100000
log.info('training steps=%d', num_train_steps)
num_warmup_steps = int(num_train_steps * args.warmup_ratio)
step_num = 0
def set_new_lr(step_num, batch_id):
"""set new learning rate"""
# set grad to zero for gradient accumulation
if args.accumulate:
if batch_id % args.accumulate == 0:
net.collect_params().zero_grad()
step_num += 1
else:
step_num += 1
# learning rate schedule
# Notice that this learning rate scheduler is adapted from traditional linear learning
# rate scheduler where step_num >= num_warmup_steps, new_lr = 1 - step_num/num_train_steps
if step_num < num_warmup_steps:
new_lr = args.lr * step_num / num_warmup_steps
else:
offset = (step_num - num_warmup_steps) * args.lr / \
(num_train_steps - num_warmup_steps)
new_lr = args.lr - offset
trainer.set_learning_rate(new_lr)
return step_num
# Do not apply weight decay on LayerNorm and bias terms
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
# Collect differentiable parameters
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
# Set grad_req if gradient accumulation is required
if args.accumulate:
for p in params:
p.grad_req = 'add'
epoch_tic = time.time()
total_num = 0
log_num = 0
finish_flag = False
for epoch_id in range(epoch_number):
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
tic = time.time()
if finish_flag:
break
for batch_id, data in enumerate(train_dataloader):
# set new lr
step_num = set_new_lr(step_num, batch_id)
data_list = list(split_and_load(data, ctx))
# forward and backward
batch_loss = []
batch_loss_sep = []
with mx.autograd.record():
for splited_data in data_list:
_, inputs, token_types, valid_length, p_mask, start_label, end_label, is_impossible = splited_data # pylint: disable=line-too-long
valid_length = valid_length.astype('float32')
log_num += len(inputs)
total_num += len(inputs)
out_sep, out = net(
inputs,
token_types,
valid_length,
[start_label, end_label],
p_mask=p_mask, # pylint: disable=line-too-long
is_impossible=is_impossible)
ls = out.mean() / len(ctx)
batch_loss_sep.append(out_sep)
batch_loss.append(ls)
if args.accumulate:
ls = ls / args.accumulate
ls.backward()
# update
if not args.accumulate or (batch_id + 1) % args.accumulate == 0:
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(params, 1)
_apply_gradient_decay()
trainer.update(1, ignore_stale_grad=True)
step_loss_sep_tmp = np.array(
[[span_ls.mean().asscalar(),
cls_ls.mean().asscalar()] for span_ls, cls_ls in batch_loss_sep])
step_loss_sep_tmp = list(np.sum(step_loss_sep_tmp, axis=0))
step_loss_span += step_loss_sep_tmp[0] / len(ctx)
step_loss_cls += step_loss_sep_tmp[1] / len(ctx)
step_loss += sum([ls.asscalar() for ls in batch_loss])
if (batch_id + 1) % log_interval == 0:
toc = time.time()
log.info(
'Epoch: %d, Batch: %d/%d, Loss=%.4f, lr=%.7f '
'Time cost=%.1f Thoughput=%.2f samples/s', epoch_id + 1, batch_id + 1,
len(train_dataloader), step_loss / log_interval, trainer.learning_rate,
toc - tic, log_num / (toc - tic))
log.info('span_loss: %.4f, cls_loss: %.4f', step_loss_span / log_interval,
step_loss_cls / log_interval)
tic = time.time()
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
log_num = 0
if step_num >= num_train_steps:
logging.info('Finish training step: %d', step_num)
finish_flag = True
break
epoch_toc = time.time()
log.info('Time cost=%.2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
version_prefix = 'squad2' if args.version_2 else 'squad1'
ckpt_name = 'model_{}_{}_{}.params'.format(args.model, version_prefix, epoch_id + 1)
params_saved = os.path.join(args.output_dir, ckpt_name)
nlp.utils.save_parameters(net, params_saved)
log.info('params saved in: %s', params_saved)
RawResultExtended = collections.namedtuple(
'RawResultExtended',
['start_top_log_probs', 'start_top_index', 'end_top_log_probs', 'end_top_index', 'cls_logits'])
def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loading dev data...')
if args.version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
(_, _), (data_file_name, _) \
= dev_data._data_file[dev_data._version][dev_data._segment]
dev_data_path = os.path.join(dev_data._root, data_file_name)
if args.debug:
sampled_data = [dev_data[0], dev_data[1], dev_data[2]]
dev_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in dev data: %d', len(dev_data))
dev_data_features = preprocess_dataset(
tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.dev_dataset_file)
dev_data_input = convert_full_features_to_input_features(dev_data_features)
log.info('The number of examples after preprocessing: %d', len(dev_data_input))
dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,
num_workers=4, batch_size=args.test_batch_size,
shuffle=False, last_batch='keep')
log.info('start prediction')
all_results = collections.defaultdict(list)
epoch_tic = time.time()
total_num = 0
for (batch_id, data) in enumerate(dev_dataloader):
data_list = list(split_and_load(data, ctx))
for splited_data in data_list:
example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data
total_num += len(inputs)
outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)
example_ids = example_ids.asnumpy().tolist()
for c, example_ids in enumerate(example_ids):
result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),
start_top_index=outputs[1][c].asnumpy().tolist(),
end_top_log_probs=outputs[2][c].asnumpy().tolist(),
end_top_index=outputs[3][c].asnumpy().tolist(),
cls_logits=outputs[4][c].asnumpy().tolist())
all_results[example_ids].append(result)
if batch_id % args.log_interval == 0:
log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))
epoch_toc = time.time()
log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
log.info('Get prediction results...')
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for features in dev_data_features:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
score_diff, best_non_null_entry, nbest_json = predict_extended(
features=features, results=results, n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,
end_n_top=args.end_top_n)
scores_diff_json[example_qas_id] = score_diff
all_predictions[example_qas_id] = best_non_null_entry
all_nbest_json[example_qas_id] = nbest_json
output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')
with open(output_prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
with open(output_null_log_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):
arguments = [
dev_data_path, output_prediction_file, '--na-prob-thresh',
str(args.null_score_diff_threshold)
]
if args.version_2:
arguments += ['--na-prob-file', output_null_log_odds_file]
subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)
else:
log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '
'Check index.rst for the detail.')
if __name__ == '__main__':
if not args.only_predict:
train()
evaluate()
else:
evaluate()
| "split an array into equal pieces"""
# TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7
size = arr.shape[0]
if size < num_of_splits:
return [arr[i:i + 1] for i in range(size)]
slice_len, rest = divmod(size, num_of_splits)
div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))
for index in range(num_of_splits)]
slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]
return slices
| identifier_body |
run_squad.py | """
Question Answering with XLNet
"""
# pylint:disable=redefined-outer-name,logging-format-interpolation
import os
import time
import argparse
import random
import logging
import warnings
import json
import collections
import pickle
import sys
import itertools
import subprocess
import multiprocessing as mp
from functools import partial
import numpy as np
import mxnet as mx
import gluonnlp as nlp
from gluonnlp.data import SQuAD
from gluonnlp.data.bert.glue import concat_sequences
from gluonnlp.data.bert.squad import get_doc_spans, \
check_is_max_context, convert_squad_examples, align_position2doc_spans
from gluonnlp.data.xlnet.squad import lcs_match, convert_index
from model.qa import XLNetForQA
from transformer import model
from xlnet_qa_evaluate import predict_extended
parser = argparse.ArgumentParser(description='XLNet QA example.'
'We fine-tune the XLNet model on SQuAD dataset.')
# I/O configuration
parser.add_argument('--sentencepiece', type=str, default=None,
help='Path to the sentencepiece .model file for both tokenization and vocab.')
parser.add_argument('--pretrained_xlnet_parameters', type=str, default=None,
help='Pre-trained bert model parameter file. default is None')
parser.add_argument('--load_pickle', action='store_true',
help='Whether do data preprocessing or load from pickled file')
parser.add_argument('--dev_dataset_file', default='./output_dir/out.dev', type=str,
help='Path to dev data features')
parser.add_argument('--train_dataset_file', default='./output_dir/out.train', type=str,
help='Path to train data features')
parser.add_argument('--model_parameters', type=str, default=None, help='Model parameter file')
parser.add_argument(
'--output_dir', type=str, default='./output_dir',
help='The output directory where the model params will be written.'
' default is ./output_dir')
# Training configuration
parser.add_argument('--seed', type=int, default=3, help='Random seed')
parser.add_argument('--version_2', action='store_true', help='Whether use SQuAD v2.0 dataset')
parser.add_argument('--model', type=str, default='xlnet_cased_l12_h768_a12',
choices=['xlnet_cased_l24_h1024_a16', 'xlnet_cased_l12_h768_a12'],
help='The name of pre-trained XLNet model to fine-tune')
parser.add_argument('--dataset', type=str, default='126gb', choices=['126gb'],
help='The dataset BERT pre-trained with. Currently only 126gb is available')
parser.add_argument(
'--uncased', action='store_true', help=
'if set, inputs are converted to lower case. Up to 01/04/2020, all released models are cased')
parser.add_argument('--gpu', type=int, default=None,
help='Number of gpus to use for finetuning. CPU is used if not set.')
parser.add_argument('--log_interval', type=int, default=10, help='report interval. default is 10')
parser.add_argument('--debug', action='store_true',
help='Run the example in test mode for sanity checks')
parser.add_argument('--only_predict', action='store_true', help='Whether to predict only.')
# Hyperparameters
parser.add_argument('--epochs', type=int, default=3, help='number of epochs, default is 3')
parser.add_argument(
'--training_steps', type=int, help='training steps. Note that epochs will be ignored '
'if training steps are set')
parser.add_argument('--batch_size', type=int, default=32,
help='Batch size. Number of examples per gpu in a minibatch. default is 32')
parser.add_argument('--test_batch_size', type=int, default=24,
help='Test batch size. default is 24')
parser.add_argument('--optimizer', type=str, default='bertadam',
help='optimization algorithm. default is bertadam')
parser.add_argument(
'--accumulate', type=int, default=None, help='The number of batches for '
'gradients accumulation to simulate large batch size. Default is None')
parser.add_argument('--lr', type=float, default=3e-5,
help='Initial learning rate. default is 5e-5')
parser.add_argument(
'--warmup_ratio', type=float, default=0,
help='ratio of warmup steps that linearly increase learning rate from '
'0 to target learning rate. default is 0')
parser.add_argument('--layerwise_decay', type=float, default=0.75, help='Layer-wise lr decay')
parser.add_argument('--wd', type=float, default=0.01, help='weight decay')
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--attention_dropout', type=float, default=0.1, help='attention dropout')
# Data pre/post processing
parser.add_argument(
'--max_seq_length', type=int, default=512,
help='The maximum total input sequence length after WordPiece tokenization.'
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded. default is 512')
parser.add_argument(
'--doc_stride', type=int, default=128,
help='When splitting up a long document into chunks, how much stride to '
'take between chunks. default is 128')
parser.add_argument(
'--max_query_length', type=int, default=64,
help='The maximum number of tokens for the question. Questions longer than '
'this will be truncated to this length. default is 64')
parser.add_argument(
'--round_to', type=int, default=None,
help='The length of padded sequences will be rounded up to be multiple of this argument.'
'When round to is set to 8, training throughput may increase for mixed precision'
'training on GPUs with tensorcores.')
parser.add_argument('--start_top_n', type=int, default=5,
help='Number of start-position candidates')
parser.add_argument('--end_top_n', type=int, default=5,
help='Number of end-position candidates corresponding '
'to a start position')
parser.add_argument('--n_best_size', type=int, default=5, help='top N results written to file')
parser.add_argument(
'--max_answer_length', type=int, default=64,
help='The maximum length of an answer that can be generated. This is needed '
'because the start and end predictions are not conditioned on one another.'
' default is 64')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of workers used for data preprocessing')
parser.add_argument(
'--null_score_diff_threshold', type=float, default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.'
'Typical values are between -1.0 and -5.0. default is 0.0. '
'Note that a best value can be automatically found by the evaluation script')
args = parser.parse_args()
# random seed
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
# set the logger
log = logging.getLogger('gluonnlp')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(levelname)s:%(name)s:%(asctime)s %(message)s',
datefmt='%H:%M:%S')
fh = logging.FileHandler(os.path.join(args.output_dir, 'finetune_squad.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(fh)
log.info(args)
pretrained_xlnet_parameters = args.pretrained_xlnet_parameters
if pretrained_xlnet_parameters and args.model_parameters:
raise ValueError('Cannot provide both pre-trained BERT parameters and '
'BertForQA model parameters.')
ctx = [mx.cpu(0)] if not args.gpu else [mx.gpu(i) for i in range(args.gpu)]
log_interval = args.log_interval * args.accumulate if args.accumulate else args.log_interval
if args.accumulate:
log.info('Using gradient accumulation. Effective batch size = %d',
args.accumulate * args.batch_size)
if args.max_seq_length <= args.max_query_length + 3:
raise ValueError('The max_seq_length (%d) must be greater than max_query_length '
'(%d) + 3' % (args.max_seq_length, args.max_query_length))
get_pretrained = True
get_model_params = {
'name': args.model,
'dataset_name': args.dataset,
'pretrained': get_pretrained,
'ctx': ctx,
'use_decoder': False,
'dropout': args.dropout,
'attention_dropout': args.attention_dropout
}
# model, vocabulary and tokenizer
xlnet_base, vocab, tokenizer = model.get_model(**get_model_params)
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Stack('int32'), # example_id
nlp.data.batchify.Pad(axis=0, pad_val=vocab[vocab.padding_token], dtype='int32',
round_to=args.round_to), # input_ids
nlp.data.batchify.Pad(axis=0, pad_val=3, dtype='int32', round_to=args.round_to), # segment_ids
nlp.data.batchify.Stack('float32'), # valid_length
nlp.data.batchify.Pad(axis=0, pad_val=1, round_to=args.round_to), # p_mask
nlp.data.batchify.Stack('float32'), # start_position
nlp.data.batchify.Stack('float32'), # end_position
nlp.data.batchify.Stack('float32')) # is_impossible
if pretrained_xlnet_parameters:
# only load XLnetModel parameters
nlp.utils.load_parameters(xlnet_base, pretrained_xlnet_parameters, ctx=ctx, ignore_extra=True,
cast_dtype=True)
units = xlnet_base._net._units
net = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n, end_top_n=args.end_top_n,
units=units)
net_eval = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n,
end_top_n=args.end_top_n, units=units, is_eval=True,
params=net.collect_params())
initializer = mx.init.Normal(0.02)
if args.model_parameters:
# load complete XLNetForQA parameters
nlp.utils.load_parameters(net, args.model_parameters, ctx=ctx, cast_dtype=True)
else:
net.start_logits.initialize(init=initializer, ctx=ctx)
net.end_logits.initialize(init=initializer, ctx=ctx)
net.answer_class.initialize(init=initializer, ctx=ctx)
net.hybridize(static_alloc=True)
net_eval.hybridize(static_alloc=True)
SquadXLNetFeautre = collections.namedtuple('SquadXLNetFeautre', [
'example_id', 'qas_id', 'valid_length', 'tokens', 'tok_start_to_orig_index',
'tok_end_to_orig_index', 'token_is_max_context', 'input_ids', 'p_mask', 'segment_ids',
'start_position', 'end_position', 'paragraph_text', 'paragraph_len', 'is_impossible'
])
def convert_examples_to_features(example, tokenizer=None, cls_token=None, sep_token=None,
vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, is_training=True):
"""convert the examples to the XLNet features"""
query_tokenized = tokenizer(example.question_text)[:max_query_length]
#tokenize paragraph and get start/end position of the answer in tokenized paragraph
paragraph_tokenized = tokenizer(example.paragraph_text)
chartok_to_tok_index = [] # char to its corresponding token's index
tok_start_to_chartok_index = [] # token index to its first character's index
tok_end_to_chartok_index = [] # token index to its last character's index
char_cnt = 0
for i, token in enumerate(paragraph_tokenized):
chartok_to_tok_index.extend([i] * len(token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = ''.join(paragraph_tokenized).replace(u'▁', ' ')
# XLNet takes a more complicated strategy to match the origin text
# and the tokenized tokens
# Get the LCS matching between origin text and token-concatenated text.
n, m = len(example.paragraph_text), len(tok_cat_text)
max_dist = abs(n - m) + 5
for _ in range(2):
f, g = lcs_match(max_dist, example.paragraph_text, tok_cat_text)
if f[n - 1, m - 1] > 0.8 * n:
break
max_dist *= 2
# Get the mapping from orgin text/tokenized text to tokenized text/origin text
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g:
break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
# get start/end mapping
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(paragraph_tokenized)): # for each token in the tokenized paragraph
start_chartok_pos = tok_start_to_chartok_index[i] # first character's index in origin text
end_chartok_pos = tok_end_to_chartok_index[i] # last character's index in origin text
start_orig_pos = convert_index(chartok_to_orig_index, start_chartok_pos, n, is_start=True)
end_orig_pos = convert_index(chartok_to_orig_index, end_chartok_pos, m, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
tok_start_position, tok_end_position = -1, -1
# get mapped start/end position
if is_training and not example.is_impossible:
start_chartok_pos = convert_index(orig_to_chartok_index, example.start_offset,
is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = convert_index(orig_to_chartok_index, example.end_offset, is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
# get doc spans using sliding window
doc_spans, doc_spans_indices = get_doc_spans(paragraph_tokenized,
max_seq_length - len(query_tokenized) - 3,
doc_stride)
# record whether the tokens in a docspan have max context
token_is_max_context = [{
p: check_is_max_context(doc_spans_indices, i, p + doc_spans_indices[i][0])
for p in range(len(doc_span))
} for (i, doc_span) in enumerate(doc_spans)]
# get token -> origin text mapping
cur_tok_start_to_orig_index = [[tok_start_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
cur_tok_end_to_orig_index = [[tok_end_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
# get sequence features: tokens, segment_ids, p_masks
seq_features = [
concat_sequences([doc_span, query_tokenized], [[sep_token]] * 2 + [[cls_token]],
[[0] * len(doc_span), [1] * len(query_tokenized)], [[1], [1], [0]])
for doc_span in doc_spans
]
# get the start/end positions aligned to doc spans. If is_impossible or position out of span
# set position to cls_index, i.e., last token in the sequence.
if not example.is_impossible:
positions = [
align_position2doc_spans([tok_start_position, tok_end_position], doc_idx, offset=0,
default_value=len(seq[0]) - 1)
for (doc_idx, seq) in zip(doc_spans_indices, seq_features)
]
else:
positions = [(len(seq_feature[0]) - 1, len(seq_feature[0]) - 1)
for seq_feature in seq_features]
features = [
SquadXLNetFeautre(example_id=example.example_id, qas_id=example.qas_id,
tok_start_to_orig_index=t2st, tok_end_to_orig_index=t2ed,
valid_length=len(tokens), tokens=tokens, token_is_max_context=is_max,
input_ids=vocab[tokens], p_mask=p_mask, segment_ids=segment_ids,
start_position=start, end_position=end,
paragraph_text=example.paragraph_text, paragraph_len=len(tokens),
is_impossible=(start == len(tokens) - 1))
for (tokens, segment_ids, p_mask), (
start,
end), is_max, t2st, t2ed in zip(seq_features, positions, token_is_max_context,
cur_tok_start_to_orig_index, cur_tok_end_to_orig_index)
]
return features
def preprocess_dataset(tokenizer, dataset, vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, num_workers=16, load_from_pickle=False,
feature_file=None, is_training=True):
"""Loads a dataset into features"""
vocab = tokenizer.vocab if vocab is None else vocab
trans = partial(convert_examples_to_features, tokenizer=tokenizer, cls_token=vocab.cls_token,
sep_token=vocab.sep_token, vocab=vocab, max_seq_length=max_seq_length,
doc_stride=doc_stride, max_query_length=max_query_length)
pool = mp.Pool(num_workers)
start = time.time()
if not load_from_pickle:
example_trans = partial(convert_squad_examples, is_training=is_training)
# convert the raw dataset into raw features
examples = pool.map(example_trans, dataset)
raw_features = list(map(trans, examples)) #pool.map(trans, examples)
if feature_file:
with open(feature_file, 'wb') as file:
pickle.dump(raw_features, file)
else:
assert feature_file, 'feature file should be provided.'
with open(feature_file, 'rb') as file:
raw_features = pickle.load(file)
end = time.time()
pool.close()
log.info('Done! Transform dataset costs %.2f seconds.', (end - start))
return raw_features
def convert_full_features_to_input_features(raw_features):
"""convert the full features into the input features"""
data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))
data_features = data_features.transform(lambda *example: (
example[0], # example_id
example[7], # inputs_id
example[9], # segment_ids
example[2], # valid_length,
example[8], # p_mask
example[10], # start_position,
example[11], # end_position
example[14])) # is_impossible
return data_features
def split_array(arr, num_of_splits):
"""split an array into equal pieces"""
# TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7
size = arr.shape[0]
if size < num_of_splits:
return [arr[i:i + 1] for i in range(size)]
slice_len, rest = divmod(size, num_of_splits)
div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))
for index in range(num_of_splits)]
slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]
return slices
def split_and_load(arrs, _ctxs):
"""split and load arrays to a list of contexts"""
# TODO Replace split_array() with gluon.utils.split_data() once targeting MXNet 1.7
assert isinstance(arrs, (list, tuple))
# split and load
loaded_arrs = [[i.as_in_context(ctx) for i, ctx in zip(split_array(arr, len(_ctxs)), _ctxs)]
for arr in arrs]
return zip(*loaded_arrs)
def _apply_gradient_decay():
"""apply layer-wise gradient decay.
Note that the description in origin paper about layer-wise learning rate decay
is inaccurate. According to their implementation, they are actually performing
layer-wise gradient decay. Gradient decay and learning rate decay could be the
same by using standard SGD, but different by using Adaptive optimizer(e.g., Adam).
"""
parameter_not_included = ['seg_emb', 'query_key_bias', 'query_emb_bias', 'query_seg_bias']
num_layers = len(xlnet_base._net.transformer_cells)
for (i, layer_parameters) in enumerate(xlnet_base._net.transformer_cells):
layer_params = layer_parameters.collect_params()
for key, value in layer_params.items():
skip = False
for pn in parameter_not_included:
if pn in key:
skip = True
if skip:
continue
if value.grad_req != 'null':
for arr in value.list_grad():
arr *= args.layerwise_decay**(num_layers - i - 1)
def train():
"""Training function."""
segment = 'train'
log.info('Loading %s data...', segment)
# Note that for XLNet, the authors always use squad2 dataset for training
train_data = SQuAD(segment, version='2.0')
if args.debug:
sampled_data = [train_data[i] for i in range(100)]
train_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in Train data: %s', len(train_data))
train_data_features = preprocess_dataset(
tokenizer, train_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.train_dataset_file)
train_data_input = convert_full_features_to_input_features(train_data_features)
log.info('The number of examples after preprocessing: %s', len(train_data_input))
train_dataloader = mx.gluon.data.DataLoader(train_data_input, batchify_fn=batchify_fn,
batch_size=args.batch_size, num_workers=4,
shuffle=True)
optimizer_params = {'learning_rate': args.lr, 'wd': args.wd}
try:
trainer = mx.gluon.Trainer(net.collect_params(), args.optimizer, optimizer_params,
update_on_kvstore=False)
except ValueError as _:
warnings.warn('AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = mx.gluon.Trainer(net.collect_params(), 'bertadam', optimizer_params,
update_on_kvstore=False)
num_train_examples = len(train_data_input)
step_size = args.batch_size * args.accumulate if args.accumulate else args.batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
epoch_number = args.epochs
if args.training_steps:
num_train_steps = args.training_steps
epoch_number = 100000
log.info('training steps=%d', num_train_steps)
num_warmup_steps = int(num_train_steps * args.warmup_ratio)
step_num = 0
def set_new_lr(step_num, batch_id):
"""set new learning rate"""
# set grad to zero for gradient accumulation
if args.accumulate:
if batch_id % args.accumulate == 0:
net.collect_params().zero_grad()
step_num += 1
else:
step_num += 1
# learning rate schedule
# Notice that this learning rate scheduler is adapted from traditional linear learning
# rate scheduler where step_num >= num_warmup_steps, new_lr = 1 - step_num/num_train_steps
if step_num < num_warmup_steps:
new_lr = args.lr * step_num / num_warmup_steps
else:
offset = (step_num - num_warmup_steps) * args.lr / \
(num_train_steps - num_warmup_steps)
new_lr = args.lr - offset
trainer.set_learning_rate(new_lr)
return step_num
# Do not apply weight decay on LayerNorm and bias terms
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
# Collect differentiable parameters
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
# Set grad_req if gradient accumulation is required
if args.accumulate:
for p in params:
p.grad_req = 'add'
epoch_tic = time.time()
total_num = 0
log_num = 0
finish_flag = False
for epoch_id in range(epoch_number):
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
tic = time.time()
if finish_flag:
break
for batch_id, data in enumerate(train_dataloader):
# set new lr
step_num = set_new_lr(step_num, batch_id)
data_list = list(split_and_load(data, ctx))
# forward and backward
batch_loss = []
batch_loss_sep = []
with mx.autograd.record():
for splited_data in data_list:
_, inputs, token_types, valid_length, p_mask, start_label, end_label, is_impossible = splited_data # pylint: disable=line-too-long
valid_length = valid_length.astype('float32')
log_num += len(inputs)
total_num += len(inputs)
out_sep, out = net(
inputs,
token_types,
valid_length,
[start_label, end_label],
p_mask=p_mask, # pylint: disable=line-too-long
is_impossible=is_impossible)
ls = out.mean() / len(ctx)
batch_loss_sep.append(out_sep)
batch_loss.append(ls)
if args.accumulate:
ls = ls / args.accumulate
ls.backward()
# update
if not args.accumulate or (batch_id + 1) % args.accumulate == 0:
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(params, 1)
_apply_gradient_decay()
trainer.update(1, ignore_stale_grad=True)
step_loss_sep_tmp = np.array(
[[span_ls.mean().asscalar(),
cls_ls.mean().asscalar()] for span_ls, cls_ls in batch_loss_sep])
step_loss_sep_tmp = list(np.sum(step_loss_sep_tmp, axis=0))
step_loss_span += step_loss_sep_tmp[0] / len(ctx)
step_loss_cls += step_loss_sep_tmp[1] / len(ctx)
step_loss += sum([ls.asscalar() for ls in batch_loss])
if (batch_id + 1) % log_interval == 0:
toc = time.time()
log.info(
'Epoch: %d, Batch: %d/%d, Loss=%.4f, lr=%.7f '
'Time cost=%.1f Thoughput=%.2f samples/s', epoch_id + 1, batch_id + 1,
len(train_dataloader), step_loss / log_interval, trainer.learning_rate,
toc - tic, log_num / (toc - tic))
log.info('span_loss: %.4f, cls_loss: %.4f', step_loss_span / log_interval,
step_loss_cls / log_interval)
tic = time.time()
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
log_num = 0
if step_num >= num_train_steps:
logging.info('Finish training step: %d', step_num)
finish_flag = True
break
epoch_toc = time.time()
log.info('Time cost=%.2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
version_prefix = 'squad2' if args.version_2 else 'squad1'
ckpt_name = 'model_{}_{}_{}.params'.format(args.model, version_prefix, epoch_id + 1)
params_saved = os.path.join(args.output_dir, ckpt_name)
nlp.utils.save_parameters(net, params_saved)
log.info('params saved in: %s', params_saved)
RawResultExtended = collections.namedtuple(
'RawResultExtended',
['start_top_log_probs', 'start_top_index', 'end_top_log_probs', 'end_top_index', 'cls_logits'])
def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loading dev data...')
if args.version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
(_, _), (data_file_name, _) \
= dev_data._data_file[dev_data._version][dev_data._segment]
dev_data_path = os.path.join(dev_data._root, data_file_name)
if args.debug:
sampled_data = [dev_data[0], dev_data[1], dev_data[2]]
dev_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in dev data: %d', len(dev_data))
dev_data_features = preprocess_dataset(
tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.dev_dataset_file)
dev_data_input = convert_full_features_to_input_features(dev_data_features)
log.info('The number of examples after preprocessing: %d', len(dev_data_input))
dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,
num_workers=4, batch_size=args.test_batch_size,
shuffle=False, last_batch='keep')
log.info('start prediction')
all_results = collections.defaultdict(list)
epoch_tic = time.time()
total_num = 0
for (batch_id, data) in enumerate(dev_dataloader):
data_list = list(split_and_load(data, ctx))
for splited_data in data_list:
example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data
total_num += len(inputs)
outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)
example_ids = example_ids.asnumpy().tolist()
for c, example_ids in enumerate(example_ids):
result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),
start_top_index=outputs[1][c].asnumpy().tolist(),
end_top_log_probs=outputs[2][c].asnumpy().tolist(),
end_top_index=outputs[3][c].asnumpy().tolist(),
cls_logits=outputs[4][c].asnumpy().tolist())
all_results[example_ids].append(result)
if batch_id % args.log_interval == 0:
log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))
epoch_toc = time.time()
log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
log.info('Get prediction results...')
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict() | for features in dev_data_features:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
score_diff, best_non_null_entry, nbest_json = predict_extended(
features=features, results=results, n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,
end_n_top=args.end_top_n)
scores_diff_json[example_qas_id] = score_diff
all_predictions[example_qas_id] = best_non_null_entry
all_nbest_json[example_qas_id] = nbest_json
output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')
with open(output_prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
with open(output_null_log_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):
arguments = [
dev_data_path, output_prediction_file, '--na-prob-thresh',
str(args.null_score_diff_threshold)
]
if args.version_2:
arguments += ['--na-prob-file', output_null_log_odds_file]
subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)
else:
log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '
'Check index.rst for the detail.')
if __name__ == '__main__':
if not args.only_predict:
train()
evaluate()
else:
evaluate() | scores_diff_json = collections.OrderedDict() | random_line_split |
run_squad.py | """
Question Answering with XLNet
"""
# pylint:disable=redefined-outer-name,logging-format-interpolation
import os
import time
import argparse
import random
import logging
import warnings
import json
import collections
import pickle
import sys
import itertools
import subprocess
import multiprocessing as mp
from functools import partial
import numpy as np
import mxnet as mx
import gluonnlp as nlp
from gluonnlp.data import SQuAD
from gluonnlp.data.bert.glue import concat_sequences
from gluonnlp.data.bert.squad import get_doc_spans, \
check_is_max_context, convert_squad_examples, align_position2doc_spans
from gluonnlp.data.xlnet.squad import lcs_match, convert_index
from model.qa import XLNetForQA
from transformer import model
from xlnet_qa_evaluate import predict_extended
parser = argparse.ArgumentParser(description='XLNet QA example.'
'We fine-tune the XLNet model on SQuAD dataset.')
# I/O configuration
parser.add_argument('--sentencepiece', type=str, default=None,
help='Path to the sentencepiece .model file for both tokenization and vocab.')
parser.add_argument('--pretrained_xlnet_parameters', type=str, default=None,
help='Pre-trained bert model parameter file. default is None')
parser.add_argument('--load_pickle', action='store_true',
help='Whether do data preprocessing or load from pickled file')
parser.add_argument('--dev_dataset_file', default='./output_dir/out.dev', type=str,
help='Path to dev data features')
parser.add_argument('--train_dataset_file', default='./output_dir/out.train', type=str,
help='Path to train data features')
parser.add_argument('--model_parameters', type=str, default=None, help='Model parameter file')
parser.add_argument(
'--output_dir', type=str, default='./output_dir',
help='The output directory where the model params will be written.'
' default is ./output_dir')
# Training configuration
parser.add_argument('--seed', type=int, default=3, help='Random seed')
parser.add_argument('--version_2', action='store_true', help='Whether use SQuAD v2.0 dataset')
parser.add_argument('--model', type=str, default='xlnet_cased_l12_h768_a12',
choices=['xlnet_cased_l24_h1024_a16', 'xlnet_cased_l12_h768_a12'],
help='The name of pre-trained XLNet model to fine-tune')
parser.add_argument('--dataset', type=str, default='126gb', choices=['126gb'],
help='The dataset BERT pre-trained with. Currently only 126gb is available')
parser.add_argument(
'--uncased', action='store_true', help=
'if set, inputs are converted to lower case. Up to 01/04/2020, all released models are cased')
parser.add_argument('--gpu', type=int, default=None,
help='Number of gpus to use for finetuning. CPU is used if not set.')
parser.add_argument('--log_interval', type=int, default=10, help='report interval. default is 10')
parser.add_argument('--debug', action='store_true',
help='Run the example in test mode for sanity checks')
parser.add_argument('--only_predict', action='store_true', help='Whether to predict only.')
# Hyperparameters
parser.add_argument('--epochs', type=int, default=3, help='number of epochs, default is 3')
parser.add_argument(
'--training_steps', type=int, help='training steps. Note that epochs will be ignored '
'if training steps are set')
parser.add_argument('--batch_size', type=int, default=32,
help='Batch size. Number of examples per gpu in a minibatch. default is 32')
parser.add_argument('--test_batch_size', type=int, default=24,
help='Test batch size. default is 24')
parser.add_argument('--optimizer', type=str, default='bertadam',
help='optimization algorithm. default is bertadam')
parser.add_argument(
'--accumulate', type=int, default=None, help='The number of batches for '
'gradients accumulation to simulate large batch size. Default is None')
parser.add_argument('--lr', type=float, default=3e-5,
help='Initial learning rate. default is 5e-5')
parser.add_argument(
'--warmup_ratio', type=float, default=0,
help='ratio of warmup steps that linearly increase learning rate from '
'0 to target learning rate. default is 0')
parser.add_argument('--layerwise_decay', type=float, default=0.75, help='Layer-wise lr decay')
parser.add_argument('--wd', type=float, default=0.01, help='weight decay')
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--attention_dropout', type=float, default=0.1, help='attention dropout')
# Data pre/post processing
parser.add_argument(
'--max_seq_length', type=int, default=512,
help='The maximum total input sequence length after WordPiece tokenization.'
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded. default is 512')
parser.add_argument(
'--doc_stride', type=int, default=128,
help='When splitting up a long document into chunks, how much stride to '
'take between chunks. default is 128')
parser.add_argument(
'--max_query_length', type=int, default=64,
help='The maximum number of tokens for the question. Questions longer than '
'this will be truncated to this length. default is 64')
parser.add_argument(
'--round_to', type=int, default=None,
help='The length of padded sequences will be rounded up to be multiple of this argument.'
'When round to is set to 8, training throughput may increase for mixed precision'
'training on GPUs with tensorcores.')
parser.add_argument('--start_top_n', type=int, default=5,
help='Number of start-position candidates')
parser.add_argument('--end_top_n', type=int, default=5,
help='Number of end-position candidates corresponding '
'to a start position')
parser.add_argument('--n_best_size', type=int, default=5, help='top N results written to file')
parser.add_argument(
'--max_answer_length', type=int, default=64,
help='The maximum length of an answer that can be generated. This is needed '
'because the start and end predictions are not conditioned on one another.'
' default is 64')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of workers used for data preprocessing')
parser.add_argument(
'--null_score_diff_threshold', type=float, default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.'
'Typical values are between -1.0 and -5.0. default is 0.0. '
'Note that a best value can be automatically found by the evaluation script')
args = parser.parse_args()
# random seed
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
# set the logger
log = logging.getLogger('gluonnlp')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(levelname)s:%(name)s:%(asctime)s %(message)s',
datefmt='%H:%M:%S')
fh = logging.FileHandler(os.path.join(args.output_dir, 'finetune_squad.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(fh)
log.info(args)
pretrained_xlnet_parameters = args.pretrained_xlnet_parameters
if pretrained_xlnet_parameters and args.model_parameters:
raise ValueError('Cannot provide both pre-trained BERT parameters and '
'BertForQA model parameters.')
ctx = [mx.cpu(0)] if not args.gpu else [mx.gpu(i) for i in range(args.gpu)]
log_interval = args.log_interval * args.accumulate if args.accumulate else args.log_interval
if args.accumulate:
log.info('Using gradient accumulation. Effective batch size = %d',
args.accumulate * args.batch_size)
if args.max_seq_length <= args.max_query_length + 3:
raise ValueError('The max_seq_length (%d) must be greater than max_query_length '
'(%d) + 3' % (args.max_seq_length, args.max_query_length))
get_pretrained = True
get_model_params = {
'name': args.model,
'dataset_name': args.dataset,
'pretrained': get_pretrained,
'ctx': ctx,
'use_decoder': False,
'dropout': args.dropout,
'attention_dropout': args.attention_dropout
}
# model, vocabulary and tokenizer
xlnet_base, vocab, tokenizer = model.get_model(**get_model_params)
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Stack('int32'), # example_id
nlp.data.batchify.Pad(axis=0, pad_val=vocab[vocab.padding_token], dtype='int32',
round_to=args.round_to), # input_ids
nlp.data.batchify.Pad(axis=0, pad_val=3, dtype='int32', round_to=args.round_to), # segment_ids
nlp.data.batchify.Stack('float32'), # valid_length
nlp.data.batchify.Pad(axis=0, pad_val=1, round_to=args.round_to), # p_mask
nlp.data.batchify.Stack('float32'), # start_position
nlp.data.batchify.Stack('float32'), # end_position
nlp.data.batchify.Stack('float32')) # is_impossible
if pretrained_xlnet_parameters:
# only load XLnetModel parameters
nlp.utils.load_parameters(xlnet_base, pretrained_xlnet_parameters, ctx=ctx, ignore_extra=True,
cast_dtype=True)
units = xlnet_base._net._units
net = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n, end_top_n=args.end_top_n,
units=units)
net_eval = XLNetForQA(xlnet_base=xlnet_base, start_top_n=args.start_top_n,
end_top_n=args.end_top_n, units=units, is_eval=True,
params=net.collect_params())
initializer = mx.init.Normal(0.02)
if args.model_parameters:
# load complete XLNetForQA parameters
nlp.utils.load_parameters(net, args.model_parameters, ctx=ctx, cast_dtype=True)
else:
net.start_logits.initialize(init=initializer, ctx=ctx)
net.end_logits.initialize(init=initializer, ctx=ctx)
net.answer_class.initialize(init=initializer, ctx=ctx)
net.hybridize(static_alloc=True)
net_eval.hybridize(static_alloc=True)
SquadXLNetFeautre = collections.namedtuple('SquadXLNetFeautre', [
'example_id', 'qas_id', 'valid_length', 'tokens', 'tok_start_to_orig_index',
'tok_end_to_orig_index', 'token_is_max_context', 'input_ids', 'p_mask', 'segment_ids',
'start_position', 'end_position', 'paragraph_text', 'paragraph_len', 'is_impossible'
])
def convert_examples_to_features(example, tokenizer=None, cls_token=None, sep_token=None,
vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, is_training=True):
"""convert the examples to the XLNet features"""
query_tokenized = tokenizer(example.question_text)[:max_query_length]
#tokenize paragraph and get start/end position of the answer in tokenized paragraph
paragraph_tokenized = tokenizer(example.paragraph_text)
chartok_to_tok_index = [] # char to its corresponding token's index
tok_start_to_chartok_index = [] # token index to its first character's index
tok_end_to_chartok_index = [] # token index to its last character's index
char_cnt = 0
for i, token in enumerate(paragraph_tokenized):
chartok_to_tok_index.extend([i] * len(token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = ''.join(paragraph_tokenized).replace(u'▁', ' ')
# XLNet takes a more complicated strategy to match the origin text
# and the tokenized tokens
# Get the LCS matching between origin text and token-concatenated text.
n, m = len(example.paragraph_text), len(tok_cat_text)
max_dist = abs(n - m) + 5
for _ in range(2):
f, g = lcs_match(max_dist, example.paragraph_text, tok_cat_text)
if f[n - 1, m - 1] > 0.8 * n:
break
max_dist *= 2
# Get the mapping from orgin text/tokenized text to tokenized text/origin text
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g:
break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
# get start/end mapping
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(paragraph_tokenized)): # for each token in the tokenized paragraph
start_chartok_pos = tok_start_to_chartok_index[i] # first character's index in origin text
end_chartok_pos = tok_end_to_chartok_index[i] # last character's index in origin text
start_orig_pos = convert_index(chartok_to_orig_index, start_chartok_pos, n, is_start=True)
end_orig_pos = convert_index(chartok_to_orig_index, end_chartok_pos, m, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
tok_start_position, tok_end_position = -1, -1
# get mapped start/end position
if is_training and not example.is_impossible:
start_chartok_pos = convert_index(orig_to_chartok_index, example.start_offset,
is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = convert_index(orig_to_chartok_index, example.end_offset, is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
# get doc spans using sliding window
doc_spans, doc_spans_indices = get_doc_spans(paragraph_tokenized,
max_seq_length - len(query_tokenized) - 3,
doc_stride)
# record whether the tokens in a docspan have max context
token_is_max_context = [{
p: check_is_max_context(doc_spans_indices, i, p + doc_spans_indices[i][0])
for p in range(len(doc_span))
} for (i, doc_span) in enumerate(doc_spans)]
# get token -> origin text mapping
cur_tok_start_to_orig_index = [[tok_start_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
cur_tok_end_to_orig_index = [[tok_end_to_orig_index[p + st] for p in range(len(doc_span))]
for doc_span, (st, ed) in zip(doc_spans, doc_spans_indices)]
# get sequence features: tokens, segment_ids, p_masks
seq_features = [
concat_sequences([doc_span, query_tokenized], [[sep_token]] * 2 + [[cls_token]],
[[0] * len(doc_span), [1] * len(query_tokenized)], [[1], [1], [0]])
for doc_span in doc_spans
]
# get the start/end positions aligned to doc spans. If is_impossible or position out of span
# set position to cls_index, i.e., last token in the sequence.
if not example.is_impossible:
positions = [
align_position2doc_spans([tok_start_position, tok_end_position], doc_idx, offset=0,
default_value=len(seq[0]) - 1)
for (doc_idx, seq) in zip(doc_spans_indices, seq_features)
]
else:
po | features = [
SquadXLNetFeautre(example_id=example.example_id, qas_id=example.qas_id,
tok_start_to_orig_index=t2st, tok_end_to_orig_index=t2ed,
valid_length=len(tokens), tokens=tokens, token_is_max_context=is_max,
input_ids=vocab[tokens], p_mask=p_mask, segment_ids=segment_ids,
start_position=start, end_position=end,
paragraph_text=example.paragraph_text, paragraph_len=len(tokens),
is_impossible=(start == len(tokens) - 1))
for (tokens, segment_ids, p_mask), (
start,
end), is_max, t2st, t2ed in zip(seq_features, positions, token_is_max_context,
cur_tok_start_to_orig_index, cur_tok_end_to_orig_index)
]
return features
def preprocess_dataset(tokenizer, dataset, vocab=None, max_seq_length=384, doc_stride=128,
max_query_length=64, num_workers=16, load_from_pickle=False,
feature_file=None, is_training=True):
"""Loads a dataset into features"""
vocab = tokenizer.vocab if vocab is None else vocab
trans = partial(convert_examples_to_features, tokenizer=tokenizer, cls_token=vocab.cls_token,
sep_token=vocab.sep_token, vocab=vocab, max_seq_length=max_seq_length,
doc_stride=doc_stride, max_query_length=max_query_length)
pool = mp.Pool(num_workers)
start = time.time()
if not load_from_pickle:
example_trans = partial(convert_squad_examples, is_training=is_training)
# convert the raw dataset into raw features
examples = pool.map(example_trans, dataset)
raw_features = list(map(trans, examples)) #pool.map(trans, examples)
if feature_file:
with open(feature_file, 'wb') as file:
pickle.dump(raw_features, file)
else:
assert feature_file, 'feature file should be provided.'
with open(feature_file, 'rb') as file:
raw_features = pickle.load(file)
end = time.time()
pool.close()
log.info('Done! Transform dataset costs %.2f seconds.', (end - start))
return raw_features
def convert_full_features_to_input_features(raw_features):
"""convert the full features into the input features"""
data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))
data_features = data_features.transform(lambda *example: (
example[0], # example_id
example[7], # inputs_id
example[9], # segment_ids
example[2], # valid_length,
example[8], # p_mask
example[10], # start_position,
example[11], # end_position
example[14])) # is_impossible
return data_features
def split_array(arr, num_of_splits):
"""split an array into equal pieces"""
# TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7
size = arr.shape[0]
if size < num_of_splits:
return [arr[i:i + 1] for i in range(size)]
slice_len, rest = divmod(size, num_of_splits)
div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))
for index in range(num_of_splits)]
slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]
return slices
def split_and_load(arrs, _ctxs):
"""split and load arrays to a list of contexts"""
# TODO Replace split_array() with gluon.utils.split_data() once targeting MXNet 1.7
assert isinstance(arrs, (list, tuple))
# split and load
loaded_arrs = [[i.as_in_context(ctx) for i, ctx in zip(split_array(arr, len(_ctxs)), _ctxs)]
for arr in arrs]
return zip(*loaded_arrs)
def _apply_gradient_decay():
"""apply layer-wise gradient decay.
Note that the description in origin paper about layer-wise learning rate decay
is inaccurate. According to their implementation, they are actually performing
layer-wise gradient decay. Gradient decay and learning rate decay could be the
same by using standard SGD, but different by using Adaptive optimizer(e.g., Adam).
"""
parameter_not_included = ['seg_emb', 'query_key_bias', 'query_emb_bias', 'query_seg_bias']
num_layers = len(xlnet_base._net.transformer_cells)
for (i, layer_parameters) in enumerate(xlnet_base._net.transformer_cells):
layer_params = layer_parameters.collect_params()
for key, value in layer_params.items():
skip = False
for pn in parameter_not_included:
if pn in key:
skip = True
if skip:
continue
if value.grad_req != 'null':
for arr in value.list_grad():
arr *= args.layerwise_decay**(num_layers - i - 1)
def train():
"""Training function."""
segment = 'train'
log.info('Loading %s data...', segment)
# Note that for XLNet, the authors always use squad2 dataset for training
train_data = SQuAD(segment, version='2.0')
if args.debug:
sampled_data = [train_data[i] for i in range(100)]
train_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in Train data: %s', len(train_data))
train_data_features = preprocess_dataset(
tokenizer, train_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.train_dataset_file)
train_data_input = convert_full_features_to_input_features(train_data_features)
log.info('The number of examples after preprocessing: %s', len(train_data_input))
train_dataloader = mx.gluon.data.DataLoader(train_data_input, batchify_fn=batchify_fn,
batch_size=args.batch_size, num_workers=4,
shuffle=True)
optimizer_params = {'learning_rate': args.lr, 'wd': args.wd}
try:
trainer = mx.gluon.Trainer(net.collect_params(), args.optimizer, optimizer_params,
update_on_kvstore=False)
except ValueError as _:
warnings.warn('AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = mx.gluon.Trainer(net.collect_params(), 'bertadam', optimizer_params,
update_on_kvstore=False)
num_train_examples = len(train_data_input)
step_size = args.batch_size * args.accumulate if args.accumulate else args.batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
epoch_number = args.epochs
if args.training_steps:
num_train_steps = args.training_steps
epoch_number = 100000
log.info('training steps=%d', num_train_steps)
num_warmup_steps = int(num_train_steps * args.warmup_ratio)
step_num = 0
def set_new_lr(step_num, batch_id):
"""set new learning rate"""
# set grad to zero for gradient accumulation
if args.accumulate:
if batch_id % args.accumulate == 0:
net.collect_params().zero_grad()
step_num += 1
else:
step_num += 1
# learning rate schedule
# Notice that this learning rate scheduler is adapted from traditional linear learning
# rate scheduler where step_num >= num_warmup_steps, new_lr = 1 - step_num/num_train_steps
if step_num < num_warmup_steps:
new_lr = args.lr * step_num / num_warmup_steps
else:
offset = (step_num - num_warmup_steps) * args.lr / \
(num_train_steps - num_warmup_steps)
new_lr = args.lr - offset
trainer.set_learning_rate(new_lr)
return step_num
# Do not apply weight decay on LayerNorm and bias terms
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
# Collect differentiable parameters
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
# Set grad_req if gradient accumulation is required
if args.accumulate:
for p in params:
p.grad_req = 'add'
epoch_tic = time.time()
total_num = 0
log_num = 0
finish_flag = False
for epoch_id in range(epoch_number):
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
tic = time.time()
if finish_flag:
break
for batch_id, data in enumerate(train_dataloader):
# set new lr
step_num = set_new_lr(step_num, batch_id)
data_list = list(split_and_load(data, ctx))
# forward and backward
batch_loss = []
batch_loss_sep = []
with mx.autograd.record():
for splited_data in data_list:
_, inputs, token_types, valid_length, p_mask, start_label, end_label, is_impossible = splited_data # pylint: disable=line-too-long
valid_length = valid_length.astype('float32')
log_num += len(inputs)
total_num += len(inputs)
out_sep, out = net(
inputs,
token_types,
valid_length,
[start_label, end_label],
p_mask=p_mask, # pylint: disable=line-too-long
is_impossible=is_impossible)
ls = out.mean() / len(ctx)
batch_loss_sep.append(out_sep)
batch_loss.append(ls)
if args.accumulate:
ls = ls / args.accumulate
ls.backward()
# update
if not args.accumulate or (batch_id + 1) % args.accumulate == 0:
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(params, 1)
_apply_gradient_decay()
trainer.update(1, ignore_stale_grad=True)
step_loss_sep_tmp = np.array(
[[span_ls.mean().asscalar(),
cls_ls.mean().asscalar()] for span_ls, cls_ls in batch_loss_sep])
step_loss_sep_tmp = list(np.sum(step_loss_sep_tmp, axis=0))
step_loss_span += step_loss_sep_tmp[0] / len(ctx)
step_loss_cls += step_loss_sep_tmp[1] / len(ctx)
step_loss += sum([ls.asscalar() for ls in batch_loss])
if (batch_id + 1) % log_interval == 0:
toc = time.time()
log.info(
'Epoch: %d, Batch: %d/%d, Loss=%.4f, lr=%.7f '
'Time cost=%.1f Thoughput=%.2f samples/s', epoch_id + 1, batch_id + 1,
len(train_dataloader), step_loss / log_interval, trainer.learning_rate,
toc - tic, log_num / (toc - tic))
log.info('span_loss: %.4f, cls_loss: %.4f', step_loss_span / log_interval,
step_loss_cls / log_interval)
tic = time.time()
step_loss = 0.0
step_loss_span = 0
step_loss_cls = 0
log_num = 0
if step_num >= num_train_steps:
logging.info('Finish training step: %d', step_num)
finish_flag = True
break
epoch_toc = time.time()
log.info('Time cost=%.2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
version_prefix = 'squad2' if args.version_2 else 'squad1'
ckpt_name = 'model_{}_{}_{}.params'.format(args.model, version_prefix, epoch_id + 1)
params_saved = os.path.join(args.output_dir, ckpt_name)
nlp.utils.save_parameters(net, params_saved)
log.info('params saved in: %s', params_saved)
RawResultExtended = collections.namedtuple(
'RawResultExtended',
['start_top_log_probs', 'start_top_index', 'end_top_log_probs', 'end_top_index', 'cls_logits'])
def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loading dev data...')
if args.version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
(_, _), (data_file_name, _) \
= dev_data._data_file[dev_data._version][dev_data._segment]
dev_data_path = os.path.join(dev_data._root, data_file_name)
if args.debug:
sampled_data = [dev_data[0], dev_data[1], dev_data[2]]
dev_data = mx.gluon.data.SimpleDataset(sampled_data)
log.info('Number of records in dev data: %d', len(dev_data))
dev_data_features = preprocess_dataset(
tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, num_workers=args.num_workers,
max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,
feature_file=args.dev_dataset_file)
dev_data_input = convert_full_features_to_input_features(dev_data_features)
log.info('The number of examples after preprocessing: %d', len(dev_data_input))
dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,
num_workers=4, batch_size=args.test_batch_size,
shuffle=False, last_batch='keep')
log.info('start prediction')
all_results = collections.defaultdict(list)
epoch_tic = time.time()
total_num = 0
for (batch_id, data) in enumerate(dev_dataloader):
data_list = list(split_and_load(data, ctx))
for splited_data in data_list:
example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data
total_num += len(inputs)
outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)
example_ids = example_ids.asnumpy().tolist()
for c, example_ids in enumerate(example_ids):
result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),
start_top_index=outputs[1][c].asnumpy().tolist(),
end_top_log_probs=outputs[2][c].asnumpy().tolist(),
end_top_index=outputs[3][c].asnumpy().tolist(),
cls_logits=outputs[4][c].asnumpy().tolist())
all_results[example_ids].append(result)
if batch_id % args.log_interval == 0:
log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))
epoch_toc = time.time()
log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
log.info('Get prediction results...')
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for features in dev_data_features:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
score_diff, best_non_null_entry, nbest_json = predict_extended(
features=features, results=results, n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,
end_n_top=args.end_top_n)
scores_diff_json[example_qas_id] = score_diff
all_predictions[example_qas_id] = best_non_null_entry
all_nbest_json[example_qas_id] = nbest_json
output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')
with open(output_prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
with open(output_null_log_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):
arguments = [
dev_data_path, output_prediction_file, '--na-prob-thresh',
str(args.null_score_diff_threshold)
]
if args.version_2:
arguments += ['--na-prob-file', output_null_log_odds_file]
subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)
else:
log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '
'Check index.rst for the detail.')
if __name__ == '__main__':
if not args.only_predict:
train()
evaluate()
else:
evaluate()
| sitions = [(len(seq_feature[0]) - 1, len(seq_feature[0]) - 1)
for seq_feature in seq_features]
| conditional_block |
fix-md-dialect.py | #########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
print >>sys.stderr, "ERROR: " + s
print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
print >>sys.stderr, inputline
exit(1)
def trace(msg) :
if TRACE :
print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
'This class maintains the indent stack during doc parsing.'
def __init__(self) :
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def init_indent(self) :
del self.my_stack
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def push_indent(self, n, new_type) :
#Increment the logical depth only if under a bullet type. This fixes problem #3.
level = self.logical_indent_level() + (self.current_type() == "bullet") # plus 1 if true
self.my_stack.append( {'physical':n, 'logical':level, 'type':new_type} )
def set_current_type(self, new_type) :
# adjust topmost type
self.my_stack[-1]['type'] = new_type
def pop_indent(self) :
if len(self.my_stack) > 1 :
return self.my_stack.pop()['physical']
else :
return 0
def current_indent(self) :
# top of stack, physical
return self.my_stack[-1]['physical']
def logical_indent_level(self) :
# top of stack, logical
return self.my_stack[-1]['logical']
def current_type(self) :
# top of stack, type
return self.my_stack[-1]['type']
## End class INDENT_STACK
global indent_stack
indent_stack = INDENT_STACK() # single instance
def convert_tabs(s) :
# Courtesy of Python, this does a real column-aware tab expansion.
# If this doesn't work, we'll need to go back to erroring on " \t", that is, spaces followed by tabs.
trace("orig length {0}".format(len(s)) )
ct = s.count("\t")
s = s.expandtabs(4)
trace("after {0} tab substitutions, end length is {1}".format(ct, len(s)) )
return s
def fix_prefix_blanks(new_type) :
global inputline
# Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.
# Don't worry about blank lines here, they are filtered out before calling this method.
# Both uses and maintains the indent stack, which is why we need the new_type passed in.
prefix_blanks = re.search(r'^[\s]*', inputline)
if prefix_blanks :
prefix_blanks = prefix_blanks.group()
trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
prefix_blanks = convert_tabs(prefix_blanks)
else :
prefix_blanks = ""
trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
# prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
# The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
# If the indent is less than previous, is it equal to the indent of the next lower indented object?
# Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
# The latter case is an anomoly, but there's no enforcement in Github-MD.
# The following logic is an empirical reverse engineering, that seems adequate so far.
# It basically says, find a prior level of indent that this is not less than, and then pretend that
# the objects between it and this object weren't there.
trace("current logical_indent_level is {0} and current_indent is {1}".format(
indent_stack.logical_indent_level(), indent_stack.current_indent() ))
while len(prefix_blanks) < indent_stack.current_indent() :
indent_stack.pop_indent()
if len(prefix_blanks) > indent_stack.current_indent() :
indent_stack.push_indent(len(prefix_blanks), new_type)
else : # len(prefix_blanks) == indent_stack.current_indent()
indent_stack.set_current_type(new_type)
trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
"and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
# Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
# the logical indent level.
trace("Orig line is " + inputline)
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
trace("New line is " + inputline)
def rewrite_relative_links() :
global inputline
trace("entering with line: " + inputline)
# Fix up the relative links in inputline. This fixes problem #5.
num_links = inputline.count("](")
links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
num_whole_links = len(links)
trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
if (num_links != num_whole_links) :
if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
# Nested link label expressions, with '!'.
# Special case where a link value is inlined into the link label,
# as in the first line of the base README.md file. Bail on such lines.
trace("WARNING: Found nested link label expressions.")
return
else :
report_error("Found link split across multiple lines. We can't process this.")
for linkitem in links :
pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
trace("Link: " + linkitem)
trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
labeltext = pieces[1]
href = pieces[3]
trace("Extracted labeltext is: " + labeltext)
trace("Extracted href is: " + href)
if re.search(r'^http|\?', href) :
# Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
trace("skipping absolute or parameterized URL")
continue
# Rewrite implicit index references to explicit, so the book will work as well
# with 'file:///' preview as with a real web server.
# We are only concerned with file path names here, so split at '#' if present.
num_sharps = href.count("#")
if (num_sharps >= 2) :
report_error("Multiple #'s in a single link href.")
elif (num_sharps == 1) :
# Implicit index references are directory names, which seldom have a filetype suffix.
# On the other hand, explicit file references must have filetype, else the browser
# won't know what to do with it. So if no filetype extension, assume is a directory
# and add 'index.html'. Skip if this is an intra-document link.
if not re.search(r'^#|\.[^/#]+#', href) :
if not href.count("/#") :
href = re.sub(r'#', "/#", href, 1)
href = re.sub(r'/#', "/index.html#", href, 1)
# Fix up '.md' references.
href = re.sub(r'^README\.md#', "index.html#", href)
href = re.sub(r'/README\.md#', "/index.html#", href)
href = re.sub(r'\.md#', ".html#", href)
else : # num_sharps == 0
# Same logic as above, just at $ instead of #.
if not re.search(r'\.[^/]+$', href) :
if not href.endswith("/") :
href = href + "/"
href = re.sub(r'/$', "/index.html", href)
# Fix up '.md' references.
href = re.sub(r'^README\.md$', "index.html", href)
href = re.sub(r'/README\.md$', "/index.html", href)
href = re.sub(r'\.md$', ".html", href)
trace("After .md fixup, href is: " + href)
# Re-write named anchors referring to generated tags.
sharp = href.find("#")
if (sharp >= 0) :
named_anchor = href[sharp+1 : ]
trace('named_anchor = "' + named_anchor + '"')
trace('labeltext = "' + labeltext + '"')
scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
if (scratch == named_anchor) :
trace("Found a rewritable case")
scratch = labeltext # Doxia-markdown doesn't change case
scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
href = re.sub("#" + named_anchor, "#" + scratch, href)
trace("After anchor rewrite, href is: " + href)
| if (href != pieces[3]) :
# Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
trace("Fixed link text is: " + scratch)
trace("linkitem is still: " + linkitem)
k = inputline.find(linkitem)
inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
global inputline, active_type
BLANKS = " "
TRACE = 0
FNR = -1
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
report_error("Please provide names of files to be processed, as command line arguments.")
for FILENAME in sys.argv[1:] :
infile = open(FILENAME, 'r')
outfile = open(FILENAME + ".tmp", 'w')
FNR = 0
H1_COUNT = 0
for inputline in infile :
FNR += 1
inputline = inputline.rstrip("\n")
if '](' in inputline :
# Detect lines with hyperlinks in them, and re-write them if necessary and possible.
# This is the only fall-through block, and we put it at the very beginning.
rewrite_relative_links(); # in inputline
# Fall through for further processing.
if (active_type == "code") and ("```" not in inputline) :
trace("in codeblock, regular line")
# what happens in the codeblock, stays in the codeblock
# Put this case first (after link detection), so we don't have to test it in all the other cases.
print >>outfile, inputline
continue
if (active_type == "code") and ("```" in inputline) :
trace("in codeblock, end delimiter line")
# detect end of codeblock
# This must be the second case.
if re.search(r'```[\s]*[^\s]', inputline) :
# If there's text following the end-``` on the same line, error out and fix it in the source file.
report_error("Text following codeblock end delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
active_type = "none"
# Force the indenting of the end-``` to match the beginning. This fixes problem #4.
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
print >>outfile, inputline
continue
if (active_type != "code") and ("```" in inputline) :
trace("start codeblock, delimiter line")
# detect start of codeblock
if re.search(r'[^\s][\s]*```', inputline) :
# If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
report_error("Text preceding codeblock start delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
if active_type == "text" or active_type == "bullet" :
print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
active_type = "code"
fix_prefix_blanks(active_type) # in inputline
print >>outfile, inputline
continue
if re.search(r'^[\s]*$', inputline) :
trace("blank line")
# detect blank lines
active_type = "none"
print >>outfile, inputline # Perhaps this should be print "" instead?
continue
if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
trace("bullet line")
# detect bullet line (numbered or not)
if (active_type == "text") :
print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
active_type = "bullet"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
if inputline.startswith("#") :
trace("header line")
# detects header lines, which are self-delimiting, and cannot have indenting
# Header line resets the indenting as well as current type
active_type = "none"
indent_stack.init_indent()
if re.search(r'^#[^#]', inputline) :
# First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
anchor_name = re.sub(r' ', "_", inputline[1:].strip())
anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
anchor_text = '<a name="' + anchor_name + '"></a>'
if H1_COUNT == 0 :
# Treat the first header differently - put the header after instead of before
# This is necessary to preserve document metadata titling in generated html.
# However, it means the title itself gets hidden above the top of window, when the link is used.
H1_COUNT = 1
print >>outfile, inputline
print >>outfile, anchor_text
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
else :
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
print >>outfile, anchor_text
print >>outfile, inputline
else :
# H2 or deeper level of header, doxia auto-generates anchor.
print >>outfile, inputline
continue
if re.search(r'^[\s]*#', inputline) :
trace("header line, bad")
report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
## default action -- last case in state machine switch
trace("text line")
# Everything else is text-like, and therefore continues active_type, unless none.
if (active_type == "none") :
# Start new text paragraph.
active_type = "text"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
else :
# This is just a continuation of current text or bullet.
# Indenting is irrelevant.
print >>outfile, inputline
continue
## end loop on inputlines
if (active_type == "code") :
report_error("Unmatched codeblock delimiter (```) detected.")
infile.close()
outfile.close()
os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace") | # Now swap out the bad href for the fixed one in inputline. | random_line_split |
fix-md-dialect.py | #########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
print >>sys.stderr, "ERROR: " + s
print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
print >>sys.stderr, inputline
exit(1)
def trace(msg) :
if TRACE :
print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
'This class maintains the indent stack during doc parsing.'
def __init__(self) :
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def init_indent(self) :
del self.my_stack
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def push_indent(self, n, new_type) :
#Increment the logical depth only if under a bullet type. This fixes problem #3.
level = self.logical_indent_level() + (self.current_type() == "bullet") # plus 1 if true
self.my_stack.append( {'physical':n, 'logical':level, 'type':new_type} )
def set_current_type(self, new_type) :
# adjust topmost type
self.my_stack[-1]['type'] = new_type
def pop_indent(self) :
if len(self.my_stack) > 1 :
return self.my_stack.pop()['physical']
else :
return 0
def current_indent(self) :
# top of stack, physical
return self.my_stack[-1]['physical']
def logical_indent_level(self) :
# top of stack, logical
return self.my_stack[-1]['logical']
def current_type(self) :
# top of stack, type
return self.my_stack[-1]['type']
## End class INDENT_STACK
global indent_stack
indent_stack = INDENT_STACK() # single instance
def | (s) :
# Courtesy of Python, this does a real column-aware tab expansion.
# If this doesn't work, we'll need to go back to erroring on " \t", that is, spaces followed by tabs.
trace("orig length {0}".format(len(s)) )
ct = s.count("\t")
s = s.expandtabs(4)
trace("after {0} tab substitutions, end length is {1}".format(ct, len(s)) )
return s
def fix_prefix_blanks(new_type) :
global inputline
# Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.
# Don't worry about blank lines here, they are filtered out before calling this method.
# Both uses and maintains the indent stack, which is why we need the new_type passed in.
prefix_blanks = re.search(r'^[\s]*', inputline)
if prefix_blanks :
prefix_blanks = prefix_blanks.group()
trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
prefix_blanks = convert_tabs(prefix_blanks)
else :
prefix_blanks = ""
trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
# prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
# The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
# If the indent is less than previous, is it equal to the indent of the next lower indented object?
# Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
# The latter case is an anomoly, but there's no enforcement in Github-MD.
# The following logic is an empirical reverse engineering, that seems adequate so far.
# It basically says, find a prior level of indent that this is not less than, and then pretend that
# the objects between it and this object weren't there.
trace("current logical_indent_level is {0} and current_indent is {1}".format(
indent_stack.logical_indent_level(), indent_stack.current_indent() ))
while len(prefix_blanks) < indent_stack.current_indent() :
indent_stack.pop_indent()
if len(prefix_blanks) > indent_stack.current_indent() :
indent_stack.push_indent(len(prefix_blanks), new_type)
else : # len(prefix_blanks) == indent_stack.current_indent()
indent_stack.set_current_type(new_type)
trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
"and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
# Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
# the logical indent level.
trace("Orig line is " + inputline)
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
trace("New line is " + inputline)
def rewrite_relative_links() :
global inputline
trace("entering with line: " + inputline)
# Fix up the relative links in inputline. This fixes problem #5.
num_links = inputline.count("](")
links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
num_whole_links = len(links)
trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
if (num_links != num_whole_links) :
if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
# Nested link label expressions, with '!'.
# Special case where a link value is inlined into the link label,
# as in the first line of the base README.md file. Bail on such lines.
trace("WARNING: Found nested link label expressions.")
return
else :
report_error("Found link split across multiple lines. We can't process this.")
for linkitem in links :
pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
trace("Link: " + linkitem)
trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
labeltext = pieces[1]
href = pieces[3]
trace("Extracted labeltext is: " + labeltext)
trace("Extracted href is: " + href)
if re.search(r'^http|\?', href) :
# Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
trace("skipping absolute or parameterized URL")
continue
# Rewrite implicit index references to explicit, so the book will work as well
# with 'file:///' preview as with a real web server.
# We are only concerned with file path names here, so split at '#' if present.
num_sharps = href.count("#")
if (num_sharps >= 2) :
report_error("Multiple #'s in a single link href.")
elif (num_sharps == 1) :
# Implicit index references are directory names, which seldom have a filetype suffix.
# On the other hand, explicit file references must have filetype, else the browser
# won't know what to do with it. So if no filetype extension, assume is a directory
# and add 'index.html'. Skip if this is an intra-document link.
if not re.search(r'^#|\.[^/#]+#', href) :
if not href.count("/#") :
href = re.sub(r'#', "/#", href, 1)
href = re.sub(r'/#', "/index.html#", href, 1)
# Fix up '.md' references.
href = re.sub(r'^README\.md#', "index.html#", href)
href = re.sub(r'/README\.md#', "/index.html#", href)
href = re.sub(r'\.md#', ".html#", href)
else : # num_sharps == 0
# Same logic as above, just at $ instead of #.
if not re.search(r'\.[^/]+$', href) :
if not href.endswith("/") :
href = href + "/"
href = re.sub(r'/$', "/index.html", href)
# Fix up '.md' references.
href = re.sub(r'^README\.md$', "index.html", href)
href = re.sub(r'/README\.md$', "/index.html", href)
href = re.sub(r'\.md$', ".html", href)
trace("After .md fixup, href is: " + href)
# Re-write named anchors referring to generated tags.
sharp = href.find("#")
if (sharp >= 0) :
named_anchor = href[sharp+1 : ]
trace('named_anchor = "' + named_anchor + '"')
trace('labeltext = "' + labeltext + '"')
scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
if (scratch == named_anchor) :
trace("Found a rewritable case")
scratch = labeltext # Doxia-markdown doesn't change case
scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
href = re.sub("#" + named_anchor, "#" + scratch, href)
trace("After anchor rewrite, href is: " + href)
# Now swap out the bad href for the fixed one in inputline.
if (href != pieces[3]) :
# Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
trace("Fixed link text is: " + scratch)
trace("linkitem is still: " + linkitem)
k = inputline.find(linkitem)
inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
global inputline, active_type
BLANKS = " "
TRACE = 0
FNR = -1
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
report_error("Please provide names of files to be processed, as command line arguments.")
for FILENAME in sys.argv[1:] :
infile = open(FILENAME, 'r')
outfile = open(FILENAME + ".tmp", 'w')
FNR = 0
H1_COUNT = 0
for inputline in infile :
FNR += 1
inputline = inputline.rstrip("\n")
if '](' in inputline :
# Detect lines with hyperlinks in them, and re-write them if necessary and possible.
# This is the only fall-through block, and we put it at the very beginning.
rewrite_relative_links(); # in inputline
# Fall through for further processing.
if (active_type == "code") and ("```" not in inputline) :
trace("in codeblock, regular line")
# what happens in the codeblock, stays in the codeblock
# Put this case first (after link detection), so we don't have to test it in all the other cases.
print >>outfile, inputline
continue
if (active_type == "code") and ("```" in inputline) :
trace("in codeblock, end delimiter line")
# detect end of codeblock
# This must be the second case.
if re.search(r'```[\s]*[^\s]', inputline) :
# If there's text following the end-``` on the same line, error out and fix it in the source file.
report_error("Text following codeblock end delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
active_type = "none"
# Force the indenting of the end-``` to match the beginning. This fixes problem #4.
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
print >>outfile, inputline
continue
if (active_type != "code") and ("```" in inputline) :
trace("start codeblock, delimiter line")
# detect start of codeblock
if re.search(r'[^\s][\s]*```', inputline) :
# If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
report_error("Text preceding codeblock start delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
if active_type == "text" or active_type == "bullet" :
print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
active_type = "code"
fix_prefix_blanks(active_type) # in inputline
print >>outfile, inputline
continue
if re.search(r'^[\s]*$', inputline) :
trace("blank line")
# detect blank lines
active_type = "none"
print >>outfile, inputline # Perhaps this should be print "" instead?
continue
if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
trace("bullet line")
# detect bullet line (numbered or not)
if (active_type == "text") :
print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
active_type = "bullet"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
if inputline.startswith("#") :
trace("header line")
# detects header lines, which are self-delimiting, and cannot have indenting
# Header line resets the indenting as well as current type
active_type = "none"
indent_stack.init_indent()
if re.search(r'^#[^#]', inputline) :
# First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
anchor_name = re.sub(r' ', "_", inputline[1:].strip())
anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
anchor_text = '<a name="' + anchor_name + '"></a>'
if H1_COUNT == 0 :
# Treat the first header differently - put the header after instead of before
# This is necessary to preserve document metadata titling in generated html.
# However, it means the title itself gets hidden above the top of window, when the link is used.
H1_COUNT = 1
print >>outfile, inputline
print >>outfile, anchor_text
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
else :
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
print >>outfile, anchor_text
print >>outfile, inputline
else :
# H2 or deeper level of header, doxia auto-generates anchor.
print >>outfile, inputline
continue
if re.search(r'^[\s]*#', inputline) :
trace("header line, bad")
report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
## default action -- last case in state machine switch
trace("text line")
# Everything else is text-like, and therefore continues active_type, unless none.
if (active_type == "none") :
# Start new text paragraph.
active_type = "text"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
else :
# This is just a continuation of current text or bullet.
# Indenting is irrelevant.
print >>outfile, inputline
continue
## end loop on inputlines
if (active_type == "code") :
report_error("Unmatched codeblock delimiter (```) detected.")
infile.close()
outfile.close()
os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace")
| convert_tabs | identifier_name |
fix-md-dialect.py | #########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
print >>sys.stderr, "ERROR: " + s
print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
print >>sys.stderr, inputline
exit(1)
def trace(msg) :
if TRACE :
print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
|
global indent_stack
indent_stack = INDENT_STACK() # single instance
def convert_tabs(s) :
# Courtesy of Python, this does a real column-aware tab expansion.
# If this doesn't work, we'll need to go back to erroring on " \t", that is, spaces followed by tabs.
trace("orig length {0}".format(len(s)) )
ct = s.count("\t")
s = s.expandtabs(4)
trace("after {0} tab substitutions, end length is {1}".format(ct, len(s)) )
return s
def fix_prefix_blanks(new_type) :
global inputline
# Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.
# Don't worry about blank lines here, they are filtered out before calling this method.
# Both uses and maintains the indent stack, which is why we need the new_type passed in.
prefix_blanks = re.search(r'^[\s]*', inputline)
if prefix_blanks :
prefix_blanks = prefix_blanks.group()
trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
prefix_blanks = convert_tabs(prefix_blanks)
else :
prefix_blanks = ""
trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
# prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
# The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
# If the indent is less than previous, is it equal to the indent of the next lower indented object?
# Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
# The latter case is an anomoly, but there's no enforcement in Github-MD.
# The following logic is an empirical reverse engineering, that seems adequate so far.
# It basically says, find a prior level of indent that this is not less than, and then pretend that
# the objects between it and this object weren't there.
trace("current logical_indent_level is {0} and current_indent is {1}".format(
indent_stack.logical_indent_level(), indent_stack.current_indent() ))
while len(prefix_blanks) < indent_stack.current_indent() :
indent_stack.pop_indent()
if len(prefix_blanks) > indent_stack.current_indent() :
indent_stack.push_indent(len(prefix_blanks), new_type)
else : # len(prefix_blanks) == indent_stack.current_indent()
indent_stack.set_current_type(new_type)
trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
"and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
# Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
# the logical indent level.
trace("Orig line is " + inputline)
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
trace("New line is " + inputline)
def rewrite_relative_links() :
global inputline
trace("entering with line: " + inputline)
# Fix up the relative links in inputline. This fixes problem #5.
num_links = inputline.count("](")
links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
num_whole_links = len(links)
trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
if (num_links != num_whole_links) :
if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
# Nested link label expressions, with '!'.
# Special case where a link value is inlined into the link label,
# as in the first line of the base README.md file. Bail on such lines.
trace("WARNING: Found nested link label expressions.")
return
else :
report_error("Found link split across multiple lines. We can't process this.")
for linkitem in links :
pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
trace("Link: " + linkitem)
trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
labeltext = pieces[1]
href = pieces[3]
trace("Extracted labeltext is: " + labeltext)
trace("Extracted href is: " + href)
if re.search(r'^http|\?', href) :
# Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
trace("skipping absolute or parameterized URL")
continue
# Rewrite implicit index references to explicit, so the book will work as well
# with 'file:///' preview as with a real web server.
# We are only concerned with file path names here, so split at '#' if present.
num_sharps = href.count("#")
if (num_sharps >= 2) :
report_error("Multiple #'s in a single link href.")
elif (num_sharps == 1) :
# Implicit index references are directory names, which seldom have a filetype suffix.
# On the other hand, explicit file references must have filetype, else the browser
# won't know what to do with it. So if no filetype extension, assume is a directory
# and add 'index.html'. Skip if this is an intra-document link.
if not re.search(r'^#|\.[^/#]+#', href) :
if not href.count("/#") :
href = re.sub(r'#', "/#", href, 1)
href = re.sub(r'/#', "/index.html#", href, 1)
# Fix up '.md' references.
href = re.sub(r'^README\.md#', "index.html#", href)
href = re.sub(r'/README\.md#', "/index.html#", href)
href = re.sub(r'\.md#', ".html#", href)
else : # num_sharps == 0
# Same logic as above, just at $ instead of #.
if not re.search(r'\.[^/]+$', href) :
if not href.endswith("/") :
href = href + "/"
href = re.sub(r'/$', "/index.html", href)
# Fix up '.md' references.
href = re.sub(r'^README\.md$', "index.html", href)
href = re.sub(r'/README\.md$', "/index.html", href)
href = re.sub(r'\.md$', ".html", href)
trace("After .md fixup, href is: " + href)
# Re-write named anchors referring to generated tags.
sharp = href.find("#")
if (sharp >= 0) :
named_anchor = href[sharp+1 : ]
trace('named_anchor = "' + named_anchor + '"')
trace('labeltext = "' + labeltext + '"')
scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
if (scratch == named_anchor) :
trace("Found a rewritable case")
scratch = labeltext # Doxia-markdown doesn't change case
scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
href = re.sub("#" + named_anchor, "#" + scratch, href)
trace("After anchor rewrite, href is: " + href)
# Now swap out the bad href for the fixed one in inputline.
if (href != pieces[3]) :
# Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
trace("Fixed link text is: " + scratch)
trace("linkitem is still: " + linkitem)
k = inputline.find(linkitem)
inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
global inputline, active_type
BLANKS = " "
TRACE = 0
FNR = -1
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
report_error("Please provide names of files to be processed, as command line arguments.")
for FILENAME in sys.argv[1:] :
infile = open(FILENAME, 'r')
outfile = open(FILENAME + ".tmp", 'w')
FNR = 0
H1_COUNT = 0
for inputline in infile :
FNR += 1
inputline = inputline.rstrip("\n")
if '](' in inputline :
# Detect lines with hyperlinks in them, and re-write them if necessary and possible.
# This is the only fall-through block, and we put it at the very beginning.
rewrite_relative_links(); # in inputline
# Fall through for further processing.
if (active_type == "code") and ("```" not in inputline) :
trace("in codeblock, regular line")
# what happens in the codeblock, stays in the codeblock
# Put this case first (after link detection), so we don't have to test it in all the other cases.
print >>outfile, inputline
continue
if (active_type == "code") and ("```" in inputline) :
trace("in codeblock, end delimiter line")
# detect end of codeblock
# This must be the second case.
if re.search(r'```[\s]*[^\s]', inputline) :
# If there's text following the end-``` on the same line, error out and fix it in the source file.
report_error("Text following codeblock end delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
active_type = "none"
# Force the indenting of the end-``` to match the beginning. This fixes problem #4.
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
print >>outfile, inputline
continue
if (active_type != "code") and ("```" in inputline) :
trace("start codeblock, delimiter line")
# detect start of codeblock
if re.search(r'[^\s][\s]*```', inputline) :
# If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
report_error("Text preceding codeblock start delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
if active_type == "text" or active_type == "bullet" :
print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
active_type = "code"
fix_prefix_blanks(active_type) # in inputline
print >>outfile, inputline
continue
if re.search(r'^[\s]*$', inputline) :
trace("blank line")
# detect blank lines
active_type = "none"
print >>outfile, inputline # Perhaps this should be print "" instead?
continue
if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
trace("bullet line")
# detect bullet line (numbered or not)
if (active_type == "text") :
print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
active_type = "bullet"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
if inputline.startswith("#") :
trace("header line")
# detects header lines, which are self-delimiting, and cannot have indenting
# Header line resets the indenting as well as current type
active_type = "none"
indent_stack.init_indent()
if re.search(r'^#[^#]', inputline) :
# First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
anchor_name = re.sub(r' ', "_", inputline[1:].strip())
anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
anchor_text = '<a name="' + anchor_name + '"></a>'
if H1_COUNT == 0 :
# Treat the first header differently - put the header after instead of before
# This is necessary to preserve document metadata titling in generated html.
# However, it means the title itself gets hidden above the top of window, when the link is used.
H1_COUNT = 1
print >>outfile, inputline
print >>outfile, anchor_text
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
else :
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
print >>outfile, anchor_text
print >>outfile, inputline
else :
# H2 or deeper level of header, doxia auto-generates anchor.
print >>outfile, inputline
continue
if re.search(r'^[\s]*#', inputline) :
trace("header line, bad")
report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
## default action -- last case in state machine switch
trace("text line")
# Everything else is text-like, and therefore continues active_type, unless none.
if (active_type == "none") :
# Start new text paragraph.
active_type = "text"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
else :
# This is just a continuation of current text or bullet.
# Indenting is irrelevant.
print >>outfile, inputline
continue
## end loop on inputlines
if (active_type == "code") :
report_error("Unmatched codeblock delimiter (```) detected.")
infile.close()
outfile.close()
os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace")
| 'This class maintains the indent stack during doc parsing.'
def __init__(self) :
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def init_indent(self) :
del self.my_stack
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def push_indent(self, n, new_type) :
#Increment the logical depth only if under a bullet type. This fixes problem #3.
level = self.logical_indent_level() + (self.current_type() == "bullet") # plus 1 if true
self.my_stack.append( {'physical':n, 'logical':level, 'type':new_type} )
def set_current_type(self, new_type) :
# adjust topmost type
self.my_stack[-1]['type'] = new_type
def pop_indent(self) :
if len(self.my_stack) > 1 :
return self.my_stack.pop()['physical']
else :
return 0
def current_indent(self) :
# top of stack, physical
return self.my_stack[-1]['physical']
def logical_indent_level(self) :
# top of stack, logical
return self.my_stack[-1]['logical']
def current_type(self) :
# top of stack, type
return self.my_stack[-1]['type']
## End class INDENT_STACK | identifier_body |
fix-md-dialect.py | #########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
print >>sys.stderr, "ERROR: " + s
print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
print >>sys.stderr, inputline
exit(1)
def trace(msg) :
if TRACE :
print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
'This class maintains the indent stack during doc parsing.'
def __init__(self) :
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def init_indent(self) :
del self.my_stack
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def push_indent(self, n, new_type) :
#Increment the logical depth only if under a bullet type. This fixes problem #3.
level = self.logical_indent_level() + (self.current_type() == "bullet") # plus 1 if true
self.my_stack.append( {'physical':n, 'logical':level, 'type':new_type} )
def set_current_type(self, new_type) :
# adjust topmost type
self.my_stack[-1]['type'] = new_type
def pop_indent(self) :
if len(self.my_stack) > 1 :
return self.my_stack.pop()['physical']
else :
return 0
def current_indent(self) :
# top of stack, physical
return self.my_stack[-1]['physical']
def logical_indent_level(self) :
# top of stack, logical
return self.my_stack[-1]['logical']
def current_type(self) :
# top of stack, type
return self.my_stack[-1]['type']
## End class INDENT_STACK
global indent_stack
indent_stack = INDENT_STACK() # single instance
def convert_tabs(s) :
# Courtesy of Python, this does a real column-aware tab expansion.
# If this doesn't work, we'll need to go back to erroring on " \t", that is, spaces followed by tabs.
trace("orig length {0}".format(len(s)) )
ct = s.count("\t")
s = s.expandtabs(4)
trace("after {0} tab substitutions, end length is {1}".format(ct, len(s)) )
return s
def fix_prefix_blanks(new_type) :
global inputline
# Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.
# Don't worry about blank lines here, they are filtered out before calling this method.
# Both uses and maintains the indent stack, which is why we need the new_type passed in.
prefix_blanks = re.search(r'^[\s]*', inputline)
if prefix_blanks :
prefix_blanks = prefix_blanks.group()
trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
prefix_blanks = convert_tabs(prefix_blanks)
else :
prefix_blanks = ""
trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
# prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
# The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
# If the indent is less than previous, is it equal to the indent of the next lower indented object?
# Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
# The latter case is an anomoly, but there's no enforcement in Github-MD.
# The following logic is an empirical reverse engineering, that seems adequate so far.
# It basically says, find a prior level of indent that this is not less than, and then pretend that
# the objects between it and this object weren't there.
trace("current logical_indent_level is {0} and current_indent is {1}".format(
indent_stack.logical_indent_level(), indent_stack.current_indent() ))
while len(prefix_blanks) < indent_stack.current_indent() :
indent_stack.pop_indent()
if len(prefix_blanks) > indent_stack.current_indent() :
indent_stack.push_indent(len(prefix_blanks), new_type)
else : # len(prefix_blanks) == indent_stack.current_indent()
indent_stack.set_current_type(new_type)
trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
"and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
# Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
# the logical indent level.
trace("Orig line is " + inputline)
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
trace("New line is " + inputline)
def rewrite_relative_links() :
global inputline
trace("entering with line: " + inputline)
# Fix up the relative links in inputline. This fixes problem #5.
num_links = inputline.count("](")
links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
num_whole_links = len(links)
trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
if (num_links != num_whole_links) :
if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
# Nested link label expressions, with '!'.
# Special case where a link value is inlined into the link label,
# as in the first line of the base README.md file. Bail on such lines.
trace("WARNING: Found nested link label expressions.")
return
else :
|
for linkitem in links :
pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
trace("Link: " + linkitem)
trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
labeltext = pieces[1]
href = pieces[3]
trace("Extracted labeltext is: " + labeltext)
trace("Extracted href is: " + href)
if re.search(r'^http|\?', href) :
# Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
trace("skipping absolute or parameterized URL")
continue
# Rewrite implicit index references to explicit, so the book will work as well
# with 'file:///' preview as with a real web server.
# We are only concerned with file path names here, so split at '#' if present.
num_sharps = href.count("#")
if (num_sharps >= 2) :
report_error("Multiple #'s in a single link href.")
elif (num_sharps == 1) :
# Implicit index references are directory names, which seldom have a filetype suffix.
# On the other hand, explicit file references must have filetype, else the browser
# won't know what to do with it. So if no filetype extension, assume is a directory
# and add 'index.html'. Skip if this is an intra-document link.
if not re.search(r'^#|\.[^/#]+#', href) :
if not href.count("/#") :
href = re.sub(r'#', "/#", href, 1)
href = re.sub(r'/#', "/index.html#", href, 1)
# Fix up '.md' references.
href = re.sub(r'^README\.md#', "index.html#", href)
href = re.sub(r'/README\.md#', "/index.html#", href)
href = re.sub(r'\.md#', ".html#", href)
else : # num_sharps == 0
# Same logic as above, just at $ instead of #.
if not re.search(r'\.[^/]+$', href) :
if not href.endswith("/") :
href = href + "/"
href = re.sub(r'/$', "/index.html", href)
# Fix up '.md' references.
href = re.sub(r'^README\.md$', "index.html", href)
href = re.sub(r'/README\.md$', "/index.html", href)
href = re.sub(r'\.md$', ".html", href)
trace("After .md fixup, href is: " + href)
# Re-write named anchors referring to generated tags.
sharp = href.find("#")
if (sharp >= 0) :
named_anchor = href[sharp+1 : ]
trace('named_anchor = "' + named_anchor + '"')
trace('labeltext = "' + labeltext + '"')
scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
if (scratch == named_anchor) :
trace("Found a rewritable case")
scratch = labeltext # Doxia-markdown doesn't change case
scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
href = re.sub("#" + named_anchor, "#" + scratch, href)
trace("After anchor rewrite, href is: " + href)
# Now swap out the bad href for the fixed one in inputline.
if (href != pieces[3]) :
# Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
trace("Fixed link text is: " + scratch)
trace("linkitem is still: " + linkitem)
k = inputline.find(linkitem)
inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
global inputline, active_type
BLANKS = " "
TRACE = 0
FNR = -1
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
report_error("Please provide names of files to be processed, as command line arguments.")
for FILENAME in sys.argv[1:] :
infile = open(FILENAME, 'r')
outfile = open(FILENAME + ".tmp", 'w')
FNR = 0
H1_COUNT = 0
for inputline in infile :
FNR += 1
inputline = inputline.rstrip("\n")
if '](' in inputline :
# Detect lines with hyperlinks in them, and re-write them if necessary and possible.
# This is the only fall-through block, and we put it at the very beginning.
rewrite_relative_links(); # in inputline
# Fall through for further processing.
if (active_type == "code") and ("```" not in inputline) :
trace("in codeblock, regular line")
# what happens in the codeblock, stays in the codeblock
# Put this case first (after link detection), so we don't have to test it in all the other cases.
print >>outfile, inputline
continue
if (active_type == "code") and ("```" in inputline) :
trace("in codeblock, end delimiter line")
# detect end of codeblock
# This must be the second case.
if re.search(r'```[\s]*[^\s]', inputline) :
# If there's text following the end-``` on the same line, error out and fix it in the source file.
report_error("Text following codeblock end delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
active_type = "none"
# Force the indenting of the end-``` to match the beginning. This fixes problem #4.
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
print >>outfile, inputline
continue
if (active_type != "code") and ("```" in inputline) :
trace("start codeblock, delimiter line")
# detect start of codeblock
if re.search(r'[^\s][\s]*```', inputline) :
# If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
report_error("Text preceding codeblock start delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
if active_type == "text" or active_type == "bullet" :
print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
active_type = "code"
fix_prefix_blanks(active_type) # in inputline
print >>outfile, inputline
continue
if re.search(r'^[\s]*$', inputline) :
trace("blank line")
# detect blank lines
active_type = "none"
print >>outfile, inputline # Perhaps this should be print "" instead?
continue
if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
trace("bullet line")
# detect bullet line (numbered or not)
if (active_type == "text") :
print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
active_type = "bullet"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
if inputline.startswith("#") :
trace("header line")
# detects header lines, which are self-delimiting, and cannot have indenting
# Header line resets the indenting as well as current type
active_type = "none"
indent_stack.init_indent()
if re.search(r'^#[^#]', inputline) :
# First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
anchor_name = re.sub(r' ', "_", inputline[1:].strip())
anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
anchor_text = '<a name="' + anchor_name + '"></a>'
if H1_COUNT == 0 :
# Treat the first header differently - put the header after instead of before
# This is necessary to preserve document metadata titling in generated html.
# However, it means the title itself gets hidden above the top of window, when the link is used.
H1_COUNT = 1
print >>outfile, inputline
print >>outfile, anchor_text
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
else :
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
print >>outfile, anchor_text
print >>outfile, inputline
else :
# H2 or deeper level of header, doxia auto-generates anchor.
print >>outfile, inputline
continue
if re.search(r'^[\s]*#', inputline) :
trace("header line, bad")
report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
## default action -- last case in state machine switch
trace("text line")
# Everything else is text-like, and therefore continues active_type, unless none.
if (active_type == "none") :
# Start new text paragraph.
active_type = "text"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
else :
# This is just a continuation of current text or bullet.
# Indenting is irrelevant.
print >>outfile, inputline
continue
## end loop on inputlines
if (active_type == "code") :
report_error("Unmatched codeblock delimiter (```) detected.")
infile.close()
outfile.close()
os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace")
| report_error("Found link split across multiple lines. We can't process this.") | conditional_block |
utils.js | const Apify = require('apify');
const Puppeteer = require('puppeteer'); // eslint-disable-line
const { PlacePaginationData, Review } = require('./typedefs'); // eslint-disable-line
| /**
* Wait until google map loader disappear
* @param {Puppeteer.Page} page
* @return {Promise<void>}
*/
const waitForGoogleMapLoader = async (page) => {
if (await page.$('#searchbox')) {
// @ts-ignore
await page.waitForFunction(() => !document.querySelector('#searchbox')
.classList.contains('loading'), { timeout: DEFAULT_TIMEOUT });
}
// 2019-05-19: New progress bar
await page.waitForFunction(() => !document.querySelector('.loading-pane-section-loading'), { timeout: DEFAULT_TIMEOUT });
};
/** @param {string} googleResponseString */
const stringifyGoogleXrhResponse = (googleResponseString) => {
return JSON.parse(googleResponseString.replace(')]}\'', ''));
};
/** @param {number} float */
const fixFloatNumber = (float) => Number(float.toFixed(7));
/**
* @param {any} result
* @param {boolean} isAdvertisement
*/
const parsePaginationResult = (result, isAdvertisement) => {
// index 14 has detailed data about each place
const detailInfoIndex = isAdvertisement ? 15 : 14;
const place = result[detailInfoIndex];
if (!place) {
return;
}
// Some places don't have any address
const addressDetail = place[183] ? place[183][1] : undefined;
const addressParsed = addressDetail ? {
neighborhood: addressDetail[1],
street: addressDetail[2],
city: addressDetail[3],
postalCode: addressDetail[4],
state: addressDetail[5],
countryCode: addressDetail[6],
} : undefined;
const coordsArr = place[9];
// TODO: Very rarely place[9] is empty, figure out why
const coords = coordsArr
? { lat: fixFloatNumber(coordsArr[2]), lng: fixFloatNumber(coordsArr[3]) }
: { lat: null, lng: null };
return {
placeId: place[78],
coords,
addressParsed,
isAdvertisement,
};
}
/**
* Response from google xhr is kind a weird. Mix of array of array.
* This function parse places from the response body.
* @param {Buffer} responseBodyBuffer
* @return {PlacePaginationData[]}
*/
const parseSearchPlacesResponseBody = (responseBodyBuffer) => {
/** @type {PlacePaginationData[]} */
const placePaginationData = [];
const jsonString = responseBodyBuffer
.toString('utf-8')
.replace('/*""*/', '');
const jsonObject = JSON.parse(jsonString);
const data = stringifyGoogleXrhResponse(jsonObject.d);
// We are paring ads but seems Google is not showing them to the scraper right now
const ads = (data[2] && data[2][1] && data[2][1][0]) || [];
ads.forEach((/** @type {any} */ ad) => {
const placeData = parsePaginationResult(ad, true);
if (placeData) {
placePaginationData.push(placeData);
} else {
log.warning(`[SEARCH]: Cannot find place data for advertisement in search.`)
}
})
/** @type {any} Too complex to type out*/
let organicResults = data[0][1];
// If the search goes to search results, the first one is not a place
// If the search goes to a place directly, the first one is that place
if (organicResults.length > 1) {
organicResults = organicResults.slice(1)
}
organicResults.forEach((/** @type {any} */ result ) => {
const placeData = parsePaginationResult(result, false);
if (placeData) {
placePaginationData.push(placeData);
} else {
log.warning(`[SEARCH]: Cannot find place data in search.`)
}
});
return placePaginationData;
};
/**
* Parses review from a single review array json Google format
* @param {any} jsonArray
* @param {string} reviewsTranslation
* @return {Review}
*/
const parseReviewFromJson = (jsonArray, reviewsTranslation) => {
let text = jsonArray[3];
// Optionally remove translation
// TODO: Perhaps the text is differentiated in the JSON
if (typeof text === 'string' && reviewsTranslation !== 'originalAndTranslated') {
const splitReviewText = text.split('\n\n(Original)\n');
if (reviewsTranslation === 'onlyOriginal') {
// Fallback if there is no translation
text = splitReviewText[1] || splitReviewText[0];
} else if (reviewsTranslation === 'onlyTranslated') {
text = splitReviewText[0];
}
text = text.replace('(Translated by Google)', '').replace('\n\n(Original)\n', '').trim();
}
return {
name: jsonArray[0][1],
text,
publishAt: jsonArray[1],
publishedAtDate: new Date(jsonArray[27]).toISOString(),
likesCount: jsonArray[15],
reviewId: jsonArray[10],
reviewUrl: jsonArray[18],
reviewerId: jsonArray[6],
reviewerUrl: jsonArray[0][0],
reviewerNumberOfReviews: jsonArray[12] && jsonArray[12][1] && jsonArray[12][1][1],
isLocalGuide: jsonArray[12] && jsonArray[12][1] && Array.isArray(jsonArray[12][1][0]),
// On some places google shows reviews from other services like booking
// There isn't stars but rating for this places reviews
stars: jsonArray[4] || null,
// Trip advisor
rating: jsonArray[25] ? jsonArray[25][1] : null,
responseFromOwnerDate: jsonArray[9] && jsonArray[9][3]
? new Date(jsonArray[9][3]).toISOString()
: null,
responseFromOwnerText: jsonArray[9] ? jsonArray[9][1] : null,
};
}
/**
* Response from google xhr is kind a weird. Mix of array of array.
* This function parse reviews from the response body.
* @param {Buffer | string} responseBody
* @param {string} reviewsTranslation
* @return [place]
*/
const parseReviewFromResponseBody = (responseBody, reviewsTranslation) => {
/** @type {Review[]} */
const currentReviews = [];
const stringBody = typeof responseBody === 'string'
? responseBody
: responseBody.toString('utf-8');
let results;
try {
results = stringifyGoogleXrhResponse(stringBody);
} catch (e) {
return { error: e.message };
}
if (!results || !results[2]) {
return { currentReviews };
}
results[2].forEach((/** @type {any} */ jsonArray) => {
const review = parseReviewFromJson(jsonArray, reviewsTranslation);
currentReviews.push(review);
});
return { currentReviews };
};
/**
* Method scrolls page to xpos, ypos.
* @param {Puppeteer.Page} page
* @param {string} selectorToScroll
* @param {number} scrollToHeight
*/
const scrollTo = async (page, selectorToScroll, scrollToHeight) => {
try {
await page.waitForSelector(selectorToScroll);
} catch (e) {
log.warning(`Could not find selector ${selectorToScroll} to scroll to - ${page.url()}`);
}
await page.evaluate((selector, height) => {
const scrollable = document.querySelector(selector);
scrollable.scrollTop = height;
}, selectorToScroll, scrollToHeight);
};
/** @param {string} url */
const parseZoomFromUrl = (url) => {
const zoomMatch = url.match(/@[0-9.-]+,[0-9.-]+,([0-9.]+)z/);
return zoomMatch ? Number(zoomMatch[1]) : null;
};
/** @param {string[]} imageUrls */
const enlargeImageUrls = (imageUrls) => {
// w1920-h1080
const FULL_RESOLUTION = {
width: 1920,
height: 1080,
};
return imageUrls.map((imageUrl) => {
const sizeMatch = imageUrl.match(/=s\d+/);
const widthHeightMatch = imageUrl.match(/=w\d+-h\d+/);
if (sizeMatch) {
return imageUrl.replace(sizeMatch[0], `=s${FULL_RESOLUTION.width}`);
}
if (widthHeightMatch) {
return imageUrl.replace(widthHeightMatch[0], `=w${FULL_RESOLUTION.width}-h${FULL_RESOLUTION.height}`);
}
return imageUrl;
});
};
/**
* Waits until a predicate (funcion that returns bool) returns true
*
* ```
* let eventFired = false;
* await waiter(() => eventFired, { timeout: 120000, pollInterval: 1000 })
* // Something happening elsewhere that will set eventFired to true
* ```
*
* @param {function} predicate
* @param {object} [options]
* @param {number} [options.timeout]
* @param {number} [options.pollInterval]
* @param {string} [options.timeoutErrorMeesage]
* @param {string} [options.successMessage]
*/
const waiter = async (predicate, options = {}) => {
const { timeout = 120000, pollInterval = 1000, timeoutErrorMeesage, successMessage } = options;
const start = Date.now();
for (;;) {
if (await predicate()) {
if (successMessage) {
log.info(successMessage);
}
return;
}
const waitingFor = Date.now() - start;
if (waitingFor > timeout) {
throw new Error(timeoutErrorMeesage || `Timeout reached when waiting for predicate for ${waitingFor} ms`);
}
await new Promise((resolve) => setTimeout(resolve, pollInterval));
}
};
/**
* @param {Puppeteer.Page} page
* @param {string} url
* @param {boolean} persistCookiesPerSession
* @param {Apify.Session | undefined} session
*/
const waitAndHandleConsentScreen = async (page, url, persistCookiesPerSession, session) => {
// TODO: Test if the new consent screen works well!
const predicate = async (shouldClick = false) => {
// handling consent page (usually shows up on startup), handles non .com domains
const consentButton = await page.$('[action^="https://consent.google"] button');
if (consentButton) {
if (shouldClick) {
await Promise.all([
page.waitForNavigation({ timeout: 60000 }),
consentButton.click()
]);
}
return true;
}
// handling consent frame in maps
// (this only happens rarely, but still happens)
for (const frame of page.mainFrame().childFrames()) {
if (frame.url().match(/consent\.google\.[a-z.]+/)) {
if (shouldClick) {
await frame.click('#introAgreeButton');
}
return true;
}
}
};
/**
* Puts the CONSENT Cookie into the session
*/
const updateCookies = async () => {
if (session) {
const cookies = await page.cookies(url);
// Without changing the domain, apify won't find the cookie later.
// Changing the domain can duplicate cookies in the saved session state, so only the necessary cookie is saved here.
if (cookies) {
let consentCookie = cookies.filter(cookie => cookie.name=="CONSENT")[0];
// overwrite the pending cookie to make sure, we don't set the pending cookie when Apify is fixed
session.setPuppeteerCookies([{... consentCookie}], "https://www.google.com/");
if (consentCookie) {
consentCookie.domain = "www.google.com"
}
session.setPuppeteerCookies([consentCookie], "https://www.google.com/");
}
} else {
log.warning("Session is undefined -> consent screen cookies not saved")
}
}
await waiter(predicate, {
timeout: 60000,
pollInterval: 500,
timeoutErrorMeesage: `Waiting for consent screen timeouted after 60000ms on URL: ${url}`,
successMessage: `Approved consent screen on URL: ${url}`,
});
await predicate(true);
if (persistCookiesPerSession) {
await updateCookies();
}
};
module.exports = {
waitForGoogleMapLoader,
parseSearchPlacesResponseBody,
parseReviewFromResponseBody,
parseReviewFromJson,
scrollTo,
parseZoomFromUrl,
enlargeImageUrls,
waiter,
waitAndHandleConsentScreen,
}; | const { DEFAULT_TIMEOUT } = require('./consts');
const { log } = Apify.utils;
| random_line_split |
utils.js | const Apify = require('apify');
const Puppeteer = require('puppeteer'); // eslint-disable-line
const { PlacePaginationData, Review } = require('./typedefs'); // eslint-disable-line
const { DEFAULT_TIMEOUT } = require('./consts');
const { log } = Apify.utils;
/**
* Wait until google map loader disappear
* @param {Puppeteer.Page} page
* @return {Promise<void>}
*/
const waitForGoogleMapLoader = async (page) => {
if (await page.$('#searchbox')) {
// @ts-ignore
await page.waitForFunction(() => !document.querySelector('#searchbox')
.classList.contains('loading'), { timeout: DEFAULT_TIMEOUT });
}
// 2019-05-19: New progress bar
await page.waitForFunction(() => !document.querySelector('.loading-pane-section-loading'), { timeout: DEFAULT_TIMEOUT });
};
/** @param {string} googleResponseString */
const stringifyGoogleXrhResponse = (googleResponseString) => {
return JSON.parse(googleResponseString.replace(')]}\'', ''));
};
/** @param {number} float */
const fixFloatNumber = (float) => Number(float.toFixed(7));
/**
* @param {any} result
* @param {boolean} isAdvertisement
*/
const parsePaginationResult = (result, isAdvertisement) => {
// index 14 has detailed data about each place
const detailInfoIndex = isAdvertisement ? 15 : 14;
const place = result[detailInfoIndex];
if (!place) {
return;
}
// Some places don't have any address
const addressDetail = place[183] ? place[183][1] : undefined;
const addressParsed = addressDetail ? {
neighborhood: addressDetail[1],
street: addressDetail[2],
city: addressDetail[3],
postalCode: addressDetail[4],
state: addressDetail[5],
countryCode: addressDetail[6],
} : undefined;
const coordsArr = place[9];
// TODO: Very rarely place[9] is empty, figure out why
const coords = coordsArr
? { lat: fixFloatNumber(coordsArr[2]), lng: fixFloatNumber(coordsArr[3]) }
: { lat: null, lng: null };
return {
placeId: place[78],
coords,
addressParsed,
isAdvertisement,
};
}
/**
* Response from google xhr is kind a weird. Mix of array of array.
* This function parse places from the response body.
* @param {Buffer} responseBodyBuffer
* @return {PlacePaginationData[]}
*/
const parseSearchPlacesResponseBody = (responseBodyBuffer) => {
/** @type {PlacePaginationData[]} */
const placePaginationData = [];
const jsonString = responseBodyBuffer
.toString('utf-8')
.replace('/*""*/', '');
const jsonObject = JSON.parse(jsonString);
const data = stringifyGoogleXrhResponse(jsonObject.d);
// We are paring ads but seems Google is not showing them to the scraper right now
const ads = (data[2] && data[2][1] && data[2][1][0]) || [];
ads.forEach((/** @type {any} */ ad) => {
const placeData = parsePaginationResult(ad, true);
if (placeData) {
placePaginationData.push(placeData);
} else {
log.warning(`[SEARCH]: Cannot find place data for advertisement in search.`)
}
})
/** @type {any} Too complex to type out*/
let organicResults = data[0][1];
// If the search goes to search results, the first one is not a place
// If the search goes to a place directly, the first one is that place
if (organicResults.length > 1) {
organicResults = organicResults.slice(1)
}
organicResults.forEach((/** @type {any} */ result ) => {
const placeData = parsePaginationResult(result, false);
if (placeData) {
placePaginationData.push(placeData);
} else {
log.warning(`[SEARCH]: Cannot find place data in search.`)
}
});
return placePaginationData;
};
/**
* Parses review from a single review array json Google format
* @param {any} jsonArray
* @param {string} reviewsTranslation
* @return {Review}
*/
const parseReviewFromJson = (jsonArray, reviewsTranslation) => {
let text = jsonArray[3];
// Optionally remove translation
// TODO: Perhaps the text is differentiated in the JSON
if (typeof text === 'string' && reviewsTranslation !== 'originalAndTranslated') {
const splitReviewText = text.split('\n\n(Original)\n');
if (reviewsTranslation === 'onlyOriginal') {
// Fallback if there is no translation
text = splitReviewText[1] || splitReviewText[0];
} else if (reviewsTranslation === 'onlyTranslated') {
text = splitReviewText[0];
}
text = text.replace('(Translated by Google)', '').replace('\n\n(Original)\n', '').trim();
}
return {
name: jsonArray[0][1],
text,
publishAt: jsonArray[1],
publishedAtDate: new Date(jsonArray[27]).toISOString(),
likesCount: jsonArray[15],
reviewId: jsonArray[10],
reviewUrl: jsonArray[18],
reviewerId: jsonArray[6],
reviewerUrl: jsonArray[0][0],
reviewerNumberOfReviews: jsonArray[12] && jsonArray[12][1] && jsonArray[12][1][1],
isLocalGuide: jsonArray[12] && jsonArray[12][1] && Array.isArray(jsonArray[12][1][0]),
// On some places google shows reviews from other services like booking
// There isn't stars but rating for this places reviews
stars: jsonArray[4] || null,
// Trip advisor
rating: jsonArray[25] ? jsonArray[25][1] : null,
responseFromOwnerDate: jsonArray[9] && jsonArray[9][3]
? new Date(jsonArray[9][3]).toISOString()
: null,
responseFromOwnerText: jsonArray[9] ? jsonArray[9][1] : null,
};
}
/**
* Response from google xhr is kind a weird. Mix of array of array.
* This function parse reviews from the response body.
* @param {Buffer | string} responseBody
* @param {string} reviewsTranslation
* @return [place]
*/
const parseReviewFromResponseBody = (responseBody, reviewsTranslation) => {
/** @type {Review[]} */
const currentReviews = [];
const stringBody = typeof responseBody === 'string'
? responseBody
: responseBody.toString('utf-8');
let results;
try {
results = stringifyGoogleXrhResponse(stringBody);
} catch (e) {
return { error: e.message };
}
if (!results || !results[2]) {
return { currentReviews };
}
results[2].forEach((/** @type {any} */ jsonArray) => {
const review = parseReviewFromJson(jsonArray, reviewsTranslation);
currentReviews.push(review);
});
return { currentReviews };
};
/**
* Method scrolls page to xpos, ypos.
* @param {Puppeteer.Page} page
* @param {string} selectorToScroll
* @param {number} scrollToHeight
*/
const scrollTo = async (page, selectorToScroll, scrollToHeight) => {
try {
await page.waitForSelector(selectorToScroll);
} catch (e) {
log.warning(`Could not find selector ${selectorToScroll} to scroll to - ${page.url()}`);
}
await page.evaluate((selector, height) => {
const scrollable = document.querySelector(selector);
scrollable.scrollTop = height;
}, selectorToScroll, scrollToHeight);
};
/** @param {string} url */
const parseZoomFromUrl = (url) => {
const zoomMatch = url.match(/@[0-9.-]+,[0-9.-]+,([0-9.]+)z/);
return zoomMatch ? Number(zoomMatch[1]) : null;
};
/** @param {string[]} imageUrls */
const enlargeImageUrls = (imageUrls) => {
// w1920-h1080
const FULL_RESOLUTION = {
width: 1920,
height: 1080,
};
return imageUrls.map((imageUrl) => {
const sizeMatch = imageUrl.match(/=s\d+/);
const widthHeightMatch = imageUrl.match(/=w\d+-h\d+/);
if (sizeMatch) {
return imageUrl.replace(sizeMatch[0], `=s${FULL_RESOLUTION.width}`);
}
if (widthHeightMatch) {
return imageUrl.replace(widthHeightMatch[0], `=w${FULL_RESOLUTION.width}-h${FULL_RESOLUTION.height}`);
}
return imageUrl;
});
};
/**
* Waits until a predicate (funcion that returns bool) returns true
*
* ```
* let eventFired = false;
* await waiter(() => eventFired, { timeout: 120000, pollInterval: 1000 })
* // Something happening elsewhere that will set eventFired to true
* ```
*
* @param {function} predicate
* @param {object} [options]
* @param {number} [options.timeout]
* @param {number} [options.pollInterval]
* @param {string} [options.timeoutErrorMeesage]
* @param {string} [options.successMessage]
*/
const waiter = async (predicate, options = {}) => {
const { timeout = 120000, pollInterval = 1000, timeoutErrorMeesage, successMessage } = options;
const start = Date.now();
for (;;) {
if (await predicate()) {
if (successMessage) |
return;
}
const waitingFor = Date.now() - start;
if (waitingFor > timeout) {
throw new Error(timeoutErrorMeesage || `Timeout reached when waiting for predicate for ${waitingFor} ms`);
}
await new Promise((resolve) => setTimeout(resolve, pollInterval));
}
};
/**
* @param {Puppeteer.Page} page
* @param {string} url
* @param {boolean} persistCookiesPerSession
* @param {Apify.Session | undefined} session
*/
const waitAndHandleConsentScreen = async (page, url, persistCookiesPerSession, session) => {
// TODO: Test if the new consent screen works well!
const predicate = async (shouldClick = false) => {
// handling consent page (usually shows up on startup), handles non .com domains
const consentButton = await page.$('[action^="https://consent.google"] button');
if (consentButton) {
if (shouldClick) {
await Promise.all([
page.waitForNavigation({ timeout: 60000 }),
consentButton.click()
]);
}
return true;
}
// handling consent frame in maps
// (this only happens rarely, but still happens)
for (const frame of page.mainFrame().childFrames()) {
if (frame.url().match(/consent\.google\.[a-z.]+/)) {
if (shouldClick) {
await frame.click('#introAgreeButton');
}
return true;
}
}
};
/**
* Puts the CONSENT Cookie into the session
*/
const updateCookies = async () => {
if (session) {
const cookies = await page.cookies(url);
// Without changing the domain, apify won't find the cookie later.
// Changing the domain can duplicate cookies in the saved session state, so only the necessary cookie is saved here.
if (cookies) {
let consentCookie = cookies.filter(cookie => cookie.name=="CONSENT")[0];
// overwrite the pending cookie to make sure, we don't set the pending cookie when Apify is fixed
session.setPuppeteerCookies([{... consentCookie}], "https://www.google.com/");
if (consentCookie) {
consentCookie.domain = "www.google.com"
}
session.setPuppeteerCookies([consentCookie], "https://www.google.com/");
}
} else {
log.warning("Session is undefined -> consent screen cookies not saved")
}
}
await waiter(predicate, {
timeout: 60000,
pollInterval: 500,
timeoutErrorMeesage: `Waiting for consent screen timeouted after 60000ms on URL: ${url}`,
successMessage: `Approved consent screen on URL: ${url}`,
});
await predicate(true);
if (persistCookiesPerSession) {
await updateCookies();
}
};
module.exports = {
waitForGoogleMapLoader,
parseSearchPlacesResponseBody,
parseReviewFromResponseBody,
parseReviewFromJson,
scrollTo,
parseZoomFromUrl,
enlargeImageUrls,
waiter,
waitAndHandleConsentScreen,
};
| {
log.info(successMessage);
} | conditional_block |
Flask_Server.py | # -*- coding: utf-8 -*-
"""Inception v3 architecture 모델을 retraining한 모델을 이용해서 이미지에 대한 추론(inference)을 진행하는 예제"""
from flask import Flask, render_template, request, jsonify
# from flask_restful import Api
from flask_cors import CORS
# from flask.ext.cache import Cache
import numpy as np
import tensorflow as tf
import urllib.request
import cv2
import requests
from PIL import Image
from io import BytesIO
modelFullPath = '/tmp/output_graph.pb' # 읽어들일 graph 파일 경로
labelsFullPath = '/tmp/output_labels.txt' # 읽어들일 labels 파일 경로
# imagePath = /tmp/test.jpg # 추론을 진행할 이미지 경로
app = Flask(__name__)
# cache = Cache(app, config={'CACHE_TYPE':'simple'})
CORS(app)
# cors = CORS(app, resources={
# r"*": {"origin" : "*"},
# })
# api = Api(app)
# Api.add_resource(T)
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
if request. | d == 'POST':
# 파라미터를 전달 받습니다.
tagImg = []
backImg = []
# imageSrc = request.form['avg_img']
imageSrc = request.json['imgs']
print(imageSrc)
create_graph()
imageType = ['tagImg', 'backImg']
for imgType in imageType:
print("imageSrc[%s] length : %d" % (imgType, len(imageSrc[imgType])))
print(imageSrc[imgType])
# print(imageSrc[imgType][0])
if (len(imageSrc[imgType]) == 0):
print("NoImage 경우.")
print(imageSrc[imgType])
# output.append("NoImage")
if (imgType == 'tagImg'):
tagImg.append("NoImage")
if (imgType == 'backImg'):
backImg.append("NoImage")
else:
count = 0
# print("else length : %d" %len(imageSrc[imgType]))
# print(imageSrc[imgType])
for imgs in imageSrc[imgType]:
# print("imageSrc in imgs :")
print(imgs)
if (imgs == '' or imgs == ' '):
print("공백임.")
# output.append("b'nood\\n'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
else:
count = count + 1
print("imgSrc count : %d" % count)
hdr = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
req = urllib.request.Request(imgs, headers=hdr)
# response = urllib.request.urlopen(req).read()
resp = urllib.request.urlopen(req)
info = resp.info()
# print(info.get_content_type())
# print(info.get_content_maintype()) # -> text
print("file type : %s" % (info.get_content_subtype()))
if (info.get_content_subtype() == 'jpeg') or (info.get_content_subtype() == 'png'):
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.imencode('.jpg', image)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# print(image)
elif info.get_content_subtype() == 'gif':
# print("gif file임")
img = BytesIO(resp.read())
img = Image.open(img)
mypalette = img.getpalette()
img.putpalette(mypalette)
new_im = Image.new("RGBA", img.size)
new_im.paste(img)
new_im = np.array(new_im)
image = cv2.imencode('.jpg', new_im)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# response = requests.get(imgs)
# img = Image.open(BytesIO(response.content))
# mypalette = img.getpalette()
# img.putpalette(mypalette)
# new_im = Image.new("RGBA",img.size)
# new_im.paste(img)
# new_im = np.array(new_im)
# output.append(run_inference_on_image(new_im))
else:
print("jepg png 아님.")
# output.append("b'nood'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
# output = ['normal', 'nood', 'gamble']
# print(imageSrc[0])
# print('start run_inference_on_image')
# print('finish run_inference_on_image')
output = {'tagImg': tagImg, 'backImg': backImg}
print(output)
return jsonify(imgs=output)
# return jsonify(result = output)
# return render_template('index.html', output=imageSrc)
# return render_template('index.html', output=output)
def url_to_image(url):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# return the image
return image
# urls = [
# "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png",
# "https://s.pstatic.net/static/newsstand/up/2017/0424/nsd154219877.png"
# ]
# loop over the image URLs
# for url in urls:
# # download the image URL and display it
# print ("downloading %s" % (url))
# image = url_to_image(url)
# cv2.imshow("Image", image)
# imagePath = url_to_image(url)
##########################################################################################
def create_graph():
"""저장된(saved) GraphDef 파일로부터 graph를 생성하고 saver를 반환한다."""
# 저장된(saved) graph_def.pb로부터 graph를 생성한다.
with tf.gfile.GFile(modelFullPath, 'rb') as f:
print("create_grpah()")
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# print('finish create_graph()')
with tf.device('/gpu:0'):
def run_inference_on_image(image):
# if not tf.gfile.Exists(imagePath):
# tf.logging.fatal('File does not exist %s', imagePath)
# return answer
###############################################################################
# image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
# url = "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png"
##############################################################################
# 저장된(saved) GraphDef 파일로부터 graph를 생성한다.
# print(type(image_data))
answer = None
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
# print('softmax_tensor')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image})
# print('sess.run')
predictions = np.squeeze(predictions)
# print('finish squeeze')
top_k = predictions.argsort()[-5:][::-1] # 가장 높은 확률을 가진 5개(top 5)의 예측값(predictions)을 얻는다.
# f = open(labelsFullPath, 'rb')
# #print('finish labelsFullPath open')
# lines = f.readlines()
# labels = [str(w).replace("\n", "") for w in lines]
# for node_id in top_k:
# human_string = labels[node_id]
# score = predictions[node_id]
# print('%s (score = %.5f)' % (human_string, score))
f = open(labelsFullPath, 'rb')
# lines = f.readlines()
lines = f.read().splitlines()
labels = [str(w).replace("\n", "") for w in lines]
# print(labels)
for node_id in top_k:
human_string = labels[node_id]
human_string = human_string[1:]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
answer = labels[top_k[0]][1:]
answer = answer.replace("'", "")
print(answer)
return answer
if __name__ == '__main__':
# start_time = time.time()
# app.run(host='0.0.0.0')
app.run(host='202.31.202.253', port=5000, threaded=False)
# end_time = time.time()
# print("WorkingTime: {} sec".format(end_time-start_time))
| metho | identifier_name |
Flask_Server.py | # -*- coding: utf-8 -*-
"""Inception v3 architecture 모델을 retraining한 모델을 이용해서 이미지에 대한 추론(inference)을 진행하는 예제"""
from flask import Flask, render_template, request, jsonify
# from flask_restful import Api
from flask_cors import CORS
# from flask.ext.cache import Cache
import numpy as np
import tensorflow as tf
import urllib.request
import cv2
import requests
from PIL import Image
from io import BytesIO
modelFullPath = '/tmp/output_graph.pb' # 읽어들일 graph 파일 경로
labelsFullPath = '/tmp/output_labels.txt' # 읽어들일 labels 파일 경로
# imagePath = /tmp/test.jpg # 추론을 진행할 이미지 경로
app = Flask(__name__)
# cache = Cache(app, config={'CACHE_TYPE':'simple'})
CORS(app)
# cors = CORS(app, resources={
# r"*": {"origin" : "*"},
# })
# api = Api(app)
# Api.add_resource(T)
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
if request.method == 'POST':
# 파라미터를 전달 받습니다.
tagImg = []
backImg = []
# imageSrc = request.form['avg_img']
imageSrc = request.json['imgs']
print(imageSrc)
create_graph()
imageType = ['tagImg', 'backImg']
for imgType in imageType:
print("imageSrc[%s] length : %d" % (imgType, len(imageSrc[imgType])))
print(imageSrc[imgType])
# print(imageSrc[imgType][0])
if (len(imageSrc[imgType]) == 0):
print("NoImage 경우.")
print(imageSrc[imgType])
# output.append("NoImage")
if (imgType == 'tagImg'):
tagImg.append("NoImage")
if (imgType == 'backImg'):
backImg.append("NoImage")
else:
count = 0
# print("else length : %d" %len(imageSrc[imgType]))
# print(imageSrc[imgType])
for imgs in imageSrc[imgType]:
# print("imageSrc in imgs :")
print(imgs)
if (imgs == '' or imgs == ' '):
print("공백임.")
# output.append("b'nood\\n'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
else:
count = count + 1
print("imgSrc count : %d" % count)
hdr = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
req = urllib.request.Request(imgs, headers=hdr)
# response = urllib.request.urlopen(req).read()
resp = urllib.request.urlopen(req)
info = resp.info()
# print(info.get_content_type())
# print(info.get_content_maintype()) # -> text
print("file type : %s" % (info.get_content_subtype()))
if (info.get_content_subtype() == 'jpeg') or (info.get_content_subtype() == 'png'):
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.imencode('.jpg', image)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
back |
# print(image)
elif info.get_content_subtype() == 'gif':
# print("gif file임")
img = BytesIO(resp.read())
img = Image.open(img)
mypalette = img.getpalette()
img.putpalette(mypalette)
new_im = Image.new("RGBA", img.size)
new_im.paste(img)
new_im = np.array(new_im)
image = cv2.imencode('.jpg', new_im)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# response = requests.get(imgs)
# img = Image.open(BytesIO(response.content))
# mypalette = img.getpalette()
# img.putpalette(mypalette)
# new_im = Image.new("RGBA",img.size)
# new_im.paste(img)
# new_im = np.array(new_im)
# output.append(run_inference_on_image(new_im))
else:
print("jepg png 아님.")
# output.append("b'nood'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
# output = ['normal', 'nood', 'gamble']
# print(imageSrc[0])
# print('start run_inference_on_image')
# print('finish run_inference_on_image')
output = {'tagImg': tagImg, 'backImg': backImg}
print(output)
return jsonify(imgs=output)
# return jsonify(result = output)
# return render_template('index.html', output=imageSrc)
# return render_template('index.html', output=output)
def url_to_image(url):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# return the image
return image
# urls = [
# "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png",
# "https://s.pstatic.net/static/newsstand/up/2017/0424/nsd154219877.png"
# ]
# loop over the image URLs
# for url in urls:
# # download the image URL and display it
# print ("downloading %s" % (url))
# image = url_to_image(url)
# cv2.imshow("Image", image)
# imagePath = url_to_image(url)
##########################################################################################
def create_graph():
"""저장된(saved) GraphDef 파일로부터 graph를 생성하고 saver를 반환한다."""
# 저장된(saved) graph_def.pb로부터 graph를 생성한다.
with tf.gfile.GFile(modelFullPath, 'rb') as f:
print("create_grpah()")
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# print('finish create_graph()')
with tf.device('/gpu:0'):
def run_inference_on_image(image):
# if not tf.gfile.Exists(imagePath):
# tf.logging.fatal('File does not exist %s', imagePath)
# return answer
###############################################################################
# image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
# url = "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png"
##############################################################################
# 저장된(saved) GraphDef 파일로부터 graph를 생성한다.
# print(type(image_data))
answer = None
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
# print('softmax_tensor')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image})
# print('sess.run')
predictions = np.squeeze(predictions)
# print('finish squeeze')
top_k = predictions.argsort()[-5:][::-1] # 가장 높은 확률을 가진 5개(top 5)의 예측값(predictions)을 얻는다.
# f = open(labelsFullPath, 'rb')
# #print('finish labelsFullPath open')
# lines = f.readlines()
# labels = [str(w).replace("\n", "") for w in lines]
# for node_id in top_k:
# human_string = labels[node_id]
# score = predictions[node_id]
# print('%s (score = %.5f)' % (human_string, score))
f = open(labelsFullPath, 'rb')
# lines = f.readlines()
lines = f.read().splitlines()
labels = [str(w).replace("\n", "") for w in lines]
# print(labels)
for node_id in top_k:
human_string = labels[node_id]
human_string = human_string[1:]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
answer = labels[top_k[0]][1:]
answer = answer.replace("'", "")
print(answer)
return answer
if __name__ == '__main__':
# start_time = time.time()
# app.run(host='0.0.0.0')
app.run(host='202.31.202.253', port=5000, threaded=False)
# end_time = time.time()
# print("WorkingTime: {} sec".format(end_time-start_time))
| Img.append(run_inference_on_image(image))
| conditional_block |
Flask_Server.py | # -*- coding: utf-8 -*-
"""Inception v3 architecture 모델을 retraining한 모델을 이용해서 이미지에 대한 추론(inference)을 진행하는 예제"""
from flask import Flask, render_template, request, jsonify
# from flask_restful import Api
from flask_cors import CORS
# from flask.ext.cache import Cache
import numpy as np
import tensorflow as tf
import urllib.request
import cv2
import requests
from PIL import Image
from io import BytesIO
modelFullPath = '/tmp/output_graph.pb' # 읽어들일 graph 파일 경로
labelsFullPath = '/tmp/output_labels.txt' # 읽어들일 labels 파일 경로
# imagePath = /tmp/test.jpg # 추론을 진행할 이미지 경로
app = Flask(__name__)
# cache = Cache(app, config={'CACHE_TYPE':'simple'})
CORS(app)
# cors = CORS(app, resources={
# r"*": {"origin" : "*"},
# })
# api = Api(app)
# Api.add_resource(T)
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
if request.method == 'POST':
# 파라미터를 전달 받습니다.
tagImg = []
backImg = []
# imageSrc = request.form['avg_img']
imageSrc = request.json['imgs']
print(imageSrc)
create_graph()
imageType = ['tagImg', 'backImg']
for imgType in imageType:
print("imageSrc[%s] length : %d" % (imgType, len(imageSrc[imgType])))
print(imageSrc[imgType])
# print(imageSrc[imgType][0])
if (len(imageSrc[imgType]) == 0):
print("NoImage 경우.")
print(imageSrc[imgType])
# output.append("NoImage")
if (imgType == 'tagImg'):
tagImg.append("NoImage")
if (imgType == 'backImg'):
backImg.append("NoImage")
else:
count = 0
# print("else length : %d" %len(imageSrc[imgType]))
# print(imageSrc[imgType])
for imgs in imageSrc[imgType]:
# print("imageSrc in imgs :")
print(imgs)
if (imgs == '' or imgs == ' '):
print("공백임.")
# output.append("b'nood\\n'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
else:
count = count + 1
print("imgSrc count : %d" % count)
hdr = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
req = urllib.request.Request(imgs, headers=hdr)
# response = urllib.request.urlopen(req).read()
resp = urllib.request.urlopen(req)
info = resp.info()
# print(info.get_content_type())
# print(info.get_content_maintype()) # -> text
print("file type : %s" % (info.get_content_subtype()))
if (info.get_content_subtype() == 'jpeg') or (info.get_content_subtype() == 'png'):
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.imencode('.jpg', image)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# print(image)
elif info.get_content_subtype() == 'gif':
# print("gif file임")
img = BytesIO(resp.read())
img = Image.open(img)
mypalette = img.getpalette()
img.putpalette(mypalette)
new_im = Image.new("RGBA", img.size)
new_im.paste(img)
new_im = np.array(new_im)
image = cv2.imencode('.jpg', new_im)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# response = requests.get(imgs)
# img = Image.open(BytesIO(response.content))
# mypalette = img.getpalette()
# img.putpalette(mypalette)
# new_im = Image.new("RGBA",img.size)
# new_im.paste(img)
# new_im = np.array(new_im)
# output.append(run_inference_on_image(new_im))
else:
print("jepg png 아님.")
# output.append("b'nood'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
# output = ['normal', 'nood', 'gamble']
# print(imageSrc[0])
# print('start run_inference_on_image')
# print('finish run_inference_on_image')
output = {'tagImg': tagImg, 'backImg': backImg}
print(output)
return jsonify(imgs=output)
# return jsonify(result = output)
# return render_template('index.html', output=imageSrc)
# return render_template('index.html', output=output)
def url_to_image(url):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# return the image
return image
# urls = [
# "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png",
# "https://s.pstatic.net/static/newsstand/up/2017/0424/nsd154219877.png"
# ]
# loop over the image URLs
# for url in urls:
# # download the image URL and display it
# print ("downloading %s" % (url))
# image = url_to_image(url)
# cv2.imshow("Image", image)
# imagePath = url_to_image(url)
##########################################################################################
def create_graph():
"""저장된(saved) GraphDef 파일로부터 graph를 생성하고 saver를 반환한다."""
# 저장된(saved) graph_def.pb로부터 graph를 생성한다.
with tf.gfile.GFile(modelFullPath, | return answer
###############################################################################
# image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
# url = "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png"
##############################################################################
# 저장된(saved) GraphDef 파일로부터 graph를 생성한다.
# print(type(image_data))
answer = None
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
# print('softmax_tensor')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image})
# print('sess.run')
predictions = np.squeeze(predictions)
# print('finish squeeze')
top_k = predictions.argsort()[-5:][::-1] # 가장 높은 확률을 가진 5개(top 5)의 예측값(predictions)을 얻는다.
# f = open(labelsFullPath, 'rb')
# #print('finish labelsFullPath open')
# lines = f.readlines()
# labels = [str(w).replace("\n", "") for w in lines]
# for node_id in top_k:
# human_string = labels[node_id]
# score = predictions[node_id]
# print('%s (score = %.5f)' % (human_string, score))
f = open(labelsFullPath, 'rb')
# lines = f.readlines()
lines = f.read().splitlines()
labels = [str(w).replace("\n", "") for w in lines]
# print(labels)
for node_id in top_k:
human_string = labels[node_id]
human_string = human_string[1:]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
answer = labels[top_k[0]][1:]
answer = answer.replace("'", "")
print(answer)
return answer
if __name__ == '__main__':
# start_time = time.time()
# app.run(host='0.0.0.0')
app.run(host='202.31.202.253', port=5000, threaded=False)
# end_time = time.time()
# print("WorkingTime: {} sec".format(end_time-start_time))
| 'rb') as f:
print("create_grpah()")
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# print('finish create_graph()')
with tf.device('/gpu:0'):
def run_inference_on_image(image):
# if not tf.gfile.Exists(imagePath):
# tf.logging.fatal('File does not exist %s', imagePath)
# | identifier_body |
Flask_Server.py | # -*- coding: utf-8 -*-
"""Inception v3 architecture 모델을 retraining한 모델을 이용해서 이미지에 대한 추론(inference)을 진행하는 예제"""
from flask import Flask, render_template, request, jsonify
# from flask_restful import Api
from flask_cors import CORS
# from flask.ext.cache import Cache
import numpy as np
import tensorflow as tf
import urllib.request
import cv2
import requests
from PIL import Image
from io import BytesIO
modelFullPath = '/tmp/output_graph.pb' # 읽어들일 graph 파일 경로
labelsFullPath = '/tmp/output_labels.txt' # 읽어들일 labels 파일 경로
# imagePath = /tmp/test.jpg # 추론을 진행할 이미지 경로
app = Flask(__name__)
# cache = Cache(app, config={'CACHE_TYPE':'simple'})
CORS(app)
# cors = CORS(app, resources={
|
# Api.add_resource(T)
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
if request.method == 'POST':
# 파라미터를 전달 받습니다.
tagImg = []
backImg = []
# imageSrc = request.form['avg_img']
imageSrc = request.json['imgs']
print(imageSrc)
create_graph()
imageType = ['tagImg', 'backImg']
for imgType in imageType:
print("imageSrc[%s] length : %d" % (imgType, len(imageSrc[imgType])))
print(imageSrc[imgType])
# print(imageSrc[imgType][0])
if (len(imageSrc[imgType]) == 0):
print("NoImage 경우.")
print(imageSrc[imgType])
# output.append("NoImage")
if (imgType == 'tagImg'):
tagImg.append("NoImage")
if (imgType == 'backImg'):
backImg.append("NoImage")
else:
count = 0
# print("else length : %d" %len(imageSrc[imgType]))
# print(imageSrc[imgType])
for imgs in imageSrc[imgType]:
# print("imageSrc in imgs :")
print(imgs)
if (imgs == '' or imgs == ' '):
print("공백임.")
# output.append("b'nood\\n'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
else:
count = count + 1
print("imgSrc count : %d" % count)
hdr = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
req = urllib.request.Request(imgs, headers=hdr)
# response = urllib.request.urlopen(req).read()
resp = urllib.request.urlopen(req)
info = resp.info()
# print(info.get_content_type())
# print(info.get_content_maintype()) # -> text
print("file type : %s" % (info.get_content_subtype()))
if (info.get_content_subtype() == 'jpeg') or (info.get_content_subtype() == 'png'):
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.imencode('.jpg', image)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# print(image)
elif info.get_content_subtype() == 'gif':
# print("gif file임")
img = BytesIO(resp.read())
img = Image.open(img)
mypalette = img.getpalette()
img.putpalette(mypalette)
new_im = Image.new("RGBA", img.size)
new_im.paste(img)
new_im = np.array(new_im)
image = cv2.imencode('.jpg', new_im)[1].tostring()
# output.append(run_inference_on_image(image))
if (imgType == 'tagImg'):
tagImg.append(run_inference_on_image(image))
if (imgType == 'backImg'):
backImg.append(run_inference_on_image(image))
# response = requests.get(imgs)
# img = Image.open(BytesIO(response.content))
# mypalette = img.getpalette()
# img.putpalette(mypalette)
# new_im = Image.new("RGBA",img.size)
# new_im.paste(img)
# new_im = np.array(new_im)
# output.append(run_inference_on_image(new_im))
else:
print("jepg png 아님.")
# output.append("b'nood'")
if (imgType == 'tagImg'):
tagImg.append("nood")
if (imgType == 'backImg'):
backImg.append("nood")
# output = ['normal', 'nood', 'gamble']
# print(imageSrc[0])
# print('start run_inference_on_image')
# print('finish run_inference_on_image')
output = {'tagImg': tagImg, 'backImg': backImg}
print(output)
return jsonify(imgs=output)
# return jsonify(result = output)
# return render_template('index.html', output=imageSrc)
# return render_template('index.html', output=output)
def url_to_image(url):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# return the image
return image
# urls = [
# "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png",
# "https://s.pstatic.net/static/newsstand/up/2017/0424/nsd154219877.png"
# ]
# loop over the image URLs
# for url in urls:
# # download the image URL and display it
# print ("downloading %s" % (url))
# image = url_to_image(url)
# cv2.imshow("Image", image)
# imagePath = url_to_image(url)
##########################################################################################
def create_graph():
"""저장된(saved) GraphDef 파일로부터 graph를 생성하고 saver를 반환한다."""
# 저장된(saved) graph_def.pb로부터 graph를 생성한다.
with tf.gfile.GFile(modelFullPath, 'rb') as f:
print("create_grpah()")
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# print('finish create_graph()')
with tf.device('/gpu:0'):
def run_inference_on_image(image):
# if not tf.gfile.Exists(imagePath):
# tf.logging.fatal('File does not exist %s', imagePath)
# return answer
###############################################################################
# image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
# url = "https://ssl.pstatic.net/tveta/libs/1240/1240155/1b47a8d4e3229d9531cf_20190510121017466.png"
##############################################################################
# 저장된(saved) GraphDef 파일로부터 graph를 생성한다.
# print(type(image_data))
answer = None
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
# print('softmax_tensor')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image})
# print('sess.run')
predictions = np.squeeze(predictions)
# print('finish squeeze')
top_k = predictions.argsort()[-5:][::-1] # 가장 높은 확률을 가진 5개(top 5)의 예측값(predictions)을 얻는다.
# f = open(labelsFullPath, 'rb')
# #print('finish labelsFullPath open')
# lines = f.readlines()
# labels = [str(w).replace("\n", "") for w in lines]
# for node_id in top_k:
# human_string = labels[node_id]
# score = predictions[node_id]
# print('%s (score = %.5f)' % (human_string, score))
f = open(labelsFullPath, 'rb')
# lines = f.readlines()
lines = f.read().splitlines()
labels = [str(w).replace("\n", "") for w in lines]
# print(labels)
for node_id in top_k:
human_string = labels[node_id]
human_string = human_string[1:]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
answer = labels[top_k[0]][1:]
answer = answer.replace("'", "")
print(answer)
return answer
if __name__ == '__main__':
# start_time = time.time()
# app.run(host='0.0.0.0')
app.run(host='202.31.202.253', port=5000, threaded=False)
# end_time = time.time()
# print("WorkingTime: {} sec".format(end_time-start_time)) | # r"*": {"origin" : "*"},
# })
# api = Api(app)
| random_line_split |
mallSwitcherDao.go | // Package switcher 数据库交互函数
package switcher
import (
"database/sql"
"errors"
"log"
)
// testDao 测试接口
func testDao(id string, db *sql.DB) ([]AName, error) {
var aname []AName
sql := "select name from test"
log.Println(sql)
rows, err := db.Query(sql)
if err != nil {
log.Println(err)
return aname, err
}
defer rows.Close()
for rows.Next() {
var aaname AName
var name string
err := rows.Scan(&name)
if err != nil {
log.Println(err)
return aname, err
}
aaname.Name = name
aname = append(aname, aaname)
}
return aname, err
}
// 用户收藏查询
func getCollectionDao(selectSQL string, db *sql.DB) ([]Collection, error) {
var collection []Collection
rows, err := db.Query(selectSQL)
if err != nil {
log.Println(err)
return collection, err
}
defer rows.Close()
for rows.Next() {
var c Collection
var id, goodsID int
var collectionType, goodsName, style, unit, price, width, height, compressPic, scenesURL string
err := rows.Scan(&id, &goodsID, &collectionType, &goodsName, &style, &unit, &price, &width, &height, &compressPic, &scenesURL)
if err != nil {
log.Println(err)
return collection, err
}
c.ID = id
c.GoodsID = goodsID
c.CollectionType = collectionType
c.GoodsName = goodsName
c.Style = style
c.Unit = unit
c.Price = price
c.Width = width
c.Height = height
c.CompressPic = compressPic
c.ScenesURL = scenesURL
collection = append(collection, c)
}
return collection, err
}
// 用户增加收藏
func insertCollectionDao(userid, shopid, goodsid int, collectionType, scenesURL string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加收藏sql
insertSQL := `insert into collection (userid,shopid,goodsid,type,scenesurl) values (?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, userid, shopid, goodsid, collectionType, scenesURL)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 用户取消收藏
func delCollectionDao(userid, collectionid int, db *sql.DB) error {
updateSQL := `UPDATE collection SET status = 0 WHERE id = ? and userid = ?`
_, err := db.Exec(updateSQL, userid, collectionid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 获取机器状态
func getMachineDao(machineid int, db *sql.DB) (Machine, error) {
var m Machine
selectSQL := `select id,machineid,shopid,slotnum,runstate,netstate,machineip from machine where state = 1 and id = ` + IntToString(machineid)
err := db.QueryRow(selectSQL).Scan(&m.ID, &m.MachineID, &m.ShopID, &m.SlotNum, &m.RunState, &m.NetState, &m.MachineIP)
if err != nil { | log.Println(err)
return m, err
}
return m, err
}
// 实体机器商品展示
func insertMachineTaskDao(machineid, userid, shopid, goodsid, slotid int, tx *sql.Tx) error {
// 增加机器任务sql
insertSQL := `insert into machinetask (machineid,shopid,userid,goodsid,slotid) values (?, ?, ?, ?, ?)`
_, err := tx.Exec(insertSQL, machineid, shopid, userid, goodsid, slotid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 增加模拟场景任务
func insertScenesTaskDao(shopid, userid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid int,
scenesid, scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl,
goodsBwidth, goodsBheight, goodsBspliceType string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加机器任务sql
insertSQL := `insert into scenestask (shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid,
scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid, scenesmenuurl1,
scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 查询模拟场景URL
func getScenesTaskURLDao(scenesTaskid int, db *sql.DB) (string, error) {
var scenesTaskURL string
selectSQL := `select url from scenestask where state = 1 and id = ` + IntToString(scenesTaskid)
err := db.QueryRow(selectSQL).Scan(&scenesTaskURL)
if err != nil {
log.Println(err)
return scenesTaskURL, err
}
return scenesTaskURL, err
}
// 根据url获取店铺id name
func getShopID(shopurl string, db *sql.DB) (UrlToId, error) {
var UTI UrlToId
selectSQL := "select id, shopname from shop where shopurl = ?"
err := db.QueryRow(selectSQL, shopurl).Scan(&UTI.ID, &UTI.ShopName)
if err != nil {
return UTI, err
}
return UTI, err
}
// 用户登录验证
func getUserLogin(phoneNum, password string, db *sql.DB) (string, error, interface{}) {
var UI UserInfo
var num int
selectSQL := "select ifnull(id, 0), ifnull(count(*), 0) from user where phoneNum = ? and password = ? and state = 1"
err := db.QueryRow(selectSQL, phoneNum, password).Scan(&UI.ID, &num)
if err != nil {
return "查询失败", err, UI
}
if num != 0 {
return "登录成功", err, UI
}
return "手机号或密码错误", errors.New("手机账号或密码错误"), UI
}
// 店铺信息查询 包括 首页展示商品的数量和阀值
func getIndex(shopid string, db *sql.DB) (IndexInfo, HotCondition, error) {
var II IndexInfo
var HC HotCondition
selectSQL := "select id, shopname, logoimg, shopshow, bannerimg, popularlimit, popularquantity from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&II.ID, &II.ShopName, &II.LogoImg, &II.ShopShow, &II.BannerImg, &HC.PopularLimit, &HC.PopularQuantity)
if err != nil {
return II, HC, err
}
return II, HC, err
}
// 爆款查询
func getHotGoods(shopid string, HC HotCondition, CLS []ClassList, db *sql.DB) ([]HotGoods, error) {
var HGS []HotGoods
for _, k := range CLS {
selectSQL := "select g.id, m.menuname, ifnull(x.clicknum, 0) as clicknum, ifnull(y.colornum, 0) as colornum, ifnull(y.smallpic, '') as smallpic from goods g left join menu m on g.menuid = m.id left join (select goodsid, count(*) as clicknum from goodsaccess a group by goodsid) x on g.id = x.goodsid left join (select goodsid, smallpic, count(*) as colornum from goodspic group by goodsid) y on g.id = y.goodsid where g.state = 1 and g.shopid = ? and menuid = ? and ifnull(x.clicknum, 0) > ? order by ifnull(y.colornum, 0) desc limit ?"
rows, err := db.Query(selectSQL, shopid, k.ID, HC.PopularLimit, HC.PopularQuantity)
if err != nil {
log.Println(err)
return HGS, err
}
defer rows.Close()
for rows.Next() {
var HG HotGoods
err := rows.Scan(&HG.ID, &HG.MenuName, &HG.ClickNum, &HG.ColorNum, &HG.SmallPic)
if err != nil {
log.Println(err)
return HGS, err
}
HGS = append(HGS, HG)
}
}
return HGS, nil
}
// 店铺联系方式查询
func getShopContactInfo(shopid string, db *sql.DB) (interface{}, error) {
var SCI ShopContactInfo
selectSQL := "select telnum, phonenum, wechat, wechaturl, location, lng, lat from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&SCI.Telnum, &SCI.Phonenum, &SCI.Wechat, &SCI.Wechaturl, &SCI.Location, &SCI.Lng, &SCI.Lat)
if err != nil {
return "查询失败", err
}
return SCI, err
}
// 全部分类查询(一级分类查询)
func getAllClassifyOne(shopid string, db *sql.DB) ([]AllClassOne, error) {
var ACOS []AllClassOne
selectSQL := "select id, menuname, superid from menu where state = 1 and superid = 0 and shopid = ?"
rows, err := db.Query(selectSQL, shopid)
if err != nil {
log.Println(err)
return ACOS, err
}
defer rows.Close()
for rows.Next() {
var ACO AllClassOne
err := rows.Scan(&ACO.ID, &ACO.MenuName, &ACO.SuperID)
if err != nil {
log.Println(err)
return ACOS, err
}
ACOS = append(ACOS, ACO)
}
return ACOS, err
}
// 全部分类查询(二级分类查询 带图片数量)
func getAllClassifyTwoWithPic(shopid, superid string, db *sql.DB) ([]AllClassTwo, error) {
var ACTS []AllClassTwo
selectSQL := `select z.id as menuid, z.menuname, z.superid, ifnull(y.num, 0) as num from (select id, menuname, superid from menu where superid = ? and shopid = ? and state = 1) z left join (select id, menuid, count(*) as num from goods where state = 1 and shopid = ? group by menuid) y on z.id = y.menuid group by z.id`
rows, err := db.Query(selectSQL, superid, shopid, shopid)
if err != nil {
log.Println(err)
return ACTS, err
}
defer rows.Close()
for rows.Next() {
var ACT AllClassTwo
err := rows.Scan(&ACT.ID, &ACT.MenuName, &ACT.SuperID, &ACT.Num)
if err != nil {
log.Println(err)
return ACTS, err
}
ACTS = append(ACTS, ACT)
}
return ACTS, err
}
// 全部选择分类查询
func getAllClassifyTwo(superid string, db *sql.DB) ([]ClassList, error) {
var CLS []ClassList
selectSQL := "select id, menuname from menu where superid = ? and state = 1"
rows, err := db.Query(selectSQL, superid)
if err != nil {
log.Println(err)
return CLS, err
}
defer rows.Close()
for rows.Next() {
var CL ClassList
err := rows.Scan(&CL.ID, &CL.MenuName)
if err != nil {
log.Println(err)
return CLS, err
}
CLS = append(CLS, CL)
}
return CLS, err
}
// 获取二级菜单下的商品总数
func getGoodsNum(menuid string, db *sql.DB) (int, error) {
var total int
selectSQL := "select count(*) from goods where menuid = ? and state = 1"
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return total, err
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&total)
if err != nil {
log.Println(err)
return total, err
}
}
return total, err
}
// 获取商品信息
func getGoods(menuid string, db *sql.DB) ([]GoodsList, error) {
var GLS []GoodsList
selectSQL := `select g.id, ifnull(t.num, 0) as clickNum, ifnull(x.smallpic, '') as smallpic, ifnull(x.num, 0) as colorNum
from goods g
left join (select goodsid, count(goodsid) as num from goodsaccess group by goodsid) t on g.id = t.goodsid
left join (select goodsid, smallpic, count(*) as num from goodspic where state = 1 group by goodsid) x on g.id = x.goodsid
where g.menuid = ?`
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return GLS, err
}
defer rows.Close()
for rows.Next() {
var GL GoodsList
err := rows.Scan(&GL.ID, &GL.ClickNum, &GL.SmallPic, &GL.ColorNum)
if err != nil {
log.Println(err)
return GLS, err
}
GLS = append(GLS, GL)
}
return GLS, err
}
// 获取商品详细内容
func getGoodsInfoDetail(goodsid string, db *sql.DB) (GoodsDetail, error) {
var GD GoodsDetail
selectSQL := `select g.id, g.goodsname, g.brand, g.style, g.unit, g.material, g.madein, g.price, g.width, g.height,
ifnull(s.machineid, 0) as machineid, ifnull(m.name, '') as machinename, ifnull(s.id, 0) as slotid,
ifnull(s.slotnum, 0) as slotnum from goods g
left join machineslot s on s.goodsid = g.id
left join machine m on m.id = s.machineid
where g.id = ?`
err := db.QueryRow(selectSQL, goodsid).Scan(&GD.ID, &GD.GoodsName, &GD.Brand, &GD.Style, &GD.Unit, &GD.Material, &GD.MadeIn, &GD.Price,
&GD.Width, &GD.Height, &GD.MachineDetail.MachineID, &GD.MachineDetail.MachineName, &GD.MachineDetail.SlotID, &GD.MachineDetail.SlotNum)
if err != nil {
log.Println(err)
}
return GD, err
}
// 获取商品是否收藏
func getIsCollect(goodsid, userid string, db *sql.DB) (int, int, error) {
var num int
var collectionid int
selectSQL := "select ifnull(id,0), count(*) from collection where goodsid = ? and userid = ? and state = 1"
err := db.QueryRow(selectSQL, goodsid, userid).Scan(&collectionid, &num)
if err != nil {
log.Println(err)
}
return collectionid, num, err
}
// 获取商品图片
func getGoodsPicDetail(goodsid string, db *sql.DB) ([]PicList, error) {
var PLS []PicList
selectSQL := "select id, model, pic, compresspic from goodspic where state = 1 and goodsid = ? order by isfirst desc"
rows, err := db.Query(selectSQL, goodsid)
if err != nil {
log.Println(err)
return PLS, err
}
defer rows.Close()
for rows.Next() {
var PL PicList
err := rows.Scan(&PL.ID, &PL.Model, &PL.Pic, &PL.Compresspic)
if err != nil {
log.Println(err)
return PLS, err
}
PLS = append(PLS, PL)
}
return PLS, err
}
// 店铺访问记录增加
func insertShopAccess(shopid, userid, ip string, db *sql.DB) (string, error) {
if shopid == "" {
return "店铺id参数为空", errors.New("店铺id参数为空")
}
insertSQL := "insert into shopaccess (shopid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, shopid, userid, ip)
if err != nil {
log.Println(err)
return "插入店铺访问记录失败", err
}
return "插入店铺访问记录成功", err
}
// 商品访问记录增加
func insertGoodsAccess(goodsid, userid, ip string, db *sql.DB) (string, error) {
if goodsid == "" {
return "商品id参数为空", errors.New("商品id参数为空")
}
insertSQL := "insert into goodsaccess (goodsid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, goodsid, userid, ip)
if err != nil {
log.Println(err)
return "插入商品访问记录失败", err
}
return "插入商品访问记录成功", err
} | random_line_split | |
mallSwitcherDao.go | // Package switcher 数据库交互函数
package switcher
import (
"database/sql"
"errors"
"log"
)
// testDao 测试接口
func testDao(id string, db *sql.DB) ([]AName, error) {
var aname []AName
sql := "select name from test"
log.Println(sql)
rows, err := db.Query(sql)
if err != nil {
log.Println(err)
return aname, err
}
defer rows.Close()
for rows.Next() {
var aaname AName
var name string
err := rows.Scan(&name)
if err != nil {
log.Println(err)
return aname, err
}
aaname.Name = name
aname = append(aname, aaname)
}
return aname, err
}
// 用户收藏查询
func getCollectionDao(selectSQL string, db *sql.DB) ([]Collection, error) {
var collection []Collection
ro | ao(userid, shopid, goodsid int, collectionType, scenesURL string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加收藏sql
insertSQL := `insert into collection (userid,shopid,goodsid,type,scenesurl) values (?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, userid, shopid, goodsid, collectionType, scenesURL)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 用户取消收藏
func delCollectionDao(userid, collectionid int, db *sql.DB) error {
updateSQL := `UPDATE collection SET status = 0 WHERE id = ? and userid = ?`
_, err := db.Exec(updateSQL, userid, collectionid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 获取机器状态
func getMachineDao(machineid int, db *sql.DB) (Machine, error) {
var m Machine
selectSQL := `select id,machineid,shopid,slotnum,runstate,netstate,machineip from machine where state = 1 and id = ` + IntToString(machineid)
err := db.QueryRow(selectSQL).Scan(&m.ID, &m.MachineID, &m.ShopID, &m.SlotNum, &m.RunState, &m.NetState, &m.MachineIP)
if err != nil {
log.Println(err)
return m, err
}
return m, err
}
// 实体机器商品展示
func insertMachineTaskDao(machineid, userid, shopid, goodsid, slotid int, tx *sql.Tx) error {
// 增加机器任务sql
insertSQL := `insert into machinetask (machineid,shopid,userid,goodsid,slotid) values (?, ?, ?, ?, ?)`
_, err := tx.Exec(insertSQL, machineid, shopid, userid, goodsid, slotid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 增加模拟场景任务
func insertScenesTaskDao(shopid, userid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid int,
scenesid, scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl,
goodsBwidth, goodsBheight, goodsBspliceType string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加机器任务sql
insertSQL := `insert into scenestask (shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid,
scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid, scenesmenuurl1,
scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 查询模拟场景URL
func getScenesTaskURLDao(scenesTaskid int, db *sql.DB) (string, error) {
var scenesTaskURL string
selectSQL := `select url from scenestask where state = 1 and id = ` + IntToString(scenesTaskid)
err := db.QueryRow(selectSQL).Scan(&scenesTaskURL)
if err != nil {
log.Println(err)
return scenesTaskURL, err
}
return scenesTaskURL, err
}
// 根据url获取店铺id name
func getShopID(shopurl string, db *sql.DB) (UrlToId, error) {
var UTI UrlToId
selectSQL := "select id, shopname from shop where shopurl = ?"
err := db.QueryRow(selectSQL, shopurl).Scan(&UTI.ID, &UTI.ShopName)
if err != nil {
return UTI, err
}
return UTI, err
}
// 用户登录验证
func getUserLogin(phoneNum, password string, db *sql.DB) (string, error, interface{}) {
var UI UserInfo
var num int
selectSQL := "select ifnull(id, 0), ifnull(count(*), 0) from user where phoneNum = ? and password = ? and state = 1"
err := db.QueryRow(selectSQL, phoneNum, password).Scan(&UI.ID, &num)
if err != nil {
return "查询失败", err, UI
}
if num != 0 {
return "登录成功", err, UI
}
return "手机号或密码错误", errors.New("手机账号或密码错误"), UI
}
// 店铺信息查询 包括 首页展示商品的数量和阀值
func getIndex(shopid string, db *sql.DB) (IndexInfo, HotCondition, error) {
var II IndexInfo
var HC HotCondition
selectSQL := "select id, shopname, logoimg, shopshow, bannerimg, popularlimit, popularquantity from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&II.ID, &II.ShopName, &II.LogoImg, &II.ShopShow, &II.BannerImg, &HC.PopularLimit, &HC.PopularQuantity)
if err != nil {
return II, HC, err
}
return II, HC, err
}
// 爆款查询
func getHotGoods(shopid string, HC HotCondition, CLS []ClassList, db *sql.DB) ([]HotGoods, error) {
var HGS []HotGoods
for _, k := range CLS {
selectSQL := "select g.id, m.menuname, ifnull(x.clicknum, 0) as clicknum, ifnull(y.colornum, 0) as colornum, ifnull(y.smallpic, '') as smallpic from goods g left join menu m on g.menuid = m.id left join (select goodsid, count(*) as clicknum from goodsaccess a group by goodsid) x on g.id = x.goodsid left join (select goodsid, smallpic, count(*) as colornum from goodspic group by goodsid) y on g.id = y.goodsid where g.state = 1 and g.shopid = ? and menuid = ? and ifnull(x.clicknum, 0) > ? order by ifnull(y.colornum, 0) desc limit ?"
rows, err := db.Query(selectSQL, shopid, k.ID, HC.PopularLimit, HC.PopularQuantity)
if err != nil {
log.Println(err)
return HGS, err
}
defer rows.Close()
for rows.Next() {
var HG HotGoods
err := rows.Scan(&HG.ID, &HG.MenuName, &HG.ClickNum, &HG.ColorNum, &HG.SmallPic)
if err != nil {
log.Println(err)
return HGS, err
}
HGS = append(HGS, HG)
}
}
return HGS, nil
}
// 店铺联系方式查询
func getShopContactInfo(shopid string, db *sql.DB) (interface{}, error) {
var SCI ShopContactInfo
selectSQL := "select telnum, phonenum, wechat, wechaturl, location, lng, lat from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&SCI.Telnum, &SCI.Phonenum, &SCI.Wechat, &SCI.Wechaturl, &SCI.Location, &SCI.Lng, &SCI.Lat)
if err != nil {
return "查询失败", err
}
return SCI, err
}
// 全部分类查询(一级分类查询)
func getAllClassifyOne(shopid string, db *sql.DB) ([]AllClassOne, error) {
var ACOS []AllClassOne
selectSQL := "select id, menuname, superid from menu where state = 1 and superid = 0 and shopid = ?"
rows, err := db.Query(selectSQL, shopid)
if err != nil {
log.Println(err)
return ACOS, err
}
defer rows.Close()
for rows.Next() {
var ACO AllClassOne
err := rows.Scan(&ACO.ID, &ACO.MenuName, &ACO.SuperID)
if err != nil {
log.Println(err)
return ACOS, err
}
ACOS = append(ACOS, ACO)
}
return ACOS, err
}
// 全部分类查询(二级分类查询 带图片数量)
func getAllClassifyTwoWithPic(shopid, superid string, db *sql.DB) ([]AllClassTwo, error) {
var ACTS []AllClassTwo
selectSQL := `select z.id as menuid, z.menuname, z.superid, ifnull(y.num, 0) as num from (select id, menuname, superid from menu where superid = ? and shopid = ? and state = 1) z left join (select id, menuid, count(*) as num from goods where state = 1 and shopid = ? group by menuid) y on z.id = y.menuid group by z.id`
rows, err := db.Query(selectSQL, superid, shopid, shopid)
if err != nil {
log.Println(err)
return ACTS, err
}
defer rows.Close()
for rows.Next() {
var ACT AllClassTwo
err := rows.Scan(&ACT.ID, &ACT.MenuName, &ACT.SuperID, &ACT.Num)
if err != nil {
log.Println(err)
return ACTS, err
}
ACTS = append(ACTS, ACT)
}
return ACTS, err
}
// 全部选择分类查询
func getAllClassifyTwo(superid string, db *sql.DB) ([]ClassList, error) {
var CLS []ClassList
selectSQL := "select id, menuname from menu where superid = ? and state = 1"
rows, err := db.Query(selectSQL, superid)
if err != nil {
log.Println(err)
return CLS, err
}
defer rows.Close()
for rows.Next() {
var CL ClassList
err := rows.Scan(&CL.ID, &CL.MenuName)
if err != nil {
log.Println(err)
return CLS, err
}
CLS = append(CLS, CL)
}
return CLS, err
}
// 获取二级菜单下的商品总数
func getGoodsNum(menuid string, db *sql.DB) (int, error) {
var total int
selectSQL := "select count(*) from goods where menuid = ? and state = 1"
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return total, err
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&total)
if err != nil {
log.Println(err)
return total, err
}
}
return total, err
}
// 获取商品信息
func getGoods(menuid string, db *sql.DB) ([]GoodsList, error) {
var GLS []GoodsList
selectSQL := `select g.id, ifnull(t.num, 0) as clickNum, ifnull(x.smallpic, '') as smallpic, ifnull(x.num, 0) as colorNum
from goods g
left join (select goodsid, count(goodsid) as num from goodsaccess group by goodsid) t on g.id = t.goodsid
left join (select goodsid, smallpic, count(*) as num from goodspic where state = 1 group by goodsid) x on g.id = x.goodsid
where g.menuid = ?`
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return GLS, err
}
defer rows.Close()
for rows.Next() {
var GL GoodsList
err := rows.Scan(&GL.ID, &GL.ClickNum, &GL.SmallPic, &GL.ColorNum)
if err != nil {
log.Println(err)
return GLS, err
}
GLS = append(GLS, GL)
}
return GLS, err
}
// 获取商品详细内容
func getGoodsInfoDetail(goodsid string, db *sql.DB) (GoodsDetail, error) {
var GD GoodsDetail
selectSQL := `select g.id, g.goodsname, g.brand, g.style, g.unit, g.material, g.madein, g.price, g.width, g.height,
ifnull(s.machineid, 0) as machineid, ifnull(m.name, '') as machinename, ifnull(s.id, 0) as slotid,
ifnull(s.slotnum, 0) as slotnum from goods g
left join machineslot s on s.goodsid = g.id
left join machine m on m.id = s.machineid
where g.id = ?`
err := db.QueryRow(selectSQL, goodsid).Scan(&GD.ID, &GD.GoodsName, &GD.Brand, &GD.Style, &GD.Unit, &GD.Material, &GD.MadeIn, &GD.Price,
&GD.Width, &GD.Height, &GD.MachineDetail.MachineID, &GD.MachineDetail.MachineName, &GD.MachineDetail.SlotID, &GD.MachineDetail.SlotNum)
if err != nil {
log.Println(err)
}
return GD, err
}
// 获取商品是否收藏
func getIsCollect(goodsid, userid string, db *sql.DB) (int, int, error) {
var num int
var collectionid int
selectSQL := "select ifnull(id,0), count(*) from collection where goodsid = ? and userid = ? and state = 1"
err := db.QueryRow(selectSQL, goodsid, userid).Scan(&collectionid, &num)
if err != nil {
log.Println(err)
}
return collectionid, num, err
}
// 获取商品图片
func getGoodsPicDetail(goodsid string, db *sql.DB) ([]PicList, error) {
var PLS []PicList
selectSQL := "select id, model, pic, compresspic from goodspic where state = 1 and goodsid = ? order by isfirst desc"
rows, err := db.Query(selectSQL, goodsid)
if err != nil {
log.Println(err)
return PLS, err
}
defer rows.Close()
for rows.Next() {
var PL PicList
err := rows.Scan(&PL.ID, &PL.Model, &PL.Pic, &PL.Compresspic)
if err != nil {
log.Println(err)
return PLS, err
}
PLS = append(PLS, PL)
}
return PLS, err
}
// 店铺访问记录增加
func insertShopAccess(shopid, userid, ip string, db *sql.DB) (string, error) {
if shopid == "" {
return "店铺id参数为空", errors.New("店铺id参数为空")
}
insertSQL := "insert into shopaccess (shopid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, shopid, userid, ip)
if err != nil {
log.Println(err)
return "插入店铺访问记录失败", err
}
return "插入店铺访问记录成功", err
}
// 商品访问记录增加
func insertGoodsAccess(goodsid, userid, ip string, db *sql.DB) (string, error) {
if goodsid == "" {
return "商品id参数为空", errors.New("商品id参数为空")
}
insertSQL := "insert into goodsaccess (goodsid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, goodsid, userid, ip)
if err != nil {
log.Println(err)
return "插入商品访问记录失败", err
}
return "插入商品访问记录成功", err
}
| ws, err := db.Query(selectSQL)
if err != nil {
log.Println(err)
return collection, err
}
defer rows.Close()
for rows.Next() {
var c Collection
var id, goodsID int
var collectionType, goodsName, style, unit, price, width, height, compressPic, scenesURL string
err := rows.Scan(&id, &goodsID, &collectionType, &goodsName, &style, &unit, &price, &width, &height, &compressPic, &scenesURL)
if err != nil {
log.Println(err)
return collection, err
}
c.ID = id
c.GoodsID = goodsID
c.CollectionType = collectionType
c.GoodsName = goodsName
c.Style = style
c.Unit = unit
c.Price = price
c.Width = width
c.Height = height
c.CompressPic = compressPic
c.ScenesURL = scenesURL
collection = append(collection, c)
}
return collection, err
}
// 用户增加收藏
func insertCollectionD | identifier_body |
mallSwitcherDao.go | // Package switcher 数据库交互函数
package switcher
import (
"database/sql"
"errors"
"log"
)
// testDao 测试接口
func testDao(id string, db *sql.DB) ([]AName, error) {
var aname []AName
sql := "select name from test"
log.Println(sql)
rows, err := db.Query(sql)
if err != nil {
log.Println(err)
return aname, err
}
defer rows.Close()
for rows.Next() {
var aaname AName
var name string
err := rows.Scan(&name)
if err != nil {
log.Println(err)
return aname, err
}
aaname.Name = name
aname = append(aname, aaname)
}
return aname, err
}
// 用户收藏查询
func getCollectionDao(selectSQL string, db *sql.DB) ([]Collection, error) {
var collection []Collection
rows, err := db.Query(selectSQL)
if err != nil {
log.Println(err)
return collection, err
}
defer rows.Close()
for rows.Next() {
var c Collection
var id, goodsID int
var collectionType, goodsName, style, unit, price, width, height, compressPic, scenesURL string
err := rows.Scan(&id, &goodsID, &collectionType, &goodsName, &style, &unit, &price, &width, &height, &compressPic, &scenesURL)
if err != nil {
log.Println(err)
return collection, err
}
c.ID = id
c.GoodsID = goodsID
c.CollectionType = collectionType
c.GoodsName = goodsName
c.Style = style
c.Unit = unit
c.Price = price
c.Width = width
c.Height = height
c.CompressPic = compressPic
c.ScenesURL = scenesURL
collection = append(collection, c)
}
return collection, err
}
// 用户增加收藏
func insertCollectionDao(userid, shopid, goodsid int, collectionType, scenesURL string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加收藏sql
insertSQL := `insert into collection (userid,shopid,goodsid,type,scenesurl) values (?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, userid, shopid, goodsid, collectionType, scenesURL)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 用户取消收藏
func delCollectionDao(userid, collectionid int, db *sql.DB) error {
updateSQL := `UPDATE collection SET status = 0 WHERE id = ? and userid = ?`
_, err := db.Exec(updateSQL, userid, collectionid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 获取机器状态
func getMachineDao(machineid int, db *sql.DB) (Machine, error) {
var m Machine
selectSQL := `select id,machineid,shopid,slotnum,runstate,netstate,machineip from machine where state = 1 and id = ` + IntToString(machineid)
err := db.QueryRow(selectSQL).Scan(&m.ID, &m.MachineID, &m.ShopID, &m.SlotNum, &m.RunState, &m.NetState, &m.MachineIP)
if err != nil {
log.Println(err)
return m, err
}
return m, err
}
// 实体机器商品展示
func insertMachineTaskDao(machineid, userid, shopid, goodsid, slotid int, tx *sql.Tx) error {
// 增加机器任务sql
insertSQL := `insert into machinetask (machineid,shopid,userid,goodsid,slotid) values (?, ?, ?, ?, ?)`
_, err := tx.Exec(insertSQL, machineid, shopid, userid, goodsid, slotid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 增加模拟场景任务
func insertScenesTaskDao(shopid, userid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid int,
scenesid, scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl,
goodsBwidth, goodsBheight, goodsBspliceType string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加机器任务sql
insertSQL := `insert into scenestask (shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid,
scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid, scenesmenuurl1,
scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 查询模拟场景URL
func getScenesTaskURLDao(scenesTaskid int, db *sql.DB) (string, error) {
var scenesTaskURL string
selectSQL := `select url from scenestask where state = 1 and id = ` + IntToString(scenesTaskid)
err := db.QueryRow(selectSQL).Scan(&scenesTaskURL)
if err != nil {
log.Println(err)
return scenesTaskURL, err
}
return scenesTaskURL, err
}
// 根据url获取店铺id name
func getShopID(shopurl string, db *sql.DB) (UrlToId, error) {
var UTI UrlToId
selectSQL := "select id, shopname from shop where shopurl = ?"
err := db.QueryRow(selectSQL, shopurl).Scan(&UTI.ID, &UTI.ShopName)
if err != nil {
return UTI, err
}
return UTI, err
}
// 用户登录验证
func getUserLogin(phoneNum, password string, db *sql.DB) (string, error, interface{}) {
var UI UserInfo
var num int
selectSQL := "select ifnull(id, 0), ifnull(count(*), 0) from user where phoneNum = ? and password = ? and state = 1"
err := db.QueryRow(selectSQL, phoneNum, password).Scan(&UI.ID, &num)
if err != nil {
return "查询失败", err, UI
}
if num != 0 {
return "登录成功", err, UI
}
return "手机号或密码错误", errors.New("手机账号或密码错误"), UI
}
// 店铺信息查询 包括 首页展示商品的数量和阀值
func getIndex(shopid string, db *sql.DB) (IndexInfo, HotCondition, error) {
var II IndexInfo
var HC HotCondition
selectSQL := "select id, shopname, logoimg, shopshow, bannerimg, popularlimit, popularquantity from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&II.ID, &II.ShopName, &II.LogoImg, &II.ShopShow, &II.BannerImg, &HC.PopularLimit, &HC.PopularQuantity)
if err != nil {
return II, HC, err
}
return II, HC, err
}
// 爆款查询
func getHotGoods(shopid string, HC HotCondition, CLS []ClassList, db *sql.DB) ([]HotGoods, error) {
var HGS []HotGoods
for _, k := range CLS {
selectSQL := "select g.id, m.menuname, ifnull(x.clicknum, 0) as clicknum, ifnull(y.colornum, 0) as colornum, ifnull(y.smallpic, '') as smallpic from goods g left join menu m on g.menuid = m.id left join (select goodsid, count(*) as clicknum from goodsaccess a group by goodsid) x on g.id = x.goodsid left join (select goodsid, smallpic, count(*) as colornum from goodspic group by goodsid) y on g.id = y.goodsid where g.state = 1 and g.shopid = ? and menuid = ? and ifnull(x.clicknum, 0) > ? order by ifnull(y.colornum, 0) desc limit ?"
rows, err := db.Query(selectSQL, shopid, k.ID, HC.PopularLimit, HC.PopularQuantity)
if err != nil {
log.Println(err)
return HGS, err
}
defer rows.Close()
for rows.Next() {
var HG HotGoods
err := rows.Scan(&HG.ID, &HG.MenuName, &HG.ClickNum, &HG.ColorNum, &HG.SmallPic)
if err != nil {
log.Println(err)
return HGS, err
}
HGS = append(HGS, HG)
}
}
return HGS, nil
}
// 店铺联系方式查询
func getShopContactInfo(shopid string, db *sql.DB) (interface{}, error) {
var SCI ShopContactInfo
selectSQL := "select telnum, phonenum, wechat, wechaturl, location, lng, lat from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&SCI.Telnum, &SCI.Phonenum, &SCI.Wechat, &SCI.Wechaturl, &SCI.Location, &SCI.Lng, &SCI.Lat)
if err != nil {
return "查询失败", err
}
return SCI, err
}
// 全部分类查询(一级分类查询)
func getAllClassifyOne(shopid string, db *sql.DB) ([]AllClassOne, error) {
var ACOS []AllClassOne
selectSQL := "select id, menuname, superid from menu where state = 1 and superid = 0 and shopid = ?"
rows, err := db.Qu | nil {
log.Println(err)
return ACOS, err
}
defer rows.Close()
for rows.Next() {
var ACO AllClassOne
err := rows.Scan(&ACO.ID, &ACO.MenuName, &ACO.SuperID)
if err != nil {
log.Println(err)
return ACOS, err
}
ACOS = append(ACOS, ACO)
}
return ACOS, err
}
// 全部分类查询(二级分类查询 带图片数量)
func getAllClassifyTwoWithPic(shopid, superid string, db *sql.DB) ([]AllClassTwo, error) {
var ACTS []AllClassTwo
selectSQL := `select z.id as menuid, z.menuname, z.superid, ifnull(y.num, 0) as num from (select id, menuname, superid from menu where superid = ? and shopid = ? and state = 1) z left join (select id, menuid, count(*) as num from goods where state = 1 and shopid = ? group by menuid) y on z.id = y.menuid group by z.id`
rows, err := db.Query(selectSQL, superid, shopid, shopid)
if err != nil {
log.Println(err)
return ACTS, err
}
defer rows.Close()
for rows.Next() {
var ACT AllClassTwo
err := rows.Scan(&ACT.ID, &ACT.MenuName, &ACT.SuperID, &ACT.Num)
if err != nil {
log.Println(err)
return ACTS, err
}
ACTS = append(ACTS, ACT)
}
return ACTS, err
}
// 全部选择分类查询
func getAllClassifyTwo(superid string, db *sql.DB) ([]ClassList, error) {
var CLS []ClassList
selectSQL := "select id, menuname from menu where superid = ? and state = 1"
rows, err := db.Query(selectSQL, superid)
if err != nil {
log.Println(err)
return CLS, err
}
defer rows.Close()
for rows.Next() {
var CL ClassList
err := rows.Scan(&CL.ID, &CL.MenuName)
if err != nil {
log.Println(err)
return CLS, err
}
CLS = append(CLS, CL)
}
return CLS, err
}
// 获取二级菜单下的商品总数
func getGoodsNum(menuid string, db *sql.DB) (int, error) {
var total int
selectSQL := "select count(*) from goods where menuid = ? and state = 1"
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return total, err
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&total)
if err != nil {
log.Println(err)
return total, err
}
}
return total, err
}
// 获取商品信息
func getGoods(menuid string, db *sql.DB) ([]GoodsList, error) {
var GLS []GoodsList
selectSQL := `select g.id, ifnull(t.num, 0) as clickNum, ifnull(x.smallpic, '') as smallpic, ifnull(x.num, 0) as colorNum
from goods g
left join (select goodsid, count(goodsid) as num from goodsaccess group by goodsid) t on g.id = t.goodsid
left join (select goodsid, smallpic, count(*) as num from goodspic where state = 1 group by goodsid) x on g.id = x.goodsid
where g.menuid = ?`
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return GLS, err
}
defer rows.Close()
for rows.Next() {
var GL GoodsList
err := rows.Scan(&GL.ID, &GL.ClickNum, &GL.SmallPic, &GL.ColorNum)
if err != nil {
log.Println(err)
return GLS, err
}
GLS = append(GLS, GL)
}
return GLS, err
}
// 获取商品详细内容
func getGoodsInfoDetail(goodsid string, db *sql.DB) (GoodsDetail, error) {
var GD GoodsDetail
selectSQL := `select g.id, g.goodsname, g.brand, g.style, g.unit, g.material, g.madein, g.price, g.width, g.height,
ifnull(s.machineid, 0) as machineid, ifnull(m.name, '') as machinename, ifnull(s.id, 0) as slotid,
ifnull(s.slotnum, 0) as slotnum from goods g
left join machineslot s on s.goodsid = g.id
left join machine m on m.id = s.machineid
where g.id = ?`
err := db.QueryRow(selectSQL, goodsid).Scan(&GD.ID, &GD.GoodsName, &GD.Brand, &GD.Style, &GD.Unit, &GD.Material, &GD.MadeIn, &GD.Price,
&GD.Width, &GD.Height, &GD.MachineDetail.MachineID, &GD.MachineDetail.MachineName, &GD.MachineDetail.SlotID, &GD.MachineDetail.SlotNum)
if err != nil {
log.Println(err)
}
return GD, err
}
// 获取商品是否收藏
func getIsCollect(goodsid, userid string, db *sql.DB) (int, int, error) {
var num int
var collectionid int
selectSQL := "select ifnull(id,0), count(*) from collection where goodsid = ? and userid = ? and state = 1"
err := db.QueryRow(selectSQL, goodsid, userid).Scan(&collectionid, &num)
if err != nil {
log.Println(err)
}
return collectionid, num, err
}
// 获取商品图片
func getGoodsPicDetail(goodsid string, db *sql.DB) ([]PicList, error) {
var PLS []PicList
selectSQL := "select id, model, pic, compresspic from goodspic where state = 1 and goodsid = ? order by isfirst desc"
rows, err := db.Query(selectSQL, goodsid)
if err != nil {
log.Println(err)
return PLS, err
}
defer rows.Close()
for rows.Next() {
var PL PicList
err := rows.Scan(&PL.ID, &PL.Model, &PL.Pic, &PL.Compresspic)
if err != nil {
log.Println(err)
return PLS, err
}
PLS = append(PLS, PL)
}
return PLS, err
}
// 店铺访问记录增加
func insertShopAccess(shopid, userid, ip string, db *sql.DB) (string, error) {
if shopid == "" {
return "店铺id参数为空", errors.New("店铺id参数为空")
}
insertSQL := "insert into shopaccess (shopid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, shopid, userid, ip)
if err != nil {
log.Println(err)
return "插入店铺访问记录失败", err
}
return "插入店铺访问记录成功", err
}
// 商品访问记录增加
func insertGoodsAccess(goodsid, userid, ip string, db *sql.DB) (string, error) {
if goodsid == "" {
return "商品id参数为空", errors.New("商品id参数为空")
}
insertSQL := "insert into goodsaccess (goodsid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, goodsid, userid, ip)
if err != nil {
log.Println(err)
return "插入商品访问记录失败", err
}
return "插入商品访问记录成功", err
}
| ery(selectSQL, shopid)
if err != | conditional_block |
mallSwitcherDao.go | // Package switcher 数据库交互函数
package switcher
import (
"database/sql"
"errors"
"log"
)
// testDao 测试接口
func testDao(id string, db *sql.DB) ([]AName, error) {
var aname []AName
sql := "select name from test"
log.Println(sql)
rows, err := db.Query(sql)
if err != nil {
log.Println(err)
return aname, err
}
defer rows.Close()
for rows.Next() {
var aaname AName
var name string
err := rows.Scan(&name)
if err != nil {
log.Println(err)
return aname, err
}
aaname.Name = name
aname = append(aname, aaname)
}
return aname, err
}
// 用户收藏查询
func getCollectionDao(selectSQL string, db *sql.DB) ([]Collection, error) {
var collection []Collection
rows, err := db.Query(selectSQL)
if err != nil {
log.Println(err)
return collection, err
}
defer rows.Close()
for rows.Next() {
var c Collection
var id, goodsID int
var collectionType, goodsName, style, unit, price, width, height, compressPic, scenesURL string
err := rows.Scan(&id, &goodsID, &collectionType, &goodsName, &style, &unit, &price, &width, &height, &compressPic, &scenesURL)
if err != nil {
log.Println(err)
return collection, err
}
c.ID = id
c.GoodsID = goodsID
c.CollectionType = collectionType
c.GoodsName = goodsName
c.Style = style
c.Unit = unit
c.Price = price
c.Width = width
c.Height = height
c.CompressPic = compressPic
c.ScenesURL = scenesURL
collection = append(collection, c)
}
return collection, err
}
// 用户增加收藏
func insertCollectionDao(userid, shopid, goodsid int, collectionType, scenesURL string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加收藏sql
insertSQL := `insert into collection (userid,shopid,goodsid,type,scenesurl) values (?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, userid, shopid, goodsid, collectionType, scenesURL)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 用户取消收藏
func delCollectionDao(userid, collectionid int, db *sql.DB) error {
updateSQL := `UPDATE collection SET status = 0 WHERE id = ? and userid = ?`
_, err := db.Exec(updateSQL, userid, collectionid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 获取机器状态
func getMachineDao(machineid int, db *sql.DB) (Machine, error) {
var m Machine
selectSQL := `select id,machineid,shopid,slotnum,runstate,netstate,machineip from machine where state = 1 and id = ` + IntToString(machineid)
err := db.QueryRow(selectSQL).Scan(&m.ID, &m.MachineID, &m.ShopID, &m.SlotNum, &m.RunState, &m.NetState, &m.MachineIP)
if err != nil {
log.Println(err)
return m, err
}
return m, err
}
// 实体机器商品展示
func insertMachineTaskDao(machineid, userid, shopid, goodsid, slotid int, tx *sql.Tx) error {
// 增加机器任务sql
insertSQL := `insert into machinetask (machineid,shopid,userid,goodsid,slotid) values (?, ?, ?, ?, ?)`
_, err := tx.Exec(insertSQL, machineid, shopid, userid, goodsid, slotid)
if nil != err {
log.Println(err)
return err
}
return nil
}
// 增加模拟场景任务
func insertScenesTaskDao(shopid, userid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid int,
scenesid, scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl,
goodsBwidth, goodsBheight, goodsBspliceType string, tx *sql.Tx) (int64, error) {
var rowid int64
// 增加机器任务sql
insertSQL := `insert into scenestask (shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid,
scenesmenuurl1, scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
result, err := tx.Exec(insertSQL, shopid, userid, scenesid, scenesmenuid1, scenesmenuid2, goodsAid, goodsBid, scenesmenuurl1,
scenesmenuurl2, goodsAurl, goodsAwidth, goodsAheight, goodsAspliceType, goodsBurl, goodsBwidth, goodsBheight, goodsBspliceType)
if nil != err {
log.Println(err)
return rowid, err
}
rowid, err = result.LastInsertId()
return rowid, nil
}
// 查询模拟场景URL
func getScenesTaskURLDao(scenesTaskid int, db *sql.DB) (string, error) {
var scenesTaskURL string
selectSQL := `select url from scenestask where state = 1 and id = ` + IntToString(scenesTaskid)
err := db.QueryRow(selectSQL).Scan(&scenesTaskURL)
if err != nil {
log.Println(err)
return scenesTaskURL, err
}
return scenesTaskURL, err
}
// 根据url获取店铺id name
func getShopID(shopurl string, db *sql.DB) (UrlToId, error) {
var UTI UrlToId
selectSQL := "select id, shopname from shop where shopurl = ?"
err := db.QueryRow(selectSQL, shopurl).Scan(&UTI.ID, &UTI.ShopName)
if err != nil {
return UTI, err
}
return UTI, err
}
// 用户登录验证
func getUserLogin(phoneNum, password string, db *sql.DB) (string, error, interface{}) {
var UI UserInfo
var num int
selectSQL := "select ifnull(id, 0), ifnull(count(*), 0) from user where phoneNum = ? and password = ? and state = 1"
err := db.QueryRow(selectSQL, phoneNum, password).Scan(&UI.ID, &num)
if err != nil {
return "查询失败", err, UI
}
if num != 0 {
return "登录成功", err, UI
}
return "手机号或密码错误", errors.New("手机账号或密码错误"), UI
}
// 店铺信息查询 包括 首页展示商品的数量和阀值
func getIndex(shopid string, db *sql.DB) (IndexInfo, HotCondition, error) {
var II IndexInfo
var HC HotCondition
selectSQL := "select id, shopname, logoimg, shopshow, bannerimg, popularlimit, popularquantity from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&II.ID, &II.ShopName, &II.LogoImg, &II.ShopShow, &II.BannerImg, &HC.PopularLimit, &HC.PopularQuantity)
if err != nil {
return II, HC, err
}
return II, HC, err
}
// 爆款查询
func getHotGoods(shopid string, HC HotCondition, CLS []ClassList, db *sql.DB) ([]HotGoods, error) {
var HGS []HotGoods
for _, k := range CLS {
selectSQL := "select g.id, m.menuname, ifnull(x.clicknum, 0) as clicknum, ifnull(y.colornum, 0) as colornum, ifnull(y.smallpic, '') as smallpic from goods g left join menu m on g.menuid = m.id left join (select goodsid, count(*) as clicknum from goodsaccess a group by goodsid) x on g.id = x.goodsid left join (select goodsid, smallpic, count(*) as colornum from goodspic group by goodsid) y on g.id = y.goodsid where g.state = 1 and g.shopid = ? and menuid = ? and ifnull(x.clicknum, 0) > ? order by ifnull(y.colornum, 0) desc limit ?"
rows, err := db.Query(selectSQL, shopid, k.ID, HC.PopularLimit, HC.PopularQuantity)
if err != nil {
log.Println(err)
return HGS, err
}
defer rows.Close()
for rows.Next() {
var HG HotGoods
err := rows.Scan(&HG.ID, &HG.MenuName, &HG.ClickNum, &HG.ColorNum, &HG.SmallPic)
if err != nil {
log.Println(err)
return HGS, err
}
HGS = append(HGS, HG)
}
}
return HGS, nil
}
// 店铺联系方式查询
func getShopContactInfo(shopid string, db *sql.DB) (interface{}, error) {
var SCI ShopContactInfo
selectSQL := "select telnum, phonenum, wechat, wechaturl, location, lng, lat from shop where id = ?"
err := db.QueryRow(selectSQL, shopid).Scan(&SCI.Telnum, &SCI.Phonenum, &SCI.Wechat, &SC | Location, &SCI.Lng, &SCI.Lat)
if err != nil {
return "查询失败", err
}
return SCI, err
}
// 全部分类查询(一级分类查询)
func getAllClassifyOne(shopid string, db *sql.DB) ([]AllClassOne, error) {
var ACOS []AllClassOne
selectSQL := "select id, menuname, superid from menu where state = 1 and superid = 0 and shopid = ?"
rows, err := db.Query(selectSQL, shopid)
if err != nil {
log.Println(err)
return ACOS, err
}
defer rows.Close()
for rows.Next() {
var ACO AllClassOne
err := rows.Scan(&ACO.ID, &ACO.MenuName, &ACO.SuperID)
if err != nil {
log.Println(err)
return ACOS, err
}
ACOS = append(ACOS, ACO)
}
return ACOS, err
}
// 全部分类查询(二级分类查询 带图片数量)
func getAllClassifyTwoWithPic(shopid, superid string, db *sql.DB) ([]AllClassTwo, error) {
var ACTS []AllClassTwo
selectSQL := `select z.id as menuid, z.menuname, z.superid, ifnull(y.num, 0) as num from (select id, menuname, superid from menu where superid = ? and shopid = ? and state = 1) z left join (select id, menuid, count(*) as num from goods where state = 1 and shopid = ? group by menuid) y on z.id = y.menuid group by z.id`
rows, err := db.Query(selectSQL, superid, shopid, shopid)
if err != nil {
log.Println(err)
return ACTS, err
}
defer rows.Close()
for rows.Next() {
var ACT AllClassTwo
err := rows.Scan(&ACT.ID, &ACT.MenuName, &ACT.SuperID, &ACT.Num)
if err != nil {
log.Println(err)
return ACTS, err
}
ACTS = append(ACTS, ACT)
}
return ACTS, err
}
// 全部选择分类查询
func getAllClassifyTwo(superid string, db *sql.DB) ([]ClassList, error) {
var CLS []ClassList
selectSQL := "select id, menuname from menu where superid = ? and state = 1"
rows, err := db.Query(selectSQL, superid)
if err != nil {
log.Println(err)
return CLS, err
}
defer rows.Close()
for rows.Next() {
var CL ClassList
err := rows.Scan(&CL.ID, &CL.MenuName)
if err != nil {
log.Println(err)
return CLS, err
}
CLS = append(CLS, CL)
}
return CLS, err
}
// 获取二级菜单下的商品总数
func getGoodsNum(menuid string, db *sql.DB) (int, error) {
var total int
selectSQL := "select count(*) from goods where menuid = ? and state = 1"
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return total, err
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&total)
if err != nil {
log.Println(err)
return total, err
}
}
return total, err
}
// 获取商品信息
func getGoods(menuid string, db *sql.DB) ([]GoodsList, error) {
var GLS []GoodsList
selectSQL := `select g.id, ifnull(t.num, 0) as clickNum, ifnull(x.smallpic, '') as smallpic, ifnull(x.num, 0) as colorNum
from goods g
left join (select goodsid, count(goodsid) as num from goodsaccess group by goodsid) t on g.id = t.goodsid
left join (select goodsid, smallpic, count(*) as num from goodspic where state = 1 group by goodsid) x on g.id = x.goodsid
where g.menuid = ?`
rows, err := db.Query(selectSQL, menuid)
if err != nil {
log.Println(err)
return GLS, err
}
defer rows.Close()
for rows.Next() {
var GL GoodsList
err := rows.Scan(&GL.ID, &GL.ClickNum, &GL.SmallPic, &GL.ColorNum)
if err != nil {
log.Println(err)
return GLS, err
}
GLS = append(GLS, GL)
}
return GLS, err
}
// 获取商品详细内容
func getGoodsInfoDetail(goodsid string, db *sql.DB) (GoodsDetail, error) {
var GD GoodsDetail
selectSQL := `select g.id, g.goodsname, g.brand, g.style, g.unit, g.material, g.madein, g.price, g.width, g.height,
ifnull(s.machineid, 0) as machineid, ifnull(m.name, '') as machinename, ifnull(s.id, 0) as slotid,
ifnull(s.slotnum, 0) as slotnum from goods g
left join machineslot s on s.goodsid = g.id
left join machine m on m.id = s.machineid
where g.id = ?`
err := db.QueryRow(selectSQL, goodsid).Scan(&GD.ID, &GD.GoodsName, &GD.Brand, &GD.Style, &GD.Unit, &GD.Material, &GD.MadeIn, &GD.Price,
&GD.Width, &GD.Height, &GD.MachineDetail.MachineID, &GD.MachineDetail.MachineName, &GD.MachineDetail.SlotID, &GD.MachineDetail.SlotNum)
if err != nil {
log.Println(err)
}
return GD, err
}
// 获取商品是否收藏
func getIsCollect(goodsid, userid string, db *sql.DB) (int, int, error) {
var num int
var collectionid int
selectSQL := "select ifnull(id,0), count(*) from collection where goodsid = ? and userid = ? and state = 1"
err := db.QueryRow(selectSQL, goodsid, userid).Scan(&collectionid, &num)
if err != nil {
log.Println(err)
}
return collectionid, num, err
}
// 获取商品图片
func getGoodsPicDetail(goodsid string, db *sql.DB) ([]PicList, error) {
var PLS []PicList
selectSQL := "select id, model, pic, compresspic from goodspic where state = 1 and goodsid = ? order by isfirst desc"
rows, err := db.Query(selectSQL, goodsid)
if err != nil {
log.Println(err)
return PLS, err
}
defer rows.Close()
for rows.Next() {
var PL PicList
err := rows.Scan(&PL.ID, &PL.Model, &PL.Pic, &PL.Compresspic)
if err != nil {
log.Println(err)
return PLS, err
}
PLS = append(PLS, PL)
}
return PLS, err
}
// 店铺访问记录增加
func insertShopAccess(shopid, userid, ip string, db *sql.DB) (string, error) {
if shopid == "" {
return "店铺id参数为空", errors.New("店铺id参数为空")
}
insertSQL := "insert into shopaccess (shopid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, shopid, userid, ip)
if err != nil {
log.Println(err)
return "插入店铺访问记录失败", err
}
return "插入店铺访问记录成功", err
}
// 商品访问记录增加
func insertGoodsAccess(goodsid, userid, ip string, db *sql.DB) (string, error) {
if goodsid == "" {
return "商品id参数为空", errors.New("商品id参数为空")
}
insertSQL := "insert into goodsaccess (goodsid, userid, ip) values (?, ?, ?)"
_, err := db.Exec(insertSQL, goodsid, userid, ip)
if err != nil {
log.Println(err)
return "插入商品访问记录失败", err
}
return "插入商品访问记录成功", err
}
| I.Wechaturl, &SCI. | identifier_name |
nb_10_DogcatcherFlatten.py |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=True)
df["FA_start"] = df["gene_start"]
df_exon = df[df["type"]=="exon"].copy()
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["FA_end"] = df_exon["end"]
df_exon = df_exon[["name","FA_end"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["FA_length"] = df["FA_end"] - df["FA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def ThreePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=False)
df["LA_end"] = df["gene_end"]
df_exon = df[df["type"]=="exon"].copy()
# Keep first exon
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["LA_start"] = df_exon["start"]
df_exon = df_exon[["name","LA_start"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["LA_length"] = df["LA_end"] - df["LA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def getAreas(df):
"""
This function will get the first and last exons for plu and min strand.
Call it area because not necessarily exon.
"""
df_plu = df[df["strand"]=="+"]
df_min = df[df["strand"]=="-"]
df_plu_FA = FivePrimeArea(df_plu)
df_min_FA = FivePrimeArea(df_min)
df_plu_LA = ThreePrimeArea(df_plu)[["name","LA_start","LA_end","LA_length"]]
df_min_LA = ThreePrimeArea(df_min)[["name","LA_start","LA_end","LA_length"]]
df_plu = pd.merge(df_plu_FA,df_plu_LA,on="name")
df_min = pd.merge(df_min_FA,df_min_LA,on="name")
df = pd.concat([df_plu,df_min])
return df
def | (df):
"""This function will take a gtf and return strand specific dictionary of different chrm"""
chr_names=df['chr'].unique().tolist()
d_chr = d_gtf_chr = {chrom : df[df["chr"]==chrom] for chrom in chr_names}
return d_chr
def countInside(df, start, end):
rows_df = df[ (start < df["start"]) & (df["end"] < end) ]
names = rows_df['name'].unique().tolist()
names = ",".join(names)
if len(names) >0:
return names
else:
return np.nan
def removeInside(df):
d_chr = chrDIC(df)
df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row["start"], row["end"]), axis=1)
df2 = df.dropna(subset=['genes_inside'])
all_names = []
for i in range(len(df2)):
names = df2["genes_inside"].iloc[i]
names = names.split(",")
all_names = all_names + names
inside_genes = list(set(all_names))
l = len(inside_genes)
print(f"Removing {l} genes that are inside other genes")
df_inside = pd.DataFrame(inside_genes,columns=['name'])
df = df[~df["name"].isin(df_inside["name"])].copy()
del df["genes_inside"]
return df, df_inside
def flattenGTF(file_in,file_type,NEXTFLOW=True):
if file_type == "ENSEMBL":
print(f"Flattening ENSEMBL like genome {file_in}")
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["chr"] = df["chr"].astype(str)
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df.sort_values(by=["chr","start"], inplace=True, ascending=True)
fout = f"{file_in[:-4]}_sort.gtf"
df.to_csv(fout,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("gene_id ","")
df["name"] = df["name"].str.replace("\"","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "BED":
my_col = ["chr","start","end","name","strand"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
if file_type == "REFSEQGFF":
# Chrome numbers are changed. Need to change back to chr1 etc.
# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
print(f"Flattening REFSEQGFF like genome")
# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
#download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
# sort and index in IGV
# NC_000001.11 BestRefSeq gene 11874 14409 . + . gene_id "DDX11L1"; transcript_id ""; db_xref "GeneID:100287102"; db_xref "HGNC:HGNC:37102"; description "DEAD/H-box helicase 11 like 1 (pseudogene)"; gbkey "Gene"; gene "DDX11L1"; gene_biotype "transcribed_pseudogene"; pseudo "true";
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
replace_list = [("chr1","NC_000001.11"),
("chr2","NC_000002.12"),
("chr3","NC_000003.12"),
("chr4","NC_000004.12"),
("chr5","NC_000005.10"),
("chr6","NC_000006.12"),
("chr7","NC_000007.14"),
("chr8","NC_000008.11"),
("chr9","NC_000009.12"),
("chr10","NC_000010.11"),
("chr11","NC_000011.10"),
("chr12","NC_000012.12"),
("chr13","NC_000013.11"),
("chr14","NC_000014.9"),
("chr15","NC_000015.10"),
("chr16","NC_000016.10"),
("chr17","NC_000017.11"),
("chr18","NC_000018.10"),
("chr19","NC_000019.10"),
("chr20","NC_000020.11"),
("chr21","NC_000021.9"),
("chr22","NC_000022.11"),
("chrX","NC_000023.11"),
("chrY","NC_000024.10")]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[df["type"]=="gene"].copy()
# Change NC names to chr
for l in replace_list:
df["chr"] = np.where(df["chr"]==l[1],l[0],df["chr"])
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("ID=gene-","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "REFSEQBED":
# chr1 11873 14409 NR_046018 0 +
# 14409 14409 0 3 354,109,1189, 0,739,1347,
my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[["chr","start","end","name","strand"]]
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
df_plu = df[df["strand"]=="+"].copy()
df_min = df[df["strand"]=="-"].copy()
df_plu, df_plu_inside = removeInside(df_plu)
df_min, df_min_inside = removeInside(df_min)
df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
df = pd.concat([df_plu,df_min])
df = df.sort_values(by=["chr","end"],ascending=False)
gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"] ]
df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
if NEXTFLOW:
file_in = os.path.basename(file_in)
fout = f"{file_in[:-4]}_flat.txt"
fout2 = f"{file_in[:-4]}_flat.gtf"
fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
print(f"Outputting flat file {fout}")
df.to_csv(fout,sep="\t",index=None)
gtf.to_csv(fout2,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
gtf_names = gtf[["chr"]].copy()
gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
gtf_names.to_csv(fout3,sep="\t", index=None)
return df
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
parser.add_argument('--annotation_in', action= 'store', metavar='annotation_in')
parser.add_argument('--file_type', action= 'store', metavar='file_type',default="ENSEMBL")
args = parser.parse_args()
return args
if __name__=="__main__":
args = parse_arguments()
file_in = args.annotation_in
file_type = args.file_type
flattenGTF(file_in,file_type)
| chrDIC | identifier_name |
nb_10_DogcatcherFlatten.py |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=True)
df["FA_start"] = df["gene_start"]
df_exon = df[df["type"]=="exon"].copy()
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["FA_end"] = df_exon["end"]
df_exon = df_exon[["name","FA_end"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["FA_length"] = df["FA_end"] - df["FA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def ThreePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=False)
df["LA_end"] = df["gene_end"]
df_exon = df[df["type"]=="exon"].copy()
# Keep first exon
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["LA_start"] = df_exon["start"]
df_exon = df_exon[["name","LA_start"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["LA_length"] = df["LA_end"] - df["LA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def getAreas(df):
"""
This function will get the first and last exons for plu and min strand.
Call it area because not necessarily exon.
"""
df_plu = df[df["strand"]=="+"]
df_min = df[df["strand"]=="-"]
df_plu_FA = FivePrimeArea(df_plu)
df_min_FA = FivePrimeArea(df_min)
df_plu_LA = ThreePrimeArea(df_plu)[["name","LA_start","LA_end","LA_length"]]
df_min_LA = ThreePrimeArea(df_min)[["name","LA_start","LA_end","LA_length"]]
df_plu = pd.merge(df_plu_FA,df_plu_LA,on="name")
df_min = pd.merge(df_min_FA,df_min_LA,on="name")
df = pd.concat([df_plu,df_min])
return df
def chrDIC(df):
"""This function will take a gtf and return strand specific dictionary of different chrm"""
chr_names=df['chr'].unique().tolist()
d_chr = d_gtf_chr = {chrom : df[df["chr"]==chrom] for chrom in chr_names}
return d_chr
def countInside(df, start, end):
rows_df = df[ (start < df["start"]) & (df["end"] < end) ]
names = rows_df['name'].unique().tolist()
names = ",".join(names)
if len(names) >0:
return names
else:
return np.nan
def removeInside(df):
d_chr = chrDIC(df)
df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row["start"], row["end"]), axis=1)
df2 = df.dropna(subset=['genes_inside'])
all_names = []
for i in range(len(df2)):
names = df2["genes_inside"].iloc[i]
names = names.split(",")
all_names = all_names + names
inside_genes = list(set(all_names))
l = len(inside_genes)
print(f"Removing {l} genes that are inside other genes")
df_inside = pd.DataFrame(inside_genes,columns=['name'])
df = df[~df["name"].isin(df_inside["name"])].copy()
del df["genes_inside"]
return df, df_inside
def flattenGTF(file_in,file_type,NEXTFLOW=True):
if file_type == "ENSEMBL":
print(f"Flattening ENSEMBL like genome {file_in}")
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["chr"] = df["chr"].astype(str)
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df.sort_values(by=["chr","start"], inplace=True, ascending=True)
fout = f"{file_in[:-4]}_sort.gtf"
df.to_csv(fout,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("gene_id ","")
df["name"] = df["name"].str.replace("\"","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "BED":
my_col = ["chr","start","end","name","strand"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
if file_type == "REFSEQGFF":
# Chrome numbers are changed. Need to change back to chr1 etc.
# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
|
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "REFSEQBED":
# chr1 11873 14409 NR_046018 0 +
# 14409 14409 0 3 354,109,1189, 0,739,1347,
my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[["chr","start","end","name","strand"]]
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
df_plu = df[df["strand"]=="+"].copy()
df_min = df[df["strand"]=="-"].copy()
df_plu, df_plu_inside = removeInside(df_plu)
df_min, df_min_inside = removeInside(df_min)
df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
df = pd.concat([df_plu,df_min])
df = df.sort_values(by=["chr","end"],ascending=False)
gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"] ]
df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
if NEXTFLOW:
file_in = os.path.basename(file_in)
fout = f"{file_in[:-4]}_flat.txt"
fout2 = f"{file_in[:-4]}_flat.gtf"
fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
print(f"Outputting flat file {fout}")
df.to_csv(fout,sep="\t",index=None)
gtf.to_csv(fout2,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
gtf_names = gtf[["chr"]].copy()
gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
gtf_names.to_csv(fout3,sep="\t", index=None)
return df
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
parser.add_argument('--annotation_in', action= 'store', metavar='annotation_in')
parser.add_argument('--file_type', action= 'store', metavar='file_type',default="ENSEMBL")
args = parser.parse_args()
return args
if __name__=="__main__":
args = parse_arguments()
file_in = args.annotation_in
file_type = args.file_type
flattenGTF(file_in,file_type)
| print(f"Flattening REFSEQGFF like genome")
# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
#download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
# sort and index in IGV
# NC_000001.11 BestRefSeq gene 11874 14409 . + . gene_id "DDX11L1"; transcript_id ""; db_xref "GeneID:100287102"; db_xref "HGNC:HGNC:37102"; description "DEAD/H-box helicase 11 like 1 (pseudogene)"; gbkey "Gene"; gene "DDX11L1"; gene_biotype "transcribed_pseudogene"; pseudo "true";
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
replace_list = [("chr1","NC_000001.11"),
("chr2","NC_000002.12"),
("chr3","NC_000003.12"),
("chr4","NC_000004.12"),
("chr5","NC_000005.10"),
("chr6","NC_000006.12"),
("chr7","NC_000007.14"),
("chr8","NC_000008.11"),
("chr9","NC_000009.12"),
("chr10","NC_000010.11"),
("chr11","NC_000011.10"),
("chr12","NC_000012.12"),
("chr13","NC_000013.11"),
("chr14","NC_000014.9"),
("chr15","NC_000015.10"),
("chr16","NC_000016.10"),
("chr17","NC_000017.11"),
("chr18","NC_000018.10"),
("chr19","NC_000019.10"),
("chr20","NC_000020.11"),
("chr21","NC_000021.9"),
("chr22","NC_000022.11"),
("chrX","NC_000023.11"),
("chrY","NC_000024.10")]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[df["type"]=="gene"].copy()
# Change NC names to chr
for l in replace_list:
df["chr"] = np.where(df["chr"]==l[1],l[0],df["chr"])
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("ID=gene-","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"] | conditional_block |
nb_10_DogcatcherFlatten.py | #################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=True)
df["FA_start"] = df["gene_start"]
df_exon = df[df["type"]=="exon"].copy()
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["FA_end"] = df_exon["end"]
df_exon = df_exon[["name","FA_end"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["FA_length"] = df["FA_end"] - df["FA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def ThreePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=False)
df["LA_end"] = df["gene_end"]
df_exon = df[df["type"]=="exon"].copy()
# Keep first exon
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["LA_start"] = df_exon["start"]
df_exon = df_exon[["name","LA_start"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["LA_length"] = df["LA_end"] - df["LA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def getAreas(df):
"""
This function will get the first and last exons for plu and min strand.
Call it area because not necessarily exon.
"""
df_plu = df[df["strand"]=="+"]
df_min = df[df["strand"]=="-"]
df_plu_FA = FivePrimeArea(df_plu)
df_min_FA = FivePrimeArea(df_min)
df_plu_LA = ThreePrimeArea(df_plu)[["name","LA_start","LA_end","LA_length"]]
df_min_LA = ThreePrimeArea(df_min)[["name","LA_start","LA_end","LA_length"]]
df_plu = pd.merge(df_plu_FA,df_plu_LA,on="name")
df_min = pd.merge(df_min_FA,df_min_LA,on="name")
df = pd.concat([df_plu,df_min])
return df
def chrDIC(df):
"""This function will take a gtf and return strand specific dictionary of different chrm"""
chr_names=df['chr'].unique().tolist()
d_chr = d_gtf_chr = {chrom : df[df["chr"]==chrom] for chrom in chr_names}
return d_chr
def countInside(df, start, end):
rows_df = df[ (start < df["start"]) & (df["end"] < end) ]
names = rows_df['name'].unique().tolist()
names = ",".join(names)
if len(names) >0:
return names
else:
return np.nan
def removeInside(df):
d_chr = chrDIC(df)
df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row["start"], row["end"]), axis=1)
df2 = df.dropna(subset=['genes_inside'])
all_names = []
for i in range(len(df2)):
names = df2["genes_inside"].iloc[i]
names = names.split(",")
all_names = all_names + names
inside_genes = list(set(all_names))
l = len(inside_genes)
print(f"Removing {l} genes that are inside other genes")
df_inside = pd.DataFrame(inside_genes,columns=['name'])
df = df[~df["name"].isin(df_inside["name"])].copy()
del df["genes_inside"]
return df, df_inside
def flattenGTF(file_in,file_type,NEXTFLOW=True):
if file_type == "ENSEMBL":
print(f"Flattening ENSEMBL like genome {file_in}")
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["chr"] = df["chr"].astype(str)
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df.sort_values(by=["chr","start"], inplace=True, ascending=True)
fout = f"{file_in[:-4]}_sort.gtf"
df.to_csv(fout,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("gene_id ","")
df["name"] = df["name"].str.replace("\"","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "BED":
my_col = ["chr","start","end","name","strand"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
if file_type == "REFSEQGFF":
# Chrome numbers are changed. Need to change back to chr1 etc.
# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
print(f"Flattening REFSEQGFF like genome")
# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
#download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
# sort and index in IGV
# NC_000001.11 BestRefSeq gene 11874 14409 . + . gene_id "DDX11L1"; transcript_id ""; db_xref "GeneID:100287102"; db_xref "HGNC:HGNC:37102"; description "DEAD/H-box helicase 11 like 1 (pseudogene)"; gbkey "Gene"; gene "DDX11L1"; gene_biotype "transcribed_pseudogene"; pseudo "true";
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
replace_list = [("chr1","NC_000001.11"),
("chr2","NC_000002.12"),
("chr3","NC_000003.12"),
("chr4","NC_000004.12"),
("chr5","NC_000005.10"),
("chr6","NC_000006.12"),
("chr7","NC_000007.14"),
("chr8","NC_000008.11"),
("chr9","NC_000009.12"),
("chr10","NC_000010.11"),
("chr11","NC_000011.10"),
("chr12","NC_000012.12"),
("chr13","NC_000013.11"),
("chr14","NC_000014.9"),
("chr15","NC_000015.10"),
("chr16","NC_000016.10"),
("chr17","NC_000017.11"),
("chr18","NC_000018.10"),
("chr19","NC_000019.10"),
("chr20","NC_000020.11"),
("chr21","NC_000021.9"),
("chr22","NC_000022.11"),
("chrX","NC_000023.11"),
("chrY","NC_000024.10")]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[df["type"]=="gene"].copy()
# Change NC names to chr
for l in replace_list:
df["chr"] = np.where(df["chr"]==l[1],l[0],df["chr"])
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("ID=gene-","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"] | df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "REFSEQBED":
# chr1 11873 14409 NR_046018 0 +
# 14409 14409 0 3 354,109,1189, 0,739,1347,
my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[["chr","start","end","name","strand"]]
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
df_plu = df[df["strand"]=="+"].copy()
df_min = df[df["strand"]=="-"].copy()
df_plu, df_plu_inside = removeInside(df_plu)
df_min, df_min_inside = removeInside(df_min)
df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
df = pd.concat([df_plu,df_min])
df = df.sort_values(by=["chr","end"],ascending=False)
gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"] ]
df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
if NEXTFLOW:
file_in = os.path.basename(file_in)
fout = f"{file_in[:-4]}_flat.txt"
fout2 = f"{file_in[:-4]}_flat.gtf"
fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
print(f"Outputting flat file {fout}")
df.to_csv(fout,sep="\t",index=None)
gtf.to_csv(fout2,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
gtf_names = gtf[["chr"]].copy()
gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
gtf_names.to_csv(fout3,sep="\t", index=None)
return df
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
parser.add_argument('--annotation_in', action= 'store', metavar='annotation_in')
parser.add_argument('--file_type', action= 'store', metavar='file_type',default="ENSEMBL")
args = parser.parse_args()
return args
if __name__=="__main__":
args = parse_arguments()
file_in = args.annotation_in
file_type = args.file_type
flattenGTF(file_in,file_type) | random_line_split | |
nb_10_DogcatcherFlatten.py |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=True)
df["FA_start"] = df["gene_start"]
df_exon = df[df["type"]=="exon"].copy()
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["FA_end"] = df_exon["end"]
df_exon = df_exon[["name","FA_end"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["FA_length"] = df["FA_end"] - df["FA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def ThreePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=False)
df["LA_end"] = df["gene_end"]
df_exon = df[df["type"]=="exon"].copy()
# Keep first exon
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["LA_start"] = df_exon["start"]
df_exon = df_exon[["name","LA_start"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["LA_length"] = df["LA_end"] - df["LA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def getAreas(df):
"""
This function will get the first and last exons for plu and min strand.
Call it area because not necessarily exon.
"""
df_plu = df[df["strand"]=="+"]
df_min = df[df["strand"]=="-"]
df_plu_FA = FivePrimeArea(df_plu)
df_min_FA = FivePrimeArea(df_min)
df_plu_LA = ThreePrimeArea(df_plu)[["name","LA_start","LA_end","LA_length"]]
df_min_LA = ThreePrimeArea(df_min)[["name","LA_start","LA_end","LA_length"]]
df_plu = pd.merge(df_plu_FA,df_plu_LA,on="name")
df_min = pd.merge(df_min_FA,df_min_LA,on="name")
df = pd.concat([df_plu,df_min])
return df
def chrDIC(df):
"""This function will take a gtf and return strand specific dictionary of different chrm"""
chr_names=df['chr'].unique().tolist()
d_chr = d_gtf_chr = {chrom : df[df["chr"]==chrom] for chrom in chr_names}
return d_chr
def countInside(df, start, end):
|
def removeInside(df):
d_chr = chrDIC(df)
df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row["start"], row["end"]), axis=1)
df2 = df.dropna(subset=['genes_inside'])
all_names = []
for i in range(len(df2)):
names = df2["genes_inside"].iloc[i]
names = names.split(",")
all_names = all_names + names
inside_genes = list(set(all_names))
l = len(inside_genes)
print(f"Removing {l} genes that are inside other genes")
df_inside = pd.DataFrame(inside_genes,columns=['name'])
df = df[~df["name"].isin(df_inside["name"])].copy()
del df["genes_inside"]
return df, df_inside
def flattenGTF(file_in,file_type,NEXTFLOW=True):
if file_type == "ENSEMBL":
print(f"Flattening ENSEMBL like genome {file_in}")
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["chr"] = df["chr"].astype(str)
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df.sort_values(by=["chr","start"], inplace=True, ascending=True)
fout = f"{file_in[:-4]}_sort.gtf"
df.to_csv(fout,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("gene_id ","")
df["name"] = df["name"].str.replace("\"","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "BED":
my_col = ["chr","start","end","name","strand"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
if file_type == "REFSEQGFF":
# Chrome numbers are changed. Need to change back to chr1 etc.
# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
print(f"Flattening REFSEQGFF like genome")
# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
#download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
# sort and index in IGV
# NC_000001.11 BestRefSeq gene 11874 14409 . + . gene_id "DDX11L1"; transcript_id ""; db_xref "GeneID:100287102"; db_xref "HGNC:HGNC:37102"; description "DEAD/H-box helicase 11 like 1 (pseudogene)"; gbkey "Gene"; gene "DDX11L1"; gene_biotype "transcribed_pseudogene"; pseudo "true";
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
replace_list = [("chr1","NC_000001.11"),
("chr2","NC_000002.12"),
("chr3","NC_000003.12"),
("chr4","NC_000004.12"),
("chr5","NC_000005.10"),
("chr6","NC_000006.12"),
("chr7","NC_000007.14"),
("chr8","NC_000008.11"),
("chr9","NC_000009.12"),
("chr10","NC_000010.11"),
("chr11","NC_000011.10"),
("chr12","NC_000012.12"),
("chr13","NC_000013.11"),
("chr14","NC_000014.9"),
("chr15","NC_000015.10"),
("chr16","NC_000016.10"),
("chr17","NC_000017.11"),
("chr18","NC_000018.10"),
("chr19","NC_000019.10"),
("chr20","NC_000020.11"),
("chr21","NC_000021.9"),
("chr22","NC_000022.11"),
("chrX","NC_000023.11"),
("chrY","NC_000024.10")]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[df["type"]=="gene"].copy()
# Change NC names to chr
for l in replace_list:
df["chr"] = np.where(df["chr"]==l[1],l[0],df["chr"])
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("ID=gene-","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "REFSEQBED":
# chr1 11873 14409 NR_046018 0 +
# 14409 14409 0 3 354,109,1189, 0,739,1347,
my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[["chr","start","end","name","strand"]]
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
df_plu = df[df["strand"]=="+"].copy()
df_min = df[df["strand"]=="-"].copy()
df_plu, df_plu_inside = removeInside(df_plu)
df_min, df_min_inside = removeInside(df_min)
df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
df = pd.concat([df_plu,df_min])
df = df.sort_values(by=["chr","end"],ascending=False)
gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"] ]
df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
if NEXTFLOW:
file_in = os.path.basename(file_in)
fout = f"{file_in[:-4]}_flat.txt"
fout2 = f"{file_in[:-4]}_flat.gtf"
fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
print(f"Outputting flat file {fout}")
df.to_csv(fout,sep="\t",index=None)
gtf.to_csv(fout2,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
gtf_names = gtf[["chr"]].copy()
gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
gtf_names.to_csv(fout3,sep="\t", index=None)
return df
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
parser.add_argument('--annotation_in', action= 'store', metavar='annotation_in')
parser.add_argument('--file_type', action= 'store', metavar='file_type',default="ENSEMBL")
args = parser.parse_args()
return args
if __name__=="__main__":
args = parse_arguments()
file_in = args.annotation_in
file_type = args.file_type
flattenGTF(file_in,file_type)
| rows_df = df[ (start < df["start"]) & (df["end"] < end) ]
names = rows_df['name'].unique().tolist()
names = ",".join(names)
if len(names) >0:
return names
else:
return np.nan | identifier_body |
lib.rs | //! This is an implementation the test factory pattern made to work with [Diesel][].
//!
//! [Diesel]: https://diesel.rs
//!
//! Example usage:
//!
//! ```
//! #[macro_use]
//! extern crate diesel;
//!
//! use diesel_factories::{Association, Factory};
//! use diesel::{pg::PgConnection, prelude::*};
//!
//! // Tell Diesel what our schema is
//! // Note unusual primary key name - see options for derive macro.
//! mod schema {
//! table! {
//! countries (identity) {
//! identity -> Integer,
//! name -> Text,
//! }
//! }
//!
//! table! {
//! cities (id) {
//! id -> Integer,
//! name -> Text,
//! country_id -> Integer,
//! }
//! }
//! }
//!
//! // Our city model
//! #[derive(Clone, Queryable)]
//! struct City {
//! pub id: i32,
//! pub name: String,
//! pub country_id: i32,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! // model type our factory inserts
//! model = City,
//! // table the model belongs to
//! table = crate::schema::cities,
//! // connection type you use. Defaults to `PgConnection`
//! connection = diesel::pg::PgConnection,
//! // type of primary key. Defaults to `i32`
//! id = i32,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! // A `CityFactory` is associated to either an inserted `&'a Country` or a `CountryFactory`
//! // instance.
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//!
//! // We make new factory instances through the `Default` trait
//! impl<'a> Default for CityFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Copenhagen".to_string(),
//! // `default` will return an `Association` with a `CountryFactory`. No inserts happen
//! // here.
//! //
//! // This is the same as `Association::Factory(CountryFactory::default())`.
//! country: Association::default(),
//! }
//! }
//! }
//!
//! // The same setup, but for `Country`
//! #[derive(Clone, Queryable)]
//! struct Country {
//! pub identity: i32,
//! pub name: String,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = Country,
//! table = crate::schema::countries,
//! connection = diesel::pg::PgConnection,
//! id = i32,
//! id_name = identity,
//! )]
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! impl Default for CountryFactory {
//! fn default() -> Self {
//! Self {
//! name: "Denmark".into(),
//! }
//! }
//! }
//!
//! // Usage
//! fn basic_usage() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default().insert(&con);
//! assert_eq!("Copenhagen", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Denmark", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn setting_fields() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default()
//! .name("Amsterdam")
//! .country(CountryFactory::default().name("Netherlands"))
//! .insert(&con);
//! assert_eq!("Amsterdam", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Netherlands", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn multiple_models_with_same_association() {
//! let con = establish_connection();
//!
//! let netherlands = CountryFactory::default()
//! .name("Netherlands")
//! .insert(&con);
//!
//! let amsterdam = CityFactory::default()
//! .name("Amsterdam")
//! .country(&netherlands)
//! .insert(&con);
//!
//! let hague = CityFactory::default()
//! .name("The Hague")
//! .country(&netherlands)
//! .insert(&con);
//!
//! assert_eq!(amsterdam.country_id, hague.country_id);
//!
//! assert_eq!(2, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//! #
//! # fn main() {
//! # basic_usage();
//! # setting_fields();
//! # multiple_models_with_same_association();
//! # }
//! # fn establish_connection() -> PgConnection {
//! # use std::env;
//! # let pg_host = env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string());
//! # let pg_port = env::var("POSTGRES_PORT").unwrap_or_else(|_| "5432".to_string());
//! # let pg_password = env::var("POSTGRES_PASSWORD").ok();
//! #
//! # let auth = if let Some(pg_password) = pg_password {
//! # format!("postgres:{}@", pg_password)
//! # } else {
//! # String::new()
//! # };
//! #
//! # let database_url = format!(
//! # "postgres://{auth}{host}:{port}/diesel_factories_test",
//! # auth = auth,
//! # host = pg_host,
//! # port = pg_port
//! # );
//! # let con = PgConnection::establish(&database_url).unwrap();
//! # con.begin_test_transaction().unwrap();
//! # con
//! # }
//!
//! // Utility functions just for demo'ing
//! fn count_cities(con: &PgConnection) -> i64 {
//! use crate::schema::cities;
//! use diesel::dsl::count_star;
//! cities::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn count_countries(con: &PgConnection) -> i64 {
//! use crate::schema::countries;
//! use diesel::dsl::count_star;
//! countries::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn find_country_by_id(input: i32, con: &PgConnection) -> Country {
//! use crate::schema::countries::dsl::*;
//! countries
//! .filter(identity.eq(&input))
//! .first::<Country>(con)
//! .unwrap()
//! }
//! ```
//!
//! ## `#[derive(Factory)]`
//!
//! ### Attributes
//!
//! These attributes are available on the struct itself inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `model` | Model type your factory inserts | `City` | None, required |
//! | `table` | Table your model belongs to | `crate::schema::cities` | None, required |
//! | `connection` | The connection type your app uses | `MysqlConnection` | `diesel::pg::PgConnection` |
//! | `id` | The type of your table's primary key | `i64` | `i32` |
//! | `id_name` | The name of your table's primary key column | `identity` | `id` |
//!
//! These attributes are available on association fields inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `foreign_key_name` | Name of the foreign key column on your model | `country_identity` | `{association_name}_id` |
//!
//! ### Builder methods
//!
//! Besides implementing [`Factory`] for your struct it will also derive builder methods for easily customizing each field. The generated code looks something like this:
//!
//! ```
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! // This is what gets generated for each field
//! impl CountryFactory {
//! fn name<T: Into<String>>(mut self, new: T) -> Self {
//! self.name = new.into();
//! self
//! }
//! }
//! #
//! # impl Default for CountryFactory {
//! # fn default() -> Self {
//! # CountryFactory { name: String::new() }
//! # }
//! # }
//!
//! // So you can do this
//! CountryFactory::default().name("Amsterdam");
//! ```
//!
//! [`Factory`]: trait.Factory.html
//!
//! ### Builder methods for associations
//!
//! The builder methods generated for `Association` fields are a bit different. If you have a factory like:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! You'll be able to call `country` either with an owned `CountryFactory`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country_factory = CountryFactory::default();
//! CityFactory::default().country(country_factory);
//! # }
//! ```
//!
//! Or a borrowed `Country`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country = Country { id: 1, name: "Denmark".into() };
//! CityFactory::default().country(&country);
//! # }
//! ```
//!
//! This should prevent bugs where you have multiple factory instances sharing some association that you mutate halfway through a test.
//!
//! ### Optional associations
//!
//! If your model has a nullable association you can do this:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup_with_city_factory.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = User,
//! table = crate::schema::users,
//! )]
//! struct UserFactory<'a> {
//! pub name: String,
//! pub country: Option<Association<'a, Country, CountryFactory>>,
//! # pub age: i32,
//! # pub home_city: Option<Association<'a, City, CityFactory<'a>>>,
//! # pub current_city: Option<Association<'a, City, CityFactory<'a>>>,
//! }
//!
//! impl<'a> Default for UserFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Bob".into(),
//! country: None,
//! # age: 30,
//! # home_city: None,
//! # current_city: None,
//! }
//! }
//! }
//!
//! # fn main() {
//! // Setting `country` to a `CountryFactory`
//! let country_factory = CountryFactory::default();
//! UserFactory::default().country(Some(country_factory));
//!
//! // Setting `country` to a `Country`
//! let country = Country { id: 1, name: "Denmark".into() };
//! UserFactory::default().country(Some(&country));
//!
//! // Setting `country` to `None`
//! UserFactory::default().country(Option::<CountryFactory>::None);
//! UserFactory::default().country(Option::<&Country>::None);
//! # }
//! ```
//!
//! ### Customizing foreign key names
//!
//! You can customize the name of the foreign key for your associations like so
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! #[factory(foreign_key_name = country_id)]
//! pub country: Association<'a, Country, CountryFactory>,
//! # pub name: String,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
#![doc(html_root_url = "https://docs.rs/diesel-factories/2.0.0")]
#![deny(
mutable_borrow_reservation_conflict,
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_import_braces,
unused_qualifications
)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub use diesel_factories_code_gen::Factory;
/// A "belongs to" association that may or may not have been inserted yet.
///
/// You will normally be using this when setting up "belongs to" associations between models in
/// factories.
#[derive(Debug, Clone)]
pub enum Association<'a, Model, Factory> {
/// An associated model that has been inserted into the database.
///
/// You shouldn't have to use this direclty but instead just `Association::default()`.
Model(&'a Model),
/// A factory for a model that hasn't been inserted yet into the database.
///
/// You shouldn't have to use this direclty but instead just `Association::default()`.
Factory(Factory),
}
impl<Model, Factory: Default> Default for Association<'_, Model, Factory> {
fn default() -> Self {
Association::Factory(Factory::default())
}
}
impl<'a, Model, Factory> Association<'a, Model, Factory> {
#[doc(hidden)]
pub fn new_model(inner: &'a Model) -> Self {
Association::Model(inner)
}
#[doc(hidden)]
pub fn new_factory(inner: Factory) -> Self {
Association::Factory(inner)
}
}
impl<M, F> Association<'_, M, F>
where
F: Factory<Model = M> + Clone,
{
#[doc(hidden)]
pub fn insert_returning_id(&self, con: &F::Connection) -> F::Id {
match self {
Association::Model(model) => F::id_for_model(&model).clone(),
Association::Factory(factory) => {
let model = factory.clone().insert(con);
F::id_for_model(&model).clone()
}
}
}
}
/// A generic factory trait.
///
/// You shouldn't ever have to implement this trait yourself. It can be derived using
/// `#[derive(Factory)]`
///
/// See the [root module docs](/) for info on how to use `#[derive(Factory)]`.
pub trait Factory: Clone {
/// The model type the factory inserts.
///
/// For a factory named `UserFactory` this would probably be `User`. | /// The primary key type your model uses.
///
/// This will normally be i32 or i64 but can be whatever you need.
type Id: Clone;
/// The database connection type you use such as `diesel::pg::PgConnection`.
type Connection;
/// Insert the factory into the database.
///
/// # Panics
/// This will panic if the insert fails. Should be fine since you want panics early in tests.
fn insert(self, con: &Self::Connection) -> Self::Model;
/// Get the primary key value for a model type.
///
/// Just a generic wrapper around `model.id`.
fn id_for_model(model: &Self::Model) -> &Self::Id;
}
static SEQUENCE_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Utility function for generating unique ids or strings in factories.
/// Each time `sequence` gets called, the closure will receive a different number.
///
/// ```
/// use diesel_factories::sequence;
///
/// assert_ne!(
/// sequence(|i| format!("unique-string-{}", i)),
/// sequence(|i| format!("unique-string-{}", i)),
/// );
/// ```
pub fn sequence<T, F>(f: F) -> T
where
F: Fn(usize) -> T,
{
SEQUENCE_COUNTER.fetch_add(1, Ordering::SeqCst);
let count = SEQUENCE_COUNTER.load(Ordering::Relaxed);
f(count)
}
#[cfg(test)]
mod test {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_compile_pass() {
let t = trybuild::TestCases::new();
t.pass("tests/compile_pass/*.rs");
t.compile_fail("tests/compile_fail/*.rs");
}
} | type Model;
| random_line_split |
lib.rs | //! This is an implementation the test factory pattern made to work with [Diesel][].
//!
//! [Diesel]: https://diesel.rs
//!
//! Example usage:
//!
//! ```
//! #[macro_use]
//! extern crate diesel;
//!
//! use diesel_factories::{Association, Factory};
//! use diesel::{pg::PgConnection, prelude::*};
//!
//! // Tell Diesel what our schema is
//! // Note unusual primary key name - see options for derive macro.
//! mod schema {
//! table! {
//! countries (identity) {
//! identity -> Integer,
//! name -> Text,
//! }
//! }
//!
//! table! {
//! cities (id) {
//! id -> Integer,
//! name -> Text,
//! country_id -> Integer,
//! }
//! }
//! }
//!
//! // Our city model
//! #[derive(Clone, Queryable)]
//! struct City {
//! pub id: i32,
//! pub name: String,
//! pub country_id: i32,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! // model type our factory inserts
//! model = City,
//! // table the model belongs to
//! table = crate::schema::cities,
//! // connection type you use. Defaults to `PgConnection`
//! connection = diesel::pg::PgConnection,
//! // type of primary key. Defaults to `i32`
//! id = i32,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! // A `CityFactory` is associated to either an inserted `&'a Country` or a `CountryFactory`
//! // instance.
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//!
//! // We make new factory instances through the `Default` trait
//! impl<'a> Default for CityFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Copenhagen".to_string(),
//! // `default` will return an `Association` with a `CountryFactory`. No inserts happen
//! // here.
//! //
//! // This is the same as `Association::Factory(CountryFactory::default())`.
//! country: Association::default(),
//! }
//! }
//! }
//!
//! // The same setup, but for `Country`
//! #[derive(Clone, Queryable)]
//! struct Country {
//! pub identity: i32,
//! pub name: String,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = Country,
//! table = crate::schema::countries,
//! connection = diesel::pg::PgConnection,
//! id = i32,
//! id_name = identity,
//! )]
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! impl Default for CountryFactory {
//! fn default() -> Self {
//! Self {
//! name: "Denmark".into(),
//! }
//! }
//! }
//!
//! // Usage
//! fn basic_usage() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default().insert(&con);
//! assert_eq!("Copenhagen", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Denmark", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn setting_fields() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default()
//! .name("Amsterdam")
//! .country(CountryFactory::default().name("Netherlands"))
//! .insert(&con);
//! assert_eq!("Amsterdam", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Netherlands", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn multiple_models_with_same_association() {
//! let con = establish_connection();
//!
//! let netherlands = CountryFactory::default()
//! .name("Netherlands")
//! .insert(&con);
//!
//! let amsterdam = CityFactory::default()
//! .name("Amsterdam")
//! .country(&netherlands)
//! .insert(&con);
//!
//! let hague = CityFactory::default()
//! .name("The Hague")
//! .country(&netherlands)
//! .insert(&con);
//!
//! assert_eq!(amsterdam.country_id, hague.country_id);
//!
//! assert_eq!(2, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//! #
//! # fn main() {
//! # basic_usage();
//! # setting_fields();
//! # multiple_models_with_same_association();
//! # }
//! # fn establish_connection() -> PgConnection {
//! # use std::env;
//! # let pg_host = env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string());
//! # let pg_port = env::var("POSTGRES_PORT").unwrap_or_else(|_| "5432".to_string());
//! # let pg_password = env::var("POSTGRES_PASSWORD").ok();
//! #
//! # let auth = if let Some(pg_password) = pg_password {
//! # format!("postgres:{}@", pg_password)
//! # } else {
//! # String::new()
//! # };
//! #
//! # let database_url = format!(
//! # "postgres://{auth}{host}:{port}/diesel_factories_test",
//! # auth = auth,
//! # host = pg_host,
//! # port = pg_port
//! # );
//! # let con = PgConnection::establish(&database_url).unwrap();
//! # con.begin_test_transaction().unwrap();
//! # con
//! # }
//!
//! // Utility functions just for demo'ing
//! fn count_cities(con: &PgConnection) -> i64 {
//! use crate::schema::cities;
//! use diesel::dsl::count_star;
//! cities::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn count_countries(con: &PgConnection) -> i64 {
//! use crate::schema::countries;
//! use diesel::dsl::count_star;
//! countries::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn find_country_by_id(input: i32, con: &PgConnection) -> Country {
//! use crate::schema::countries::dsl::*;
//! countries
//! .filter(identity.eq(&input))
//! .first::<Country>(con)
//! .unwrap()
//! }
//! ```
//!
//! ## `#[derive(Factory)]`
//!
//! ### Attributes
//!
//! These attributes are available on the struct itself inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `model` | Model type your factory inserts | `City` | None, required |
//! | `table` | Table your model belongs to | `crate::schema::cities` | None, required |
//! | `connection` | The connection type your app uses | `MysqlConnection` | `diesel::pg::PgConnection` |
//! | `id` | The type of your table's primary key | `i64` | `i32` |
//! | `id_name` | The name of your table's primary key column | `identity` | `id` |
//!
//! These attributes are available on association fields inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `foreign_key_name` | Name of the foreign key column on your model | `country_identity` | `{association_name}_id` |
//!
//! ### Builder methods
//!
//! Besides implementing [`Factory`] for your struct it will also derive builder methods for easily customizing each field. The generated code looks something like this:
//!
//! ```
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! // This is what gets generated for each field
//! impl CountryFactory {
//! fn name<T: Into<String>>(mut self, new: T) -> Self {
//! self.name = new.into();
//! self
//! }
//! }
//! #
//! # impl Default for CountryFactory {
//! # fn default() -> Self {
//! # CountryFactory { name: String::new() }
//! # }
//! # }
//!
//! // So you can do this
//! CountryFactory::default().name("Amsterdam");
//! ```
//!
//! [`Factory`]: trait.Factory.html
//!
//! ### Builder methods for associations
//!
//! The builder methods generated for `Association` fields are a bit different. If you have a factory like:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! You'll be able to call `country` either with an owned `CountryFactory`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country_factory = CountryFactory::default();
//! CityFactory::default().country(country_factory);
//! # }
//! ```
//!
//! Or a borrowed `Country`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country = Country { id: 1, name: "Denmark".into() };
//! CityFactory::default().country(&country);
//! # }
//! ```
//!
//! This should prevent bugs where you have multiple factory instances sharing some association that you mutate halfway through a test.
//!
//! ### Optional associations
//!
//! If your model has a nullable association you can do this:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup_with_city_factory.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = User,
//! table = crate::schema::users,
//! )]
//! struct UserFactory<'a> {
//! pub name: String,
//! pub country: Option<Association<'a, Country, CountryFactory>>,
//! # pub age: i32,
//! # pub home_city: Option<Association<'a, City, CityFactory<'a>>>,
//! # pub current_city: Option<Association<'a, City, CityFactory<'a>>>,
//! }
//!
//! impl<'a> Default for UserFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Bob".into(),
//! country: None,
//! # age: 30,
//! # home_city: None,
//! # current_city: None,
//! }
//! }
//! }
//!
//! # fn main() {
//! // Setting `country` to a `CountryFactory`
//! let country_factory = CountryFactory::default();
//! UserFactory::default().country(Some(country_factory));
//!
//! // Setting `country` to a `Country`
//! let country = Country { id: 1, name: "Denmark".into() };
//! UserFactory::default().country(Some(&country));
//!
//! // Setting `country` to `None`
//! UserFactory::default().country(Option::<CountryFactory>::None);
//! UserFactory::default().country(Option::<&Country>::None);
//! # }
//! ```
//!
//! ### Customizing foreign key names
//!
//! You can customize the name of the foreign key for your associations like so
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! #[factory(foreign_key_name = country_id)]
//! pub country: Association<'a, Country, CountryFactory>,
//! # pub name: String,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
#![doc(html_root_url = "https://docs.rs/diesel-factories/2.0.0")]
#![deny(
mutable_borrow_reservation_conflict,
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_import_braces,
unused_qualifications
)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub use diesel_factories_code_gen::Factory;
/// A "belongs to" association that may or may not have been inserted yet.
///
/// You will normally be using this when setting up "belongs to" associations between models in
/// factories.
#[derive(Debug, Clone)]
pub enum Association<'a, Model, Factory> {
/// An associated model that has been inserted into the database.
///
/// You shouldn't have to use this direclty but instead just `Association::default()`.
Model(&'a Model),
/// A factory for a model that hasn't been inserted yet into the database.
///
/// You shouldn't have to use this direclty but instead just `Association::default()`.
Factory(Factory),
}
impl<Model, Factory: Default> Default for Association<'_, Model, Factory> {
fn default() -> Self {
Association::Factory(Factory::default())
}
}
impl<'a, Model, Factory> Association<'a, Model, Factory> {
#[doc(hidden)]
pub fn new_model(inner: &'a Model) -> Self {
Association::Model(inner)
}
#[doc(hidden)]
pub fn new_factory(inner: Factory) -> Self {
Association::Factory(inner)
}
}
impl<M, F> Association<'_, M, F>
where
F: Factory<Model = M> + Clone,
{
#[doc(hidden)]
pub fn insert_returning_id(&self, con: &F::Connection) -> F::Id {
match self {
Association::Model(model) => F::id_for_model(&model).clone(),
Association::Factory(factory) => |
}
}
}
/// A generic factory trait.
///
/// You shouldn't ever have to implement this trait yourself. It can be derived using
/// `#[derive(Factory)]`
///
/// See the [root module docs](/) for info on how to use `#[derive(Factory)]`.
pub trait Factory: Clone {
/// The model type the factory inserts.
///
/// For a factory named `UserFactory` this would probably be `User`.
type Model;
/// The primary key type your model uses.
///
/// This will normally be i32 or i64 but can be whatever you need.
type Id: Clone;
/// The database connection type you use such as `diesel::pg::PgConnection`.
type Connection;
/// Insert the factory into the database.
///
/// # Panics
/// This will panic if the insert fails. Should be fine since you want panics early in tests.
fn insert(self, con: &Self::Connection) -> Self::Model;
/// Get the primary key value for a model type.
///
/// Just a generic wrapper around `model.id`.
fn id_for_model(model: &Self::Model) -> &Self::Id;
}
static SEQUENCE_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Utility function for generating unique ids or strings in factories.
/// Each time `sequence` gets called, the closure will receive a different number.
///
/// ```
/// use diesel_factories::sequence;
///
/// assert_ne!(
/// sequence(|i| format!("unique-string-{}", i)),
/// sequence(|i| format!("unique-string-{}", i)),
/// );
/// ```
pub fn sequence<T, F>(f: F) -> T
where
F: Fn(usize) -> T,
{
SEQUENCE_COUNTER.fetch_add(1, Ordering::SeqCst);
let count = SEQUENCE_COUNTER.load(Ordering::Relaxed);
f(count)
}
#[cfg(test)]
mod test {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_compile_pass() {
let t = trybuild::TestCases::new();
t.pass("tests/compile_pass/*.rs");
t.compile_fail("tests/compile_fail/*.rs");
}
}
| {
let model = factory.clone().insert(con);
F::id_for_model(&model).clone()
} | conditional_block |
lib.rs | //! This is an implementation the test factory pattern made to work with [Diesel][].
//!
//! [Diesel]: https://diesel.rs
//!
//! Example usage:
//!
//! ```
//! #[macro_use]
//! extern crate diesel;
//!
//! use diesel_factories::{Association, Factory};
//! use diesel::{pg::PgConnection, prelude::*};
//!
//! // Tell Diesel what our schema is
//! // Note unusual primary key name - see options for derive macro.
//! mod schema {
//! table! {
//! countries (identity) {
//! identity -> Integer,
//! name -> Text,
//! }
//! }
//!
//! table! {
//! cities (id) {
//! id -> Integer,
//! name -> Text,
//! country_id -> Integer,
//! }
//! }
//! }
//!
//! // Our city model
//! #[derive(Clone, Queryable)]
//! struct City {
//! pub id: i32,
//! pub name: String,
//! pub country_id: i32,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! // model type our factory inserts
//! model = City,
//! // table the model belongs to
//! table = crate::schema::cities,
//! // connection type you use. Defaults to `PgConnection`
//! connection = diesel::pg::PgConnection,
//! // type of primary key. Defaults to `i32`
//! id = i32,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! // A `CityFactory` is associated to either an inserted `&'a Country` or a `CountryFactory`
//! // instance.
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//!
//! // We make new factory instances through the `Default` trait
//! impl<'a> Default for CityFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Copenhagen".to_string(),
//! // `default` will return an `Association` with a `CountryFactory`. No inserts happen
//! // here.
//! //
//! // This is the same as `Association::Factory(CountryFactory::default())`.
//! country: Association::default(),
//! }
//! }
//! }
//!
//! // The same setup, but for `Country`
//! #[derive(Clone, Queryable)]
//! struct Country {
//! pub identity: i32,
//! pub name: String,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = Country,
//! table = crate::schema::countries,
//! connection = diesel::pg::PgConnection,
//! id = i32,
//! id_name = identity,
//! )]
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! impl Default for CountryFactory {
//! fn default() -> Self {
//! Self {
//! name: "Denmark".into(),
//! }
//! }
//! }
//!
//! // Usage
//! fn basic_usage() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default().insert(&con);
//! assert_eq!("Copenhagen", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Denmark", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn setting_fields() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default()
//! .name("Amsterdam")
//! .country(CountryFactory::default().name("Netherlands"))
//! .insert(&con);
//! assert_eq!("Amsterdam", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Netherlands", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn multiple_models_with_same_association() {
//! let con = establish_connection();
//!
//! let netherlands = CountryFactory::default()
//! .name("Netherlands")
//! .insert(&con);
//!
//! let amsterdam = CityFactory::default()
//! .name("Amsterdam")
//! .country(&netherlands)
//! .insert(&con);
//!
//! let hague = CityFactory::default()
//! .name("The Hague")
//! .country(&netherlands)
//! .insert(&con);
//!
//! assert_eq!(amsterdam.country_id, hague.country_id);
//!
//! assert_eq!(2, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//! #
//! # fn main() {
//! # basic_usage();
//! # setting_fields();
//! # multiple_models_with_same_association();
//! # }
//! # fn establish_connection() -> PgConnection {
//! # use std::env;
//! # let pg_host = env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string());
//! # let pg_port = env::var("POSTGRES_PORT").unwrap_or_else(|_| "5432".to_string());
//! # let pg_password = env::var("POSTGRES_PASSWORD").ok();
//! #
//! # let auth = if let Some(pg_password) = pg_password {
//! # format!("postgres:{}@", pg_password)
//! # } else {
//! # String::new()
//! # };
//! #
//! # let database_url = format!(
//! # "postgres://{auth}{host}:{port}/diesel_factories_test",
//! # auth = auth,
//! # host = pg_host,
//! # port = pg_port
//! # );
//! # let con = PgConnection::establish(&database_url).unwrap();
//! # con.begin_test_transaction().unwrap();
//! # con
//! # }
//!
//! // Utility functions just for demo'ing
//! fn count_cities(con: &PgConnection) -> i64 {
//! use crate::schema::cities;
//! use diesel::dsl::count_star;
//! cities::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn count_countries(con: &PgConnection) -> i64 {
//! use crate::schema::countries;
//! use diesel::dsl::count_star;
//! countries::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn find_country_by_id(input: i32, con: &PgConnection) -> Country {
//! use crate::schema::countries::dsl::*;
//! countries
//! .filter(identity.eq(&input))
//! .first::<Country>(con)
//! .unwrap()
//! }
//! ```
//!
//! ## `#[derive(Factory)]`
//!
//! ### Attributes
//!
//! These attributes are available on the struct itself inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `model` | Model type your factory inserts | `City` | None, required |
//! | `table` | Table your model belongs to | `crate::schema::cities` | None, required |
//! | `connection` | The connection type your app uses | `MysqlConnection` | `diesel::pg::PgConnection` |
//! | `id` | The type of your table's primary key | `i64` | `i32` |
//! | `id_name` | The name of your table's primary key column | `identity` | `id` |
//!
//! These attributes are available on association fields inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `foreign_key_name` | Name of the foreign key column on your model | `country_identity` | `{association_name}_id` |
//!
//! ### Builder methods
//!
//! Besides implementing [`Factory`] for your struct it will also derive builder methods for easily customizing each field. The generated code looks something like this:
//!
//! ```
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! // This is what gets generated for each field
//! impl CountryFactory {
//! fn name<T: Into<String>>(mut self, new: T) -> Self {
//! self.name = new.into();
//! self
//! }
//! }
//! #
//! # impl Default for CountryFactory {
//! # fn default() -> Self {
//! # CountryFactory { name: String::new() }
//! # }
//! # }
//!
//! // So you can do this
//! CountryFactory::default().name("Amsterdam");
//! ```
//!
//! [`Factory`]: trait.Factory.html
//!
//! ### Builder methods for associations
//!
//! The builder methods generated for `Association` fields are a bit different. If you have a factory like:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! You'll be able to call `country` either with an owned `CountryFactory`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country_factory = CountryFactory::default();
//! CityFactory::default().country(country_factory);
//! # }
//! ```
//!
//! Or a borrowed `Country`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country = Country { id: 1, name: "Denmark".into() };
//! CityFactory::default().country(&country);
//! # }
//! ```
//!
//! This should prevent bugs where you have multiple factory instances sharing some association that you mutate halfway through a test.
//!
//! ### Optional associations
//!
//! If your model has a nullable association you can do this:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup_with_city_factory.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = User,
//! table = crate::schema::users,
//! )]
//! struct UserFactory<'a> {
//! pub name: String,
//! pub country: Option<Association<'a, Country, CountryFactory>>,
//! # pub age: i32,
//! # pub home_city: Option<Association<'a, City, CityFactory<'a>>>,
//! # pub current_city: Option<Association<'a, City, CityFactory<'a>>>,
//! }
//!
//! impl<'a> Default for UserFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Bob".into(),
//! country: None,
//! # age: 30,
//! # home_city: None,
//! # current_city: None,
//! }
//! }
//! }
//!
//! # fn main() {
//! // Setting `country` to a `CountryFactory`
//! let country_factory = CountryFactory::default();
//! UserFactory::default().country(Some(country_factory));
//!
//! // Setting `country` to a `Country`
//! let country = Country { id: 1, name: "Denmark".into() };
//! UserFactory::default().country(Some(&country));
//!
//! // Setting `country` to `None`
//! UserFactory::default().country(Option::<CountryFactory>::None);
//! UserFactory::default().country(Option::<&Country>::None);
//! # }
//! ```
//!
//! ### Customizing foreign key names
//!
//! You can customize the name of the foreign key for your associations like so
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! #[factory(foreign_key_name = country_id)]
//! pub country: Association<'a, Country, CountryFactory>,
//! # pub name: String,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
#![doc(html_root_url = "https://docs.rs/diesel-factories/2.0.0")]
#![deny(
mutable_borrow_reservation_conflict,
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_import_braces,
unused_qualifications
)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub use diesel_factories_code_gen::Factory;
/// A "belongs to" association that may or may not have been inserted yet.
///
/// You will normally be using this when setting up "belongs to" associations between models in
/// factories.
#[derive(Debug, Clone)]
pub enum Association<'a, Model, Factory> {
/// An associated model that has been inserted into the database.
///
/// You shouldn't have to use this direclty but instead just `Association::default()`.
Model(&'a Model),
/// A factory for a model that hasn't been inserted yet into the database.
///
/// You shouldn't have to use this direclty but instead just `Association::default()`.
Factory(Factory),
}
impl<Model, Factory: Default> Default for Association<'_, Model, Factory> {
fn default() -> Self {
Association::Factory(Factory::default())
}
}
impl<'a, Model, Factory> Association<'a, Model, Factory> {
#[doc(hidden)]
pub fn new_model(inner: &'a Model) -> Self |
#[doc(hidden)]
pub fn new_factory(inner: Factory) -> Self {
Association::Factory(inner)
}
}
impl<M, F> Association<'_, M, F>
where
F: Factory<Model = M> + Clone,
{
#[doc(hidden)]
pub fn insert_returning_id(&self, con: &F::Connection) -> F::Id {
match self {
Association::Model(model) => F::id_for_model(&model).clone(),
Association::Factory(factory) => {
let model = factory.clone().insert(con);
F::id_for_model(&model).clone()
}
}
}
}
/// A generic factory trait.
///
/// You shouldn't ever have to implement this trait yourself. It can be derived using
/// `#[derive(Factory)]`
///
/// See the [root module docs](/) for info on how to use `#[derive(Factory)]`.
pub trait Factory: Clone {
/// The model type the factory inserts.
///
/// For a factory named `UserFactory` this would probably be `User`.
type Model;
/// The primary key type your model uses.
///
/// This will normally be i32 or i64 but can be whatever you need.
type Id: Clone;
/// The database connection type you use such as `diesel::pg::PgConnection`.
type Connection;
/// Insert the factory into the database.
///
/// # Panics
/// This will panic if the insert fails. Should be fine since you want panics early in tests.
fn insert(self, con: &Self::Connection) -> Self::Model;
/// Get the primary key value for a model type.
///
/// Just a generic wrapper around `model.id`.
fn id_for_model(model: &Self::Model) -> &Self::Id;
}
static SEQUENCE_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Utility function for generating unique ids or strings in factories.
/// Each time `sequence` gets called, the closure will receive a different number.
///
/// ```
/// use diesel_factories::sequence;
///
/// assert_ne!(
/// sequence(|i| format!("unique-string-{}", i)),
/// sequence(|i| format!("unique-string-{}", i)),
/// );
/// ```
pub fn sequence<T, F>(f: F) -> T
where
F: Fn(usize) -> T,
{
SEQUENCE_COUNTER.fetch_add(1, Ordering::SeqCst);
let count = SEQUENCE_COUNTER.load(Ordering::Relaxed);
f(count)
}
#[cfg(test)]
mod test {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_compile_pass() {
let t = trybuild::TestCases::new();
t.pass("tests/compile_pass/*.rs");
t.compile_fail("tests/compile_fail/*.rs");
}
}
| {
Association::Model(inner)
} | identifier_body |
lib.rs | //! This is an implementation the test factory pattern made to work with [Diesel][].
//!
//! [Diesel]: https://diesel.rs
//!
//! Example usage:
//!
//! ```
//! #[macro_use]
//! extern crate diesel;
//!
//! use diesel_factories::{Association, Factory};
//! use diesel::{pg::PgConnection, prelude::*};
//!
//! // Tell Diesel what our schema is
//! // Note unusual primary key name - see options for derive macro.
//! mod schema {
//! table! {
//! countries (identity) {
//! identity -> Integer,
//! name -> Text,
//! }
//! }
//!
//! table! {
//! cities (id) {
//! id -> Integer,
//! name -> Text,
//! country_id -> Integer,
//! }
//! }
//! }
//!
//! // Our city model
//! #[derive(Clone, Queryable)]
//! struct City {
//! pub id: i32,
//! pub name: String,
//! pub country_id: i32,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! // model type our factory inserts
//! model = City,
//! // table the model belongs to
//! table = crate::schema::cities,
//! // connection type you use. Defaults to `PgConnection`
//! connection = diesel::pg::PgConnection,
//! // type of primary key. Defaults to `i32`
//! id = i32,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! // A `CityFactory` is associated to either an inserted `&'a Country` or a `CountryFactory`
//! // instance.
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//!
//! // We make new factory instances through the `Default` trait
//! impl<'a> Default for CityFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Copenhagen".to_string(),
//! // `default` will return an `Association` with a `CountryFactory`. No inserts happen
//! // here.
//! //
//! // This is the same as `Association::Factory(CountryFactory::default())`.
//! country: Association::default(),
//! }
//! }
//! }
//!
//! // The same setup, but for `Country`
//! #[derive(Clone, Queryable)]
//! struct Country {
//! pub identity: i32,
//! pub name: String,
//! }
//!
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = Country,
//! table = crate::schema::countries,
//! connection = diesel::pg::PgConnection,
//! id = i32,
//! id_name = identity,
//! )]
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! impl Default for CountryFactory {
//! fn default() -> Self {
//! Self {
//! name: "Denmark".into(),
//! }
//! }
//! }
//!
//! // Usage
//! fn basic_usage() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default().insert(&con);
//! assert_eq!("Copenhagen", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Denmark", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn setting_fields() {
//! let con = establish_connection();
//!
//! let city = CityFactory::default()
//! .name("Amsterdam")
//! .country(CountryFactory::default().name("Netherlands"))
//! .insert(&con);
//! assert_eq!("Amsterdam", city.name);
//!
//! let country = find_country_by_id(city.country_id, &con);
//! assert_eq!("Netherlands", country.name);
//!
//! assert_eq!(1, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//!
//! fn multiple_models_with_same_association() {
//! let con = establish_connection();
//!
//! let netherlands = CountryFactory::default()
//! .name("Netherlands")
//! .insert(&con);
//!
//! let amsterdam = CityFactory::default()
//! .name("Amsterdam")
//! .country(&netherlands)
//! .insert(&con);
//!
//! let hague = CityFactory::default()
//! .name("The Hague")
//! .country(&netherlands)
//! .insert(&con);
//!
//! assert_eq!(amsterdam.country_id, hague.country_id);
//!
//! assert_eq!(2, count_cities(&con));
//! assert_eq!(1, count_countries(&con));
//! }
//! #
//! # fn main() {
//! # basic_usage();
//! # setting_fields();
//! # multiple_models_with_same_association();
//! # }
//! # fn establish_connection() -> PgConnection {
//! # use std::env;
//! # let pg_host = env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string());
//! # let pg_port = env::var("POSTGRES_PORT").unwrap_or_else(|_| "5432".to_string());
//! # let pg_password = env::var("POSTGRES_PASSWORD").ok();
//! #
//! # let auth = if let Some(pg_password) = pg_password {
//! # format!("postgres:{}@", pg_password)
//! # } else {
//! # String::new()
//! # };
//! #
//! # let database_url = format!(
//! # "postgres://{auth}{host}:{port}/diesel_factories_test",
//! # auth = auth,
//! # host = pg_host,
//! # port = pg_port
//! # );
//! # let con = PgConnection::establish(&database_url).unwrap();
//! # con.begin_test_transaction().unwrap();
//! # con
//! # }
//!
//! // Utility functions just for demo'ing
//! fn count_cities(con: &PgConnection) -> i64 {
//! use crate::schema::cities;
//! use diesel::dsl::count_star;
//! cities::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn count_countries(con: &PgConnection) -> i64 {
//! use crate::schema::countries;
//! use diesel::dsl::count_star;
//! countries::table.select(count_star()).first(con).unwrap()
//! }
//!
//! fn find_country_by_id(input: i32, con: &PgConnection) -> Country {
//! use crate::schema::countries::dsl::*;
//! countries
//! .filter(identity.eq(&input))
//! .first::<Country>(con)
//! .unwrap()
//! }
//! ```
//!
//! ## `#[derive(Factory)]`
//!
//! ### Attributes
//!
//! These attributes are available on the struct itself inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `model` | Model type your factory inserts | `City` | None, required |
//! | `table` | Table your model belongs to | `crate::schema::cities` | None, required |
//! | `connection` | The connection type your app uses | `MysqlConnection` | `diesel::pg::PgConnection` |
//! | `id` | The type of your table's primary key | `i64` | `i32` |
//! | `id_name` | The name of your table's primary key column | `identity` | `id` |
//!
//! These attributes are available on association fields inside `#[factory(...)]`.
//!
//! | Name | Description | Example | Default |
//! |---|---|---|---|
//! | `foreign_key_name` | Name of the foreign key column on your model | `country_identity` | `{association_name}_id` |
//!
//! ### Builder methods
//!
//! Besides implementing [`Factory`] for your struct it will also derive builder methods for easily customizing each field. The generated code looks something like this:
//!
//! ```
//! struct CountryFactory {
//! pub name: String,
//! }
//!
//! // This is what gets generated for each field
//! impl CountryFactory {
//! fn name<T: Into<String>>(mut self, new: T) -> Self {
//! self.name = new.into();
//! self
//! }
//! }
//! #
//! # impl Default for CountryFactory {
//! # fn default() -> Self {
//! # CountryFactory { name: String::new() }
//! # }
//! # }
//!
//! // So you can do this
//! CountryFactory::default().name("Amsterdam");
//! ```
//!
//! [`Factory`]: trait.Factory.html
//!
//! ### Builder methods for associations
//!
//! The builder methods generated for `Association` fields are a bit different. If you have a factory like:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! pub name: String,
//! pub country: Association<'a, Country, CountryFactory>,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! You'll be able to call `country` either with an owned `CountryFactory`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country_factory = CountryFactory::default();
//! CityFactory::default().country(country_factory);
//! # }
//! ```
//!
//! Or a borrowed `Country`:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! # #[derive(Clone, Factory)]
//! # #[factory(
//! # model = City,
//! # table = crate::schema::cities,
//! # )]
//! # struct CityFactory<'a> {
//! # pub name: String,
//! # pub country: Association<'a, Country, CountryFactory>,
//! # }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # Self {
//! # name: String::new(), country: Association::default(),
//! # }
//! # }
//! # }
//! #
//! # fn main() {
//! let country = Country { id: 1, name: "Denmark".into() };
//! CityFactory::default().country(&country);
//! # }
//! ```
//!
//! This should prevent bugs where you have multiple factory instances sharing some association that you mutate halfway through a test.
//!
//! ### Optional associations
//!
//! If your model has a nullable association you can do this:
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup_with_city_factory.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = User,
//! table = crate::schema::users,
//! )]
//! struct UserFactory<'a> {
//! pub name: String,
//! pub country: Option<Association<'a, Country, CountryFactory>>,
//! # pub age: i32,
//! # pub home_city: Option<Association<'a, City, CityFactory<'a>>>,
//! # pub current_city: Option<Association<'a, City, CityFactory<'a>>>,
//! }
//!
//! impl<'a> Default for UserFactory<'a> {
//! fn default() -> Self {
//! Self {
//! name: "Bob".into(),
//! country: None,
//! # age: 30,
//! # home_city: None,
//! # current_city: None,
//! }
//! }
//! }
//!
//! # fn main() {
//! // Setting `country` to a `CountryFactory`
//! let country_factory = CountryFactory::default();
//! UserFactory::default().country(Some(country_factory));
//!
//! // Setting `country` to a `Country`
//! let country = Country { id: 1, name: "Denmark".into() };
//! UserFactory::default().country(Some(&country));
//!
//! // Setting `country` to `None`
//! UserFactory::default().country(Option::<CountryFactory>::None);
//! UserFactory::default().country(Option::<&Country>::None);
//! # }
//! ```
//!
//! ### Customizing foreign key names
//!
//! You can customize the name of the foreign key for your associations like so
//!
//! ```
//! # #![allow(unused_imports)]
//! # include!("../tests/docs_setup.rs");
//! #
//! #[derive(Clone, Factory)]
//! #[factory(
//! model = City,
//! table = crate::schema::cities,
//! )]
//! struct CityFactory<'a> {
//! #[factory(foreign_key_name = country_id)]
//! pub country: Association<'a, Country, CountryFactory>,
//! # pub name: String,
//! }
//! #
//! # impl<'a> Default for CityFactory<'a> {
//! # fn default() -> Self {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # fn main() {}
//! ```
#![doc(html_root_url = "https://docs.rs/diesel-factories/2.0.0")]
#![deny(
mutable_borrow_reservation_conflict,
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_import_braces,
unused_qualifications
)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub use diesel_factories_code_gen::Factory;
/// A "belongs to" association that may or may not have been inserted yet.
///
/// You will normally be using this when setting up "belongs to" associations between models in
/// factories.
#[derive(Debug, Clone)]
pub enum Association<'a, Model, Factory> {
    /// An associated model that has been inserted into the database.
    ///
    /// You shouldn't have to use this directly but instead just `Association::default()`.
    Model(&'a Model),
    /// A factory for a model that hasn't been inserted yet into the database.
    ///
    /// You shouldn't have to use this directly but instead just `Association::default()`.
    Factory(Factory),
}
impl<Model, Factory: Default> Default for Association<'_, Model, Factory> {
fn default() -> Self {
Association::Factory(Factory::default())
}
}
impl<'a, Model, Factory> Association<'a, Model, Factory> {
#[doc(hidden)]
pub fn new_model(inner: &'a Model) -> Self {
Association::Model(inner)
}
#[doc(hidden)]
pub fn new_factory(inner: Factory) -> Self {
Association::Factory(inner)
}
}
impl<M, F> Association<'_, M, F>
where
F: Factory<Model = M> + Clone,
{
#[doc(hidden)]
pub fn | (&self, con: &F::Connection) -> F::Id {
match self {
Association::Model(model) => F::id_for_model(&model).clone(),
Association::Factory(factory) => {
let model = factory.clone().insert(con);
F::id_for_model(&model).clone()
}
}
}
}
/// A generic factory trait.
///
/// You shouldn't ever have to implement this trait yourself. It can be derived using
/// `#[derive(Factory)]`
///
/// See the [root module docs](/) for info on how to use `#[derive(Factory)]`.
pub trait Factory: Clone {
    /// The model type the factory inserts.
    ///
    /// For a factory named `UserFactory` this would probably be `User`.
    type Model;
    /// The primary key type your model uses.
    ///
    /// This will normally be `i32` or `i64` but can be whatever you need.
    type Id: Clone;
    /// The database connection type you use such as `diesel::pg::PgConnection`.
    type Connection;
    /// Insert the factory into the database.
    ///
    /// # Panics
    /// This will panic if the insert fails. Should be fine since you want panics early in tests.
    fn insert(self, con: &Self::Connection) -> Self::Model;
    /// Get the primary key value for a model type.
    ///
    /// Just a generic wrapper around `model.id`.
    fn id_for_model(model: &Self::Model) -> &Self::Id;
}
static SEQUENCE_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Utility function for generating unique ids or strings in factories.
/// Each time `sequence` gets called, the closure will receive a different number.
///
/// ```
/// use diesel_factories::sequence;
///
/// assert_ne!(
///     sequence(|i| format!("unique-string-{}", i)),
///     sequence(|i| format!("unique-string-{}", i)),
/// );
/// ```
pub fn sequence<T, F>(f: F) -> T
where
    F: Fn(usize) -> T,
{
    // Use the value returned by `fetch_add` directly. The previous
    // increment-then-separate-load pattern was racy: two threads could both
    // increment and then load the same counter value, so the "unique" numbers
    // were not guaranteed unique. `fetch_add` returns the prior value
    // atomically; adding 1 preserves the old single-threaded numbering
    // (first call still yields 1).
    let count = SEQUENCE_COUNTER.fetch_add(1, Ordering::SeqCst) + 1;
    f(count)
}
#[cfg(test)]
mod test {
    #[allow(unused_imports)]
    use super::*;
    // Compile-time UI tests driven by the `trybuild` crate: every file under
    // `tests/compile_pass/` must build successfully, and every file under
    // `tests/compile_fail/` must produce a compiler error, asserting that the
    // derive macro both accepts valid and rejects invalid input.
    #[test]
    fn test_compile_pass() {
        let t = trybuild::TestCases::new();
        t.pass("tests/compile_pass/*.rs");
        t.compile_fail("tests/compile_fail/*.rs");
    }
}
| insert_returning_id | identifier_name |
msi.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
    """
    Known data types for MSI table cell entries.
    """
    # Values mirror the column attribute bit patterns used by MSI; see
    # msipriv.h in ReactOS/Wine for the underlying constants.
    Long = 0x104             # 4-byte integer column
    Short = 0x502            # 2-byte integer column
    Binary = 0x900           # binary stream / object column
    String = 0xD00           # plain string column
    StringLocalized = 0xF00  # localized string column
    Unknown = 0              # fallback when the attribute mask is unrecognized
    def __str__(self):
        # Show just the bare name (e.g. "Long") instead of "MsiType.Long".
        return self.name
class MSITableColumnInfo(NamedTuple):
    """
    Metadata for a single column of an MSI table; the attribute bit layout
    follows https://doxygen.reactos.org/db/de4/msipriv_8h.html
    """
    number: int
    attributes: int

    @property
    def type(self) -> MsiType:
        # Integer columns carry their type in the low 12 bits; all other
        # columns only in bits 8-11.
        mask = 0xFFF if self.is_integer else 0xF00
        try:
            return MsiType(self.attributes & mask)
        except Exception:
            return MsiType.Unknown

    @property
    def is_integer(self) -> bool:
        # A type nibble below 8 denotes a numeric column.
        return (self.attributes & 0x0F00) < 0x800

    @property
    def is_key(self) -> bool:
        # Primary-key flag bit.
        return bool(self.attributes & 0x2000)

    @property
    def is_nullable(self) -> bool:
        # Nullable-column flag bit.
        return bool(self.attributes & 0x1000)

    @property
    def length(self) -> int:
        # Fixed byte widths for the integer types; otherwise the low byte of
        # the attribute field stores the declared column length.
        fixed = {MsiType.Long: 4, MsiType.Short: 2}
        return fixed.get(self.type, self.attributes & 0xFF)

    @property
    def struct_format(self) -> str:
        # Raw rows store 4-byte integers as 'I'; everything else (including
        # string-pool references) occupies 16 bits.
        return 'I' if self.type is MsiType.Long else 'H'
class MSIStringData:
    """
    Parser for the MSI shared string table, built from the `!_StringData` and
    `!_StringPool` streams. The pool stream holds `(size, refcount)` pairs and
    the data stream holds the concatenated string bytes in the same order.
    String references used throughout the MSI tables are 1-based indices into
    this table.
    """
    def __init__(self, string_data: ByteStr, string_pool: ByteStr):
        data = StructReader(string_data)
        pool = StructReader(string_pool)
        self.strings: List[bytes] = []
        # Reference counts as stored in the pool stream.
        self.provided_ref_count: List[int] = []
        # Reference counts observed while parsing; compared against the
        # provided counts later to flag inconsistencies.
        self.computed_ref_count: List[int] = []
        # The first pool record is (codepage, unknown) rather than a string
        # entry.
        self.codepage = pool.u16()
        self._unknown = pool.u16()  # purpose unclear; kept for completeness
        while not pool.eof:
            size, rc = pool.read_struct('<HH')
            string = data.read_bytes(size)
            self.strings.append(string)
            self.provided_ref_count.append(rc)
            self.computed_ref_count.append(0)
    @cached_property
    def codec(self):
        # Resolve the MSI codepage to a Python codec name; fall back to latin1
        # so decoding never raises.
        try:
            return codecs.lookup(F'cp{self.codepage}').name
        except Exception:
            # NOTE(review): log_info is invoked on the xtmsi class rather than
            # an instance; this only works if it is usable as a class-level
            # logger -- confirm against the refinery Unit implementation.
            xtmsi.log_info('failed looking up codec', self.codepage)
            return 'latin1'
    def __len__(self):
        # Number of strings in the table.
        return len(self.strings)
    def __iter__(self):
        # Iterate over all valid (1-based) string reference indices.
        yield from range(1, len(self) + 1)
    def __contains__(self, index):
        # True when `index` is a valid 1-based string reference.
        return 0 < index <= len(self)
    def ref(self, index: int, increment=True) -> Union[str, bytes]:
        """
        Resolve a 1-based string reference to its decoded text. When
        `increment` is set, the computed reference count for that string is
        bumped.
        """
        assert index > 0
        index -= 1
        if increment:
            self.computed_ref_count[index] += 1
        data = self.strings[index]
        data = data.decode(self.codec)
        return data
class xtmsi(xtdoc):
    """
    Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
    parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
    virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
    a virtual folder named "Action".
    """
    # Name of the synthetic JSON document aggregating all parsed tables.
    _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
    # https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types
    _CUSTOM_ACTION_TYPES = {
        0x01: 'DLL file stored in a Binary table stream.',
        0x02: 'EXE file stored in a Binary table stream.',
        0x05: 'JScript file stored in a Binary table stream.',
        0x06: 'VBScript file stored in a Binary table stream.',
        0x11: 'DLL file that is installed with a product.',
        0x12: 'EXE file that is installed with a product.',
        0x13: 'Displays a specified error message and returns failure, terminating the installation.',
        0x15: 'JScript file that is installed with a product.',
        0x16: 'VBScript file that is installed with a product.',
        0x22: 'EXE file having a path referencing a directory.',
        0x23: 'Directory set with formatted text.',
        0x25: 'JScript text stored in this sequence table.',
        0x26: 'VBScript text stored in this sequence table.',
        0x32: 'EXE file having a path specified by a property value.',
        0x33: 'Property set with formatted text.',
        0x35: 'JScript text specified by a property value.',
        0x36: 'VBScript text specified by a property value.',
    }

    def unpack(self, data):
        """
        Unpack the OLE container via the parent class, then parse the MSI
        string pool and table streams into a JSON summary plus synthetic
        script files for scripted custom actions.
        """
        streams = {result.path: result for result in super().unpack(data)}

        def stream(name: str):
            # Remove a metadata stream from the result set and return its
            # contents; streams consumed here are not re-emitted as files.
            return streams.pop(name).get_data()

        def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:
            # Concatenated struct codes describing one row of the table.
            return ''.join(v.struct_format for v in table.values())

        def stream_to_rows(data: ByteStr, row_format: str):
            # MSI stores tables column-major: all values of the first column,
            # then all values of the second, and so on. Read each column
            # array, then transpose into rows.
            row_size = struct.calcsize(F'<{row_format}')
            row_count = int(len(data) / row_size)
            reader = StructReader(data)
            columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]
            for i in range(row_count):
                yield [c[i] for c in columns]

        tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)
        strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
        # The !_Columns stream lists every column of every table.
        for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):
            tbl_name = strings.ref(tbl_name_id)
            col_name = strings.ref(col_name_id)
            tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)
        # Cross-check the table list in !_Tables against the column data.
        table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}
        table_names_known = set(tables)
        for name in table_names_known - table_names_given:
            self.log_warn(F'table name known but not given: {name}')
        for name in table_names_given - table_names_known:
            self.log_warn(F'table name given but not known: {name}')

        class ScriptItem(NamedTuple):
            # Index into the processed CustomAction rows, and the script file
            # extension; None means the target is property-formatted text that
            # must be expanded before scripts can be carved out of it.
            row_index: int
            extension: Optional[str]

        processed_table_data: Dict[str, List[Dict[str, str]]] = {}
        tbl_properties: Dict[str, str] = {}
        tbl_files: Dict[str, str] = {}
        tbl_components: Dict[str, str] = {}
        postprocessing: List[ScriptItem] = []

        def format_string(string: str):
            # Expand the MSI "Formatted" syntax, resolving [Property],
            # [%ENVVAR], [!File]/[#File] and [$Component] references:
            # https://learn.microsoft.com/en-us/windows/win32/msi/formatted
            def _replace(match: re.Match[str]):
                _replace.done = False
                prefix, name = match.groups()
                if not prefix:
                    tbl = tbl_properties
                elif prefix in '%':
                    name = name.rstrip('%').upper()
                    return F'%{name}%'
                elif prefix in '!#':
                    tbl = tbl_files
                elif prefix in '$':
                    tbl = tbl_components
                else:
                    raise ValueError
                return tbl.get(name, '')
            # Re-run substitution until a fixed point, since resolved values
            # may themselves contain formatted references.
            while True:
                _replace.done = True
                string = re.sub(R'''(?x)
                    \[ # open square brackent
                    (?![~\\]) # not followed by escapes
                    ([%$!#]?) # any of the valid prefix characters
                    ([^[\]{}]+) # no brackets or braces
                    \]''', _replace, string)
                if _replace.done:
                    break
            string = re.sub(r'\[\\(.)\]', r'\1', string)
            string = string.replace('[~]', '\0')
            return string

        for table_name, table in tables.items():
            stream_name = F'!{table_name}'
            if stream_name not in streams:
                continue
            processed = []
            info = list(table.values())
            for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):
                values = []
                for index, value in enumerate(row):
                    vt = info[index].type
                    if vt is MsiType.Long:
                        # Integers are stored biased; zero means NULL.
                        if value != 0:
                            value -= 0x80000000
                    elif vt is MsiType.Short:
                        if value != 0:
                            value -= 0x8000
                    elif value in strings:
                        value = strings.ref(value)
                    elif not info[index].is_integer:
                        value = ''
                    values.append(value)
                if table_name == 'Property':
                    tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    # BUGFIX: these rows previously went into tbl_properties,
                    # which left tbl_files permanently empty even though
                    # format_string resolves [!File]/[#File] references
                    # through it.
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    # BUGFIX: same as above for [$Component] references.
                    tbl_components[values[0]] = F'%{values[2]}%'
                entry = dict(zip(table, values))
                einfo = {t: i for t, i in zip(table, info)}
                if table_name == 'MsiFileHash':
                    # Undo the integer bias and render the 128-bit hash as hex.
                    entry['Hash'] = struct.pack(
                        '<IIII',
                        row[2] ^ 0x80000000,
                        row[3] ^ 0x80000000,
                        row[4] ^ 0x80000000,
                        row[5] ^ 0x80000000,
                    ).hex()
                if table_name == 'CustomAction':
                    code = row[1] & 0x3F
                    try:
                        entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
                    except LookupError:
                        pass
                    t = einfo.get('Target')
                    # 0x25/0x26 embed a script directly; 0x33 sets a property
                    # with formatted text that may contain scripts.
                    c = {0x25: 'js', 0x26: 'vbs', 0x33: None}
                    if code in c and t and not t.is_integer:
                        postprocessing.append(ScriptItem(r, c[code]))
                processed.append(entry)
            if processed:
                processed_table_data[table_name] = processed

        # Carve scripts out of the custom actions collected above.
        ca = processed_table_data.get('CustomAction', None)
        for item in postprocessing:
            entry = ca[item.row_index]
            try:
                path: str = entry['Action']
                data: str = entry['Target']
            except KeyError:
                continue
            root = F'Action/{path}'
            if item.extension:
                path = F'{root}.{item.extension}'
                streams[path] = UnpackResult(path, data.encode(self.codec))
                continue
            # Type 0x33: expand formatted text, then look for embedded
            # name\x02value pairs separated by \x01 whose names start with
            # "script".
            data = format_string(data)
            parts = [part.partition('\x02') for part in data.split('\x01')]
            if not all(part[1] == '\x02' for part in parts):
                continue
            for name, _, script in parts:
                if not name.lower().startswith('script'):
                    continue
                if not script:
                    continue
                path = F'{root}.{name}'
                streams[path] = UnpackResult(path, script.encode(self.codec))

        # Drop OLE bookkeeping streams that carry no extractable payload.
        for ignored_stream in [
            '[5]SummaryInformation',
            '[5]DocumentSummaryInformation',
            '[5]DigitalSignature',
            '[5]MsiDigitalSignatureEx'
        ]:
            streams.pop(ignored_stream, None)

        # Sanity check: compare observed against stored reference counts.
        inconsistencies = 0
        for k in range(len(strings)):
            c = strings.computed_ref_count[k]
            p = strings.provided_ref_count[k]
            if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):
                inconsistencies += 1
        if inconsistencies:
            self.log_info(F'found {inconsistencies} incorrect string reference counts')

        def fix_msi_path(path: str):
            # Move streams named "Binary.<name>" into a "Binary/" folder.
            prefix, dot, name = path.partition('.')
            if dot == '.' and prefix.lower() == 'binary':
                path = F'{prefix}/{name}'
            return path

        streams = {fix_msi_path(path): item for path, item in streams.items()}
        ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,
            json.dumps(processed_table_data, indent=4).encode(self.codec))
        streams[ds.path] = ds
        for path in sorted(streams):
            streams[path].path = path
            yield streams[path]

    @classmethod
    def handles(cls, data: bytearray):
        """
        Recognize MSI input: require the OLE compound file magic, then confirm
        the mime sniffer classifies the data as MSI.
        """
        if not data.startswith(B'\xD0\xCF\x11\xE0'):
            return False
        return FileMagicInfo(data).extension == 'msi'
# Substitute the synthetic table file name into the class docstring (the {FN}
# placeholder) so the unit's help text shows the actual file name.
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
| row_index: int
extension: Optional[str] | identifier_body |
msi.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
    """
    Known data types for MSI table cell entries.
    """
    # Values mirror the column attribute bit patterns used by MSI; see
    # msipriv.h in ReactOS/Wine for the underlying constants.
    Long = 0x104             # 4-byte integer column
    Short = 0x502            # 2-byte integer column
    Binary = 0x900           # binary stream / object column
    String = 0xD00           # plain string column
    StringLocalized = 0xF00  # localized string column
    Unknown = 0              # fallback when the attribute mask is unrecognized
    def __str__(self):
        # Show just the bare name (e.g. "Long") instead of "MsiType.Long".
        return self.name
class MSITableColumnInfo(NamedTuple):
    """
    Represents information about an MSI table column. See also:
    https://doxygen.reactos.org/db/de4/msipriv_8h.html
    """
    number: int      # 1-based position of the column within its table
    attributes: int  # attribute bit field encoding type, size and flags
    @property
    def type(self) -> MsiType:
        # Integer columns carry their type in the low 12 bits; all other
        # columns only in bits 8-11.
        try:
            if self.is_integer:
                return MsiType(self.attributes & 0xFFF)
            else:
                return MsiType(self.attributes & 0xF00)
        except Exception:
            return MsiType.Unknown
    @property
    def is_integer(self) -> bool:
        # A type nibble below 8 denotes a numeric column.
        return self.attributes & 0x0F00 < 0x800
    @property
    def is_key(self) -> bool:
        # Primary-key flag bit.
        return self.attributes & 0x2000 == 0x2000
    @property
    def is_nullable(self) -> bool:
        # Nullable-column flag bit.
        return self.attributes & 0x1000 == 0x1000
    @property
    def length(self) -> int:
        # Fixed byte widths for the integer types; otherwise the low byte of
        # the attribute field stores the declared column length.
        vt = self.type
        if vt is MsiType.Long:
            return 4
        if vt is MsiType.Short:
            return 2
        return self.attributes & 0xFF
    @property
    def struct_format(self) -> str:
        # Raw rows store 4-byte integers as 'I'; everything else (including
        # string-pool references) occupies 16 bits.
        vt = self.type
        if vt is MsiType.Long:
            return 'I'
        elif vt is MsiType.Short:
            return 'H'
        else:
            return 'H'
class MSIStringData:
    """
    Parser for the MSI shared string table, built from the `!_StringData` and
    `!_StringPool` streams. The pool stream holds `(size, refcount)` pairs and
    the data stream holds the concatenated string bytes in the same order.
    String references used throughout the MSI tables are 1-based indices into
    this table.
    """
    def __init__(self, string_data: ByteStr, string_pool: ByteStr):
        data = StructReader(string_data)
        pool = StructReader(string_pool)
        self.strings: List[bytes] = []
        # Reference counts as stored in the pool stream.
        self.provided_ref_count: List[int] = []
        # Reference counts observed while parsing; compared against the
        # provided counts later to flag inconsistencies.
        self.computed_ref_count: List[int] = []
        # The first pool record is (codepage, unknown) rather than a string
        # entry.
        self.codepage = pool.u16()
        self._unknown = pool.u16()  # purpose unclear; kept for completeness
        while not pool.eof:
            size, rc = pool.read_struct('<HH')
            string = data.read_bytes(size)
            self.strings.append(string)
            self.provided_ref_count.append(rc)
            self.computed_ref_count.append(0)
    @cached_property
    def codec(self):
        # Resolve the MSI codepage to a Python codec name; fall back to latin1
        # so decoding never raises.
        try:
            return codecs.lookup(F'cp{self.codepage}').name
        except Exception:
            # NOTE(review): log_info is invoked on the xtmsi class rather than
            # an instance; this only works if it is usable as a class-level
            # logger -- confirm against the refinery Unit implementation.
            xtmsi.log_info('failed looking up codec', self.codepage)
            return 'latin1'
    def __len__(self):
        # Number of strings in the table.
        return len(self.strings)
    def __iter__(self):
        # Iterate over all valid (1-based) string reference indices.
        yield from range(1, len(self) + 1)
    def __contains__(self, index):
        # True when `index` is a valid 1-based string reference.
        return 0 < index <= len(self)
    def ref(self, index: int, increment=True) -> Union[str, bytes]:
        """
        Resolve a 1-based string reference to its decoded text. When
        `increment` is set, the computed reference count for that string is
        bumped.
        """
        assert index > 0
        index -= 1
        if increment:
            self.computed_ref_count[index] += 1
        data = self.strings[index]
        data = data.decode(self.codec)
        return data
class xtmsi(xtdoc):
    """
    Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
    parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
    virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
    a virtual folder named "Action".
    """
    # Name of the synthetic JSON document aggregating all parsed tables.
    _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
    # https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types
    _CUSTOM_ACTION_TYPES = {
        0x01: 'DLL file stored in a Binary table stream.',
        0x02: 'EXE file stored in a Binary table stream.',
        0x05: 'JScript file stored in a Binary table stream.',
        0x06: 'VBScript file stored in a Binary table stream.',
        0x11: 'DLL file that is installed with a product.',
        0x12: 'EXE file that is installed with a product.',
        0x13: 'Displays a specified error message and returns failure, terminating the installation.',
        0x15: 'JScript file that is installed with a product.',
        0x16: 'VBScript file that is installed with a product.',
        0x22: 'EXE file having a path referencing a directory.',
        0x23: 'Directory set with formatted text.',
        0x25: 'JScript text stored in this sequence table.',
        0x26: 'VBScript text stored in this sequence table.',
        0x32: 'EXE file having a path specified by a property value.',
        0x33: 'Property set with formatted text.',
        0x35: 'JScript text specified by a property value.',
        0x36: 'VBScript text specified by a property value.',
    }

    def unpack(self, data):
        """
        Unpack the OLE container via the parent class, then parse the MSI
        string pool and table streams into a JSON summary plus synthetic
        script files for scripted custom actions.
        """
        streams = {result.path: result for result in super().unpack(data)}

        def stream(name: str):
            # Remove a metadata stream from the result set and return its
            # contents; streams consumed here are not re-emitted as files.
            return streams.pop(name).get_data()

        def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:
            # Concatenated struct codes describing one row of the table.
            return ''.join(v.struct_format for v in table.values())

        def stream_to_rows(data: ByteStr, row_format: str):
            # MSI stores tables column-major: all values of the first column,
            # then all values of the second, and so on. Read each column
            # array, then transpose into rows.
            row_size = struct.calcsize(F'<{row_format}')
            row_count = int(len(data) / row_size)
            reader = StructReader(data)
            columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]
            for i in range(row_count):
                yield [c[i] for c in columns]

        tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)
        strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
        # The !_Columns stream lists every column of every table. (This loop
        # body was lost to a placeholder in this chunk; restored from the
        # recorded completion.)
        for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):
            tbl_name = strings.ref(tbl_name_id)
            col_name = strings.ref(col_name_id)
            tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)
        # Cross-check the table list in !_Tables against the column data.
        table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}
        table_names_known = set(tables)
        for name in table_names_known - table_names_given:
            self.log_warn(F'table name known but not given: {name}')
        for name in table_names_given - table_names_known:
            self.log_warn(F'table name given but not known: {name}')

        class ScriptItem(NamedTuple):
            # Index into the processed CustomAction rows, and the script file
            # extension; None means the target is property-formatted text that
            # must be expanded before scripts can be carved out of it.
            row_index: int
            extension: Optional[str]

        processed_table_data: Dict[str, List[Dict[str, str]]] = {}
        tbl_properties: Dict[str, str] = {}
        tbl_files: Dict[str, str] = {}
        tbl_components: Dict[str, str] = {}
        postprocessing: List[ScriptItem] = []

        def format_string(string: str):
            # Expand the MSI "Formatted" syntax, resolving [Property],
            # [%ENVVAR], [!File]/[#File] and [$Component] references:
            # https://learn.microsoft.com/en-us/windows/win32/msi/formatted
            def _replace(match: re.Match[str]):
                _replace.done = False
                prefix, name = match.groups()
                if not prefix:
                    tbl = tbl_properties
                elif prefix in '%':
                    name = name.rstrip('%').upper()
                    return F'%{name}%'
                elif prefix in '!#':
                    tbl = tbl_files
                elif prefix in '$':
                    tbl = tbl_components
                else:
                    raise ValueError
                return tbl.get(name, '')
            # Re-run substitution until a fixed point, since resolved values
            # may themselves contain formatted references.
            while True:
                _replace.done = True
                string = re.sub(R'''(?x)
                    \[ # open square brackent
                    (?![~\\]) # not followed by escapes
                    ([%$!#]?) # any of the valid prefix characters
                    ([^[\]{}]+) # no brackets or braces
                    \]''', _replace, string)
                if _replace.done:
                    break
            string = re.sub(r'\[\\(.)\]', r'\1', string)
            string = string.replace('[~]', '\0')
            return string

        for table_name, table in tables.items():
            stream_name = F'!{table_name}'
            if stream_name not in streams:
                continue
            processed = []
            info = list(table.values())
            for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):
                values = []
                for index, value in enumerate(row):
                    vt = info[index].type
                    if vt is MsiType.Long:
                        # Integers are stored biased; zero means NULL.
                        if value != 0:
                            value -= 0x80000000
                    elif vt is MsiType.Short:
                        if value != 0:
                            value -= 0x8000
                    elif value in strings:
                        value = strings.ref(value)
                    elif not info[index].is_integer:
                        value = ''
                    values.append(value)
                if table_name == 'Property':
                    tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    # BUGFIX: these rows previously went into tbl_properties,
                    # which left tbl_files permanently empty even though
                    # format_string resolves [!File]/[#File] references
                    # through it.
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    # BUGFIX: same as above for [$Component] references.
                    tbl_components[values[0]] = F'%{values[2]}%'
                entry = dict(zip(table, values))
                einfo = {t: i for t, i in zip(table, info)}
                if table_name == 'MsiFileHash':
                    # Undo the integer bias and render the 128-bit hash as hex.
                    entry['Hash'] = struct.pack(
                        '<IIII',
                        row[2] ^ 0x80000000,
                        row[3] ^ 0x80000000,
                        row[4] ^ 0x80000000,
                        row[5] ^ 0x80000000,
                    ).hex()
                if table_name == 'CustomAction':
                    code = row[1] & 0x3F
                    try:
                        entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
                    except LookupError:
                        pass
                    t = einfo.get('Target')
                    # 0x25/0x26 embed a script directly; 0x33 sets a property
                    # with formatted text that may contain scripts.
                    c = {0x25: 'js', 0x26: 'vbs', 0x33: None}
                    if code in c and t and not t.is_integer:
                        postprocessing.append(ScriptItem(r, c[code]))
                processed.append(entry)
            if processed:
                processed_table_data[table_name] = processed

        # Carve scripts out of the custom actions collected above.
        ca = processed_table_data.get('CustomAction', None)
        for item in postprocessing:
            entry = ca[item.row_index]
            try:
                path: str = entry['Action']
                data: str = entry['Target']
            except KeyError:
                continue
            root = F'Action/{path}'
            if item.extension:
                path = F'{root}.{item.extension}'
                streams[path] = UnpackResult(path, data.encode(self.codec))
                continue
            # Type 0x33: expand formatted text, then look for embedded
            # name\x02value pairs separated by \x01 whose names start with
            # "script".
            data = format_string(data)
            parts = [part.partition('\x02') for part in data.split('\x01')]
            if not all(part[1] == '\x02' for part in parts):
                continue
            for name, _, script in parts:
                if not name.lower().startswith('script'):
                    continue
                if not script:
                    continue
                path = F'{root}.{name}'
                streams[path] = UnpackResult(path, script.encode(self.codec))

        # Drop OLE bookkeeping streams that carry no extractable payload.
        for ignored_stream in [
            '[5]SummaryInformation',
            '[5]DocumentSummaryInformation',
            '[5]DigitalSignature',
            '[5]MsiDigitalSignatureEx'
        ]:
            streams.pop(ignored_stream, None)

        # Sanity check: compare observed against stored reference counts.
        inconsistencies = 0
        for k in range(len(strings)):
            c = strings.computed_ref_count[k]
            p = strings.provided_ref_count[k]
            if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):
                inconsistencies += 1
        if inconsistencies:
            self.log_info(F'found {inconsistencies} incorrect string reference counts')

        def fix_msi_path(path: str):
            # Move streams named "Binary.<name>" into a "Binary/" folder.
            prefix, dot, name = path.partition('.')
            if dot == '.' and prefix.lower() == 'binary':
                path = F'{prefix}/{name}'
            return path

        streams = {fix_msi_path(path): item for path, item in streams.items()}
        ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,
            json.dumps(processed_table_data, indent=4).encode(self.codec))
        streams[ds.path] = ds
        for path in sorted(streams):
            streams[path].path = path
            yield streams[path]

    @classmethod
    def handles(cls, data: bytearray):
        """
        Recognize MSI input: require the OLE compound file magic, then confirm
        the mime sniffer classifies the data as MSI.
        """
        if not data.startswith(B'\xD0\xCF\x11\xE0'):
            return False
        return FileMagicInfo(data).extension == 'msi'
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
| tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes) | conditional_block |
msi.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
"""
Known data types for MSI table cell entries.
"""
Long = 0x104
Short = 0x502
Binary = 0x900
String = 0xD00
StringLocalized = 0xF00
Unknown = 0
def __str__(self):
return self.name
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) -> MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 0xFFF)
else:
return MsiType(self.attributes & 0xF00)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) -> bool:
return self.attributes & 0x0F00 < 0x800
@property
def is_key(self) -> bool:
return self.attributes & 0x2000 == 0x2000
@property
def is_nullable(self) -> bool:
return self.attributes & 0x1000 == 0x1000
@property
def length(self) -> int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 0xFF
@property
def struct_format(self) -> str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(F'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) -> Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
# https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types
_CUSTOM_ACTION_TYPES = {
0x01: 'DLL file stored in a Binary table stream.',
0x02: 'EXE file stored in a Binary table stream.',
0x05: 'JScript file stored in a Binary table stream.',
0x06: 'VBScript file stored in a Binary table stream.',
0x11: 'DLL file that is installed with a product.',
0x12: 'EXE file that is installed with a product.',
0x13: 'Displays a specified error message and returns failure, terminating the installation.',
0x15: 'JScript file that is installed with a product.',
0x16: 'VBScript file that is installed with a product.',
0x22: 'EXE file having a path referencing a directory.',
0x23: 'Directory set with formatted text.',
0x25: 'JScript text stored in this sequence table.',
0x26: 'VBScript text stored in this sequence table.',
0x32: 'EXE file having a path specified by a property value.',
0x33: 'Property set with formatted text.',
0x35: 'JScript text specified by a property value.',
0x36: 'VBScript text specified by a property value.',
}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
row_size = struct.calcsize(F'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
| table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}
table_names_known = set(tables)
for name in table_names_known - table_names_given:
self.log_warn(F'table name known but not given: {name}')
for name in table_names_given - table_names_known:
self.log_warn(F'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
# https://learn.microsoft.com/en-us/windows/win32/msi/formatted
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return F'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(R'''(?x)
\[ # open square brackent
(?![~\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\]{}]+) # no brackets or braces
\]''', _replace, string)
if _replace.done:
break
string = re.sub(r'\[\\(.)\]', r'\1', string)
string = string.replace('[~]', '\0')
return string
for table_name, table in tables.items():
stream_name = F'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 0x80000000
elif vt is MsiType.Short:
if value != 0:
value -= 0x8000
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
if table_name == 'File':
tbl_properties[values[0]] = values[2]
if table_name == 'Component':
tbl_properties[values[0]] = F'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
entry['Hash'] = struct.pack(
'<IIII',
row[2] ^ 0x80000000,
row[3] ^ 0x80000000,
row[4] ^ 0x80000000,
row[5] ^ 0x80000000,
).hex()
if table_name == 'CustomAction':
code = row[1] & 0x3F
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {0x25: 'js', 0x26: 'vbs', 0x33: None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = F'Action/{path}'
if item.extension:
path = F'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = F'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in [
'[5]SummaryInformation',
'[5]DocumentSummaryInformation',
'[5]DigitalSignature',
'[5]MsiDigitalSignatureEx'
]:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(F'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = F'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,
json.dumps(processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
if not data.startswith(B'\xD0\xCF\x11\xE0'):
return False
return FileMagicInfo(data).extension == 'msi'
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME) | tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)
| random_line_split |
msi.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
"""
Known data types for MSI table cell entries.
"""
Long = 0x104
Short = 0x502
Binary = 0x900
String = 0xD00
StringLocalized = 0xF00
Unknown = 0
def __str__(self):
return self.name
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) -> MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 0xFFF)
else:
return MsiType(self.attributes & 0xF00)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) -> bool:
return self.attributes & 0x0F00 < 0x800
@property
def is_key(self) -> bool:
return self.attributes & 0x2000 == 0x2000
@property
def | (self) -> bool:
return self.attributes & 0x1000 == 0x1000
@property
def length(self) -> int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 0xFF
@property
def struct_format(self) -> str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(F'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) -> Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
# https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types
_CUSTOM_ACTION_TYPES = {
0x01: 'DLL file stored in a Binary table stream.',
0x02: 'EXE file stored in a Binary table stream.',
0x05: 'JScript file stored in a Binary table stream.',
0x06: 'VBScript file stored in a Binary table stream.',
0x11: 'DLL file that is installed with a product.',
0x12: 'EXE file that is installed with a product.',
0x13: 'Displays a specified error message and returns failure, terminating the installation.',
0x15: 'JScript file that is installed with a product.',
0x16: 'VBScript file that is installed with a product.',
0x22: 'EXE file having a path referencing a directory.',
0x23: 'Directory set with formatted text.',
0x25: 'JScript text stored in this sequence table.',
0x26: 'VBScript text stored in this sequence table.',
0x32: 'EXE file having a path specified by a property value.',
0x33: 'Property set with formatted text.',
0x35: 'JScript text specified by a property value.',
0x36: 'VBScript text specified by a property value.',
}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
row_size = struct.calcsize(F'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)
table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}
table_names_known = set(tables)
for name in table_names_known - table_names_given:
self.log_warn(F'table name known but not given: {name}')
for name in table_names_given - table_names_known:
self.log_warn(F'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
# https://learn.microsoft.com/en-us/windows/win32/msi/formatted
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return F'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(R'''(?x)
\[ # open square brackent
(?![~\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\]{}]+) # no brackets or braces
\]''', _replace, string)
if _replace.done:
break
string = re.sub(r'\[\\(.)\]', r'\1', string)
string = string.replace('[~]', '\0')
return string
for table_name, table in tables.items():
stream_name = F'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 0x80000000
elif vt is MsiType.Short:
if value != 0:
value -= 0x8000
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
if table_name == 'File':
tbl_properties[values[0]] = values[2]
if table_name == 'Component':
tbl_properties[values[0]] = F'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
entry['Hash'] = struct.pack(
'<IIII',
row[2] ^ 0x80000000,
row[3] ^ 0x80000000,
row[4] ^ 0x80000000,
row[5] ^ 0x80000000,
).hex()
if table_name == 'CustomAction':
code = row[1] & 0x3F
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {0x25: 'js', 0x26: 'vbs', 0x33: None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = F'Action/{path}'
if item.extension:
path = F'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = F'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in [
'[5]SummaryInformation',
'[5]DocumentSummaryInformation',
'[5]DigitalSignature',
'[5]MsiDigitalSignatureEx'
]:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(F'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = F'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,
json.dumps(processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
if not data.startswith(B'\xD0\xCF\x11\xE0'):
return False
return FileMagicInfo(data).extension == 'msi'
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
| is_nullable | identifier_name |
readpanel.py | import sys
import glob
import os
import json
import struct
import time
import threading
import re
import Queue
import serial
import serial.tools.list_ports
import readline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
import rlcompleter
import datetime
def showdata(y):
SampleRate = 20
nplot = 200
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Data")
ax1.set_xlabel('ns')
ax1.set_ylabel('ADC counts')
x=range(0,SampleRate*len(y),SampleRate)
# y2 = y[:]
# for i in range(len(y)):
# y[i] = y[i] + np.cos((x[i]-10200)/2400.)*95-1290
ax1.plot(x,y)
ax1.get_yaxis().get_major_formatter().set_useOffset(False)
# ax1.plot(x,y2)
# leg = ax1.legend()
plt.show()
plt.close(fig)
def | (filename,samples,triggers,display,plot):
global dcvoltages
fout = open(os.path.join(rundir,filename),"r")
allts = []
deltats = []
data = fout.read()
if len(data) < 12:
print "No data found"
return
data = data.split('\n')
data = filter(lambda x: len(x)>0, data)
data = [(int(i,16) & 0xFFF) for i in data]
print len(data),"total words, expected",(samples+12)*triggers
T1 = []
adc1 = []
deltaT1 = []
deltaT2 = []
if True:
gtimeold = -1
htimeold = -1
ctimeold = -1
latestold = 0
for itrig in range(triggers):
if (itrig+1)*(samples+12) > len(data):
break
tdata = data[itrig*(samples+12):(itrig+1)*(samples+12)]
channel = (tdata[3] >> 6) & 0x3F
gtime = ((tdata[0]<<24) + (tdata[1]<<12) + (tdata[2]<<0))
htime = (((tdata[3]&0xF)<<24) + (tdata[4]<<12) + (tdata[5]<<0))
ctime = (((tdata[6]&0xF)<<24) + (tdata[7]<<12) + (tdata[8]<<0))
htime = (htime & 0x3FFFFF00) + (0xFF - (htime&0xFF));
ctime = (ctime & 0x3FFFFF00) + (0xFF - (ctime&0xFF));
hvfired = 0
calfired = 0
if htime > 0 and htime != htimeold:
hvfired = 1
if ctime > 0 and ctime != ctimeold:
calfired = 1
adc1 = tdata[12:]
T1.append(htime)
if hvfired:
hval = htime
else:
hval = " X "
if calfired:
cval = ctime
else:
cval = " X "
if hvfired and calfired:
hvcaldeltat = (ctime-htime)*15.625*10**-3 # in ns
else:
hvcaldeltat = " X "
if gtimeold < 0:
gtimeold = gtime
htimeold = htime
ctimeold = ctime
if (htime >= ctime or not hvfired) and hvfired:
latestold = htime
elif (ctime >= htime or not calfired) and calfired:
latestold = ctime
mindeltat = " X "
else:
if (htime >= ctime or not hvfired) and calfired:
mindeltat = ctime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = htime
elif (ctime >= htime or not calfired) and hvfired:
mindeltat = htime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = ctime
else:
mindeltat = " X "
if display:
# print "Channel",channel,"- time since last:",mindeltat,"deltat:",hvcaldeltat,"gtime:",gtime,"htime:",hval,"ctime:",cval
dcvoltage = (-750. + np.mean(adc1)*1500./4096.)/10
print "mean ped=",np.mean(adc1)," rms=",np.std(adc1), " voltage=",dcvoltage
dcvoltages.append(dcvoltage)
if plot:
showdata(adc1)
adc1 = []
gtimeold = gtime
htimeold = htime
ctimeold = ctime
def serialread(ser):
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
outputmode = 0
while True:
# Print data from serial to screen
try:
data = ser.readline()
if data:
outdata = data.split("\n")
for outline in outdata:
# normal printout
if outputmode == 0:
if outline.startswith("start"):
outputmode = 1
else:
if len(outline) > 0:
print " ",outline
if outline.find("gain/threshold")>0:
gainsettings[int(outline.split()[1])] = int(outline.split()[5])
threshsettings[int(outline.split()[1])] = int(outline.split()[6])
if outline.startswith("CAL")>0:
calenvdata = outline.split()[-3:]
if outline.startswith("HV")>0:
hvenvdata = outline.split()[-3:]
if outline.startswith("FAILED") or outline.startswith("SUCCESS") or outline.startswith("Channel 15") or outline.startswith("HV"):
return 0
# data taking mode
elif outputmode == 1:
if outline.startswith("end"):
outputmode = 0
if fout:
fout.close()
print "File closed"
fout = 0
else:
if fout:
# print "RECIEVED",len(outline)
fout.write(outline.replace(" ","\n"))
except serial.serialutil.SerialException:
ser.close()
print "ARM disconnected"
print ""
ser = None
def get_key_value(keys, flag, default = "0"):
index = 1
while True:
if index >= len(keys) - 1:
return default
if keys[index] == ("-" + flag):
return keys[index+1]
index += 1
def readstrawcmdthresh(strawnumber,samples,clock):
chan_mask = (0x1 << strawnumber)
if strawnumber == 0:
chan_mask = 0xFFFF
adc_mode = 0
tdc_mode = 1
lookback = 2
triggers=1
pulser=1
delay=1
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def readstrawcmd(tdc_mode, channel_read):
# chan_mask = 0xC030 #channels 4,5,14,15
# chan_mask = 0x30 #channels 4,5
# channel_read = 4
chan_mask = 0
chan_mask |= (0x1 << channel_read)
adc_mode = 0
# tdc_mode = 0
lookback = 4
samples = 10
clock = 1
triggers=400
pulser=0
delay=30
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def thresholdset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x1)
data += struct.pack('H',chan_mask)
data += struct.pack('H',dvalue)
return data
def gainset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x0)
data += struct.pack('H',channel)
data += struct.pack('H',dvalue)
return data
def dump_settings():
command_id = 7
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',command_id)
return data
def readSPI():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',10)
return data
def readSensors():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',11)
return data
def main():
global dcvoltages
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
global rundir
serial_ports = [
p[0]
for p in serial.tools.list_ports.comports()
if 'ttyUSB' in p[0]
]
print serial_ports
SERIALPORTROC = serial_ports[3]
SERIALPORTHV = serial_ports[2]
SERIALPORTCAL = serial_ports[1]
print SERIALPORTROC
SERIALRATE = 921600
TIMEOUT = 1
PREFIX = "run"
fout = 0
mode = 0
samples = 0
triggers = 0
topdir = os.path.dirname(os.path.realpath(__file__))
# if len(sys.argv) > 1:
# rundir = sys.argv[1]
# else:
rundir = os.path.join(topdir,'data/noise_180425_json_singles')
if not os.path.exists(rundir):
os.makedirs(rundir)
files = glob.glob(rundir + "/" + PREFIX + "_*.txt")
lastrun = -1
for fn in files:
num = int(fn.split("_")[-1].split(".txt")[0])
if num > lastrun:
lastrun = num
print "Waiting for ARM to connect"
print "=========================="
serroc = serial.Serial(SERIALPORTROC,SERIALRATE, timeout = TIMEOUT)
sercal = serial.Serial(SERIALPORTCAL,SERIALRATE, timeout = TIMEOUT)
serhv = serial.Serial(SERIALPORTHV,SERIALRATE, timeout = TIMEOUT)
print "ARMs connected"
samples=2000
# keyboard input loop
try:
channels = [4,5,94,95]
gainsCal = [63,63,63,63]
gainsHV = [64,65,64,65]
threshCal = [83,78,84,79]
threshHV = [81,81,86,84]
i_channel_read = 0 # id of channel to be read. this will run from 0 to len(channels)
dcvoltages=[]
gainsettings=[]
threshsettings=[]
calenvdata=[]
hvenvdata=[]
for i in range (16):
gainsettings.append(0)
threshsettings.append(0)
if (lastrun+1) % 1 == 0: ## Define how often thresholds and temps will be read
cmd = readSPI()
serroc.write(cmd)
serroc.readline()
serroc.readline()
strawmask = 0xFF
# iterate over all declared channels
for ichannel in range( len(channels) ):
strawnumber = channels[ichannel]
for iside in range(3):
if iside == 0: #zero the cal side
cmd=gainset(strawnumber,0)
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
elif iside == 1: #zero the hv side
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,0)
serroc.write(cmd)
print serroc.readline()
elif iside == 2: #set both to default
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
filename = 'threshdata.txt'
mode=1
# if strawnumber==2 or strawnumber==4:
# mode = 0
if strawnumber>90:
cmd = readstrawcmdthresh(strawnumber-80,samples,mode)
else:
cmd = readstrawcmdthresh(strawnumber,samples,mode)
if strawnumber==0:
sercal.write(cmd)
else:
serhv.write(cmd)
fout = open(os.path.join(rundir,filename),"w")
if strawnumber==0:
serialread(sercal)
else:
serialread(serhv)
plotfile(filename,samples,1,1,0)
print "==========================="
cmd = readSensors()
serroc.write(cmd)
serialread(serroc)
print calenvdata
print hvenvdata
timestamp=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
run_settings={'gain':gainsettings, 'threshold':threshsettings, 'threshmv':dcvoltages}
env_data = {'time':timestamp,'calenv':calenvdata, 'hvenv':hvenvdata}
if (lastrun+1)%3==0:
tdc_mode=0 #coincidence
elif (lastrun+1)%3==1:
tdc_mode=1 #Cal
elif (lastrun+1)%3==2:
tdc_mode=2 #HV
if (lastrun+1)% (3*len(channels))<3:
i_channel_read = 0
elif (lastrun+1)% (3*len(channels))<6:
i_channel_read = 1
elif (lastrun+1)% (3*len(channels))<9:
i_channel_read = 2
else:
i_channel_read = 3
channel_read = channels[i_channel_read]
print 'i_channel_read = ', i_channel_read
print 'tdc mode = ', tdc_mode
if channel_read>16:
channel_read -= 80
cmd = readstrawcmd(tdc_mode, channel_read)
serhv.write(cmd)
print "OPENING FILE"
next_runname = PREFIX + "_" + str(lastrun+1) + ".txt"
filename = next_runname
fout = open(os.path.join(rundir,filename),"w")
json.dump(run_settings, fout)
json.dump(env_data,fout)
fout.write("\n")
serialread(serhv)
serroc.close()
serhv.close()
sercal.close()
print 'all done in this run'
lastrun += 1
except Exception, e:
print type(e),e
finally:
print 'Ending...'
if __name__ == "__main__":
main()
| plotfile | identifier_name |
readpanel.py | import sys
import glob
import os
import json
import struct
import time
import threading
import re
import Queue
import serial
import serial.tools.list_ports
import readline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
import rlcompleter
import datetime
def showdata(y):
SampleRate = 20
nplot = 200
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Data")
ax1.set_xlabel('ns')
ax1.set_ylabel('ADC counts')
x=range(0,SampleRate*len(y),SampleRate)
# y2 = y[:]
# for i in range(len(y)):
# y[i] = y[i] + np.cos((x[i]-10200)/2400.)*95-1290
ax1.plot(x,y)
ax1.get_yaxis().get_major_formatter().set_useOffset(False)
# ax1.plot(x,y2)
# leg = ax1.legend()
plt.show()
plt.close(fig)
def plotfile(filename,samples,triggers,display,plot):
|
def serialread(ser):
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
outputmode = 0
while True:
# Print data from serial to screen
try:
data = ser.readline()
if data:
outdata = data.split("\n")
for outline in outdata:
# normal printout
if outputmode == 0:
if outline.startswith("start"):
outputmode = 1
else:
if len(outline) > 0:
print " ",outline
if outline.find("gain/threshold")>0:
gainsettings[int(outline.split()[1])] = int(outline.split()[5])
threshsettings[int(outline.split()[1])] = int(outline.split()[6])
if outline.startswith("CAL")>0:
calenvdata = outline.split()[-3:]
if outline.startswith("HV")>0:
hvenvdata = outline.split()[-3:]
if outline.startswith("FAILED") or outline.startswith("SUCCESS") or outline.startswith("Channel 15") or outline.startswith("HV"):
return 0
# data taking mode
elif outputmode == 1:
if outline.startswith("end"):
outputmode = 0
if fout:
fout.close()
print "File closed"
fout = 0
else:
if fout:
# print "RECIEVED",len(outline)
fout.write(outline.replace(" ","\n"))
except serial.serialutil.SerialException:
ser.close()
print "ARM disconnected"
print ""
ser = None
def get_key_value(keys, flag, default = "0"):
index = 1
while True:
if index >= len(keys) - 1:
return default
if keys[index] == ("-" + flag):
return keys[index+1]
index += 1
def readstrawcmdthresh(strawnumber,samples,clock):
chan_mask = (0x1 << strawnumber)
if strawnumber == 0:
chan_mask = 0xFFFF
adc_mode = 0
tdc_mode = 1
lookback = 2
triggers=1
pulser=1
delay=1
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def readstrawcmd(tdc_mode, channel_read):
# chan_mask = 0xC030 #channels 4,5,14,15
# chan_mask = 0x30 #channels 4,5
# channel_read = 4
chan_mask = 0
chan_mask |= (0x1 << channel_read)
adc_mode = 0
# tdc_mode = 0
lookback = 4
samples = 10
clock = 1
triggers=400
pulser=0
delay=30
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def thresholdset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x1)
data += struct.pack('H',chan_mask)
data += struct.pack('H',dvalue)
return data
def gainset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x0)
data += struct.pack('H',channel)
data += struct.pack('H',dvalue)
return data
def dump_settings():
command_id = 7
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',command_id)
return data
def readSPI():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',10)
return data
def readSensors():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',11)
return data
def main():
global dcvoltages
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
global rundir
serial_ports = [
p[0]
for p in serial.tools.list_ports.comports()
if 'ttyUSB' in p[0]
]
print serial_ports
SERIALPORTROC = serial_ports[3]
SERIALPORTHV = serial_ports[2]
SERIALPORTCAL = serial_ports[1]
print SERIALPORTROC
SERIALRATE = 921600
TIMEOUT = 1
PREFIX = "run"
fout = 0
mode = 0
samples = 0
triggers = 0
topdir = os.path.dirname(os.path.realpath(__file__))
# if len(sys.argv) > 1:
# rundir = sys.argv[1]
# else:
rundir = os.path.join(topdir,'data/noise_180425_json_singles')
if not os.path.exists(rundir):
os.makedirs(rundir)
files = glob.glob(rundir + "/" + PREFIX + "_*.txt")
lastrun = -1
for fn in files:
num = int(fn.split("_")[-1].split(".txt")[0])
if num > lastrun:
lastrun = num
print "Waiting for ARM to connect"
print "=========================="
serroc = serial.Serial(SERIALPORTROC,SERIALRATE, timeout = TIMEOUT)
sercal = serial.Serial(SERIALPORTCAL,SERIALRATE, timeout = TIMEOUT)
serhv = serial.Serial(SERIALPORTHV,SERIALRATE, timeout = TIMEOUT)
print "ARMs connected"
samples=2000
# keyboard input loop
try:
channels = [4,5,94,95]
gainsCal = [63,63,63,63]
gainsHV = [64,65,64,65]
threshCal = [83,78,84,79]
threshHV = [81,81,86,84]
i_channel_read = 0 # id of channel to be read. this will run from 0 to len(channels)
dcvoltages=[]
gainsettings=[]
threshsettings=[]
calenvdata=[]
hvenvdata=[]
for i in range (16):
gainsettings.append(0)
threshsettings.append(0)
if (lastrun+1) % 1 == 0: ## Define how often thresholds and temps will be read
cmd = readSPI()
serroc.write(cmd)
serroc.readline()
serroc.readline()
strawmask = 0xFF
# iterate over all declared channels
for ichannel in range( len(channels) ):
strawnumber = channels[ichannel]
for iside in range(3):
if iside == 0: #zero the cal side
cmd=gainset(strawnumber,0)
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
elif iside == 1: #zero the hv side
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,0)
serroc.write(cmd)
print serroc.readline()
elif iside == 2: #set both to default
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
filename = 'threshdata.txt'
mode=1
# if strawnumber==2 or strawnumber==4:
# mode = 0
if strawnumber>90:
cmd = readstrawcmdthresh(strawnumber-80,samples,mode)
else:
cmd = readstrawcmdthresh(strawnumber,samples,mode)
if strawnumber==0:
sercal.write(cmd)
else:
serhv.write(cmd)
fout = open(os.path.join(rundir,filename),"w")
if strawnumber==0:
serialread(sercal)
else:
serialread(serhv)
plotfile(filename,samples,1,1,0)
print "==========================="
cmd = readSensors()
serroc.write(cmd)
serialread(serroc)
print calenvdata
print hvenvdata
timestamp=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
run_settings={'gain':gainsettings, 'threshold':threshsettings, 'threshmv':dcvoltages}
env_data = {'time':timestamp,'calenv':calenvdata, 'hvenv':hvenvdata}
if (lastrun+1)%3==0:
tdc_mode=0 #coincidence
elif (lastrun+1)%3==1:
tdc_mode=1 #Cal
elif (lastrun+1)%3==2:
tdc_mode=2 #HV
if (lastrun+1)% (3*len(channels))<3:
i_channel_read = 0
elif (lastrun+1)% (3*len(channels))<6:
i_channel_read = 1
elif (lastrun+1)% (3*len(channels))<9:
i_channel_read = 2
else:
i_channel_read = 3
channel_read = channels[i_channel_read]
print 'i_channel_read = ', i_channel_read
print 'tdc mode = ', tdc_mode
if channel_read>16:
channel_read -= 80
cmd = readstrawcmd(tdc_mode, channel_read)
serhv.write(cmd)
print "OPENING FILE"
next_runname = PREFIX + "_" + str(lastrun+1) + ".txt"
filename = next_runname
fout = open(os.path.join(rundir,filename),"w")
json.dump(run_settings, fout)
json.dump(env_data,fout)
fout.write("\n")
serialread(serhv)
serroc.close()
serhv.close()
sercal.close()
print 'all done in this run'
lastrun += 1
except Exception, e:
print type(e),e
finally:
print 'Ending...'
if __name__ == "__main__":
main()
| global dcvoltages
fout = open(os.path.join(rundir,filename),"r")
allts = []
deltats = []
data = fout.read()
if len(data) < 12:
print "No data found"
return
data = data.split('\n')
data = filter(lambda x: len(x)>0, data)
data = [(int(i,16) & 0xFFF) for i in data]
print len(data),"total words, expected",(samples+12)*triggers
T1 = []
adc1 = []
deltaT1 = []
deltaT2 = []
if True:
gtimeold = -1
htimeold = -1
ctimeold = -1
latestold = 0
for itrig in range(triggers):
if (itrig+1)*(samples+12) > len(data):
break
tdata = data[itrig*(samples+12):(itrig+1)*(samples+12)]
channel = (tdata[3] >> 6) & 0x3F
gtime = ((tdata[0]<<24) + (tdata[1]<<12) + (tdata[2]<<0))
htime = (((tdata[3]&0xF)<<24) + (tdata[4]<<12) + (tdata[5]<<0))
ctime = (((tdata[6]&0xF)<<24) + (tdata[7]<<12) + (tdata[8]<<0))
htime = (htime & 0x3FFFFF00) + (0xFF - (htime&0xFF));
ctime = (ctime & 0x3FFFFF00) + (0xFF - (ctime&0xFF));
hvfired = 0
calfired = 0
if htime > 0 and htime != htimeold:
hvfired = 1
if ctime > 0 and ctime != ctimeold:
calfired = 1
adc1 = tdata[12:]
T1.append(htime)
if hvfired:
hval = htime
else:
hval = " X "
if calfired:
cval = ctime
else:
cval = " X "
if hvfired and calfired:
hvcaldeltat = (ctime-htime)*15.625*10**-3 # in ns
else:
hvcaldeltat = " X "
if gtimeold < 0:
gtimeold = gtime
htimeold = htime
ctimeold = ctime
if (htime >= ctime or not hvfired) and hvfired:
latestold = htime
elif (ctime >= htime or not calfired) and calfired:
latestold = ctime
mindeltat = " X "
else:
if (htime >= ctime or not hvfired) and calfired:
mindeltat = ctime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = htime
elif (ctime >= htime or not calfired) and hvfired:
mindeltat = htime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = ctime
else:
mindeltat = " X "
if display:
# print "Channel",channel,"- time since last:",mindeltat,"deltat:",hvcaldeltat,"gtime:",gtime,"htime:",hval,"ctime:",cval
dcvoltage = (-750. + np.mean(adc1)*1500./4096.)/10
print "mean ped=",np.mean(adc1)," rms=",np.std(adc1), " voltage=",dcvoltage
dcvoltages.append(dcvoltage)
if plot:
showdata(adc1)
adc1 = []
gtimeold = gtime
htimeold = htime
ctimeold = ctime | identifier_body |
readpanel.py | import sys
import glob
import os
import json
import struct
import time
import threading
import re
import Queue
import serial
import serial.tools.list_ports
import readline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
import rlcompleter
import datetime
def showdata(y):
SampleRate = 20
nplot = 200
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Data")
ax1.set_xlabel('ns')
ax1.set_ylabel('ADC counts')
x=range(0,SampleRate*len(y),SampleRate)
# y2 = y[:]
# for i in range(len(y)):
# y[i] = y[i] + np.cos((x[i]-10200)/2400.)*95-1290
ax1.plot(x,y)
ax1.get_yaxis().get_major_formatter().set_useOffset(False)
# ax1.plot(x,y2)
# leg = ax1.legend()
plt.show()
plt.close(fig)
def plotfile(filename,samples,triggers,display,plot):
global dcvoltages
fout = open(os.path.join(rundir,filename),"r")
allts = []
deltats = []
data = fout.read()
if len(data) < 12:
print "No data found"
return
data = data.split('\n')
data = filter(lambda x: len(x)>0, data)
data = [(int(i,16) & 0xFFF) for i in data]
print len(data),"total words, expected",(samples+12)*triggers
T1 = []
adc1 = []
deltaT1 = []
deltaT2 = []
if True:
gtimeold = -1
htimeold = -1
ctimeold = -1
latestold = 0
for itrig in range(triggers):
if (itrig+1)*(samples+12) > len(data):
break
tdata = data[itrig*(samples+12):(itrig+1)*(samples+12)]
channel = (tdata[3] >> 6) & 0x3F
gtime = ((tdata[0]<<24) + (tdata[1]<<12) + (tdata[2]<<0))
htime = (((tdata[3]&0xF)<<24) + (tdata[4]<<12) + (tdata[5]<<0))
ctime = (((tdata[6]&0xF)<<24) + (tdata[7]<<12) + (tdata[8]<<0))
htime = (htime & 0x3FFFFF00) + (0xFF - (htime&0xFF));
ctime = (ctime & 0x3FFFFF00) + (0xFF - (ctime&0xFF));
hvfired = 0
calfired = 0
if htime > 0 and htime != htimeold:
hvfired = 1
if ctime > 0 and ctime != ctimeold:
calfired = 1
adc1 = tdata[12:]
T1.append(htime)
if hvfired:
hval = htime
else:
hval = " X "
if calfired:
cval = ctime
else:
cval = " X "
if hvfired and calfired:
hvcaldeltat = (ctime-htime)*15.625*10**-3 # in ns
else:
hvcaldeltat = " X "
if gtimeold < 0:
gtimeold = gtime
htimeold = htime
ctimeold = ctime
if (htime >= ctime or not hvfired) and hvfired:
latestold = htime
elif (ctime >= htime or not calfired) and calfired:
latestold = ctime
mindeltat = " X "
else:
if (htime >= ctime or not hvfired) and calfired:
mindeltat = ctime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = htime
elif (ctime >= htime or not calfired) and hvfired:
mindeltat = htime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = ctime
else:
mindeltat = " X "
if display:
# print "Channel",channel,"- time since last:",mindeltat,"deltat:",hvcaldeltat,"gtime:",gtime,"htime:",hval,"ctime:",cval
dcvoltage = (-750. + np.mean(adc1)*1500./4096.)/10
print "mean ped=",np.mean(adc1)," rms=",np.std(adc1), " voltage=",dcvoltage
dcvoltages.append(dcvoltage)
if plot:
showdata(adc1)
adc1 = []
gtimeold = gtime
htimeold = htime
ctimeold = ctime
def serialread(ser):
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
outputmode = 0
while True:
# Print data from serial to screen
try:
data = ser.readline()
if data:
outdata = data.split("\n")
for outline in outdata:
# normal printout
if outputmode == 0:
if outline.startswith("start"):
outputmode = 1
else:
if len(outline) > 0:
print " ",outline
if outline.find("gain/threshold")>0:
gainsettings[int(outline.split()[1])] = int(outline.split()[5])
threshsettings[int(outline.split()[1])] = int(outline.split()[6])
if outline.startswith("CAL")>0:
|
if outline.startswith("HV")>0:
hvenvdata = outline.split()[-3:]
if outline.startswith("FAILED") or outline.startswith("SUCCESS") or outline.startswith("Channel 15") or outline.startswith("HV"):
return 0
# data taking mode
elif outputmode == 1:
if outline.startswith("end"):
outputmode = 0
if fout:
fout.close()
print "File closed"
fout = 0
else:
if fout:
# print "RECIEVED",len(outline)
fout.write(outline.replace(" ","\n"))
except serial.serialutil.SerialException:
ser.close()
print "ARM disconnected"
print ""
ser = None
def get_key_value(keys, flag, default = "0"):
index = 1
while True:
if index >= len(keys) - 1:
return default
if keys[index] == ("-" + flag):
return keys[index+1]
index += 1
def readstrawcmdthresh(strawnumber,samples,clock):
chan_mask = (0x1 << strawnumber)
if strawnumber == 0:
chan_mask = 0xFFFF
adc_mode = 0
tdc_mode = 1
lookback = 2
triggers=1
pulser=1
delay=1
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def readstrawcmd(tdc_mode, channel_read):
# chan_mask = 0xC030 #channels 4,5,14,15
# chan_mask = 0x30 #channels 4,5
# channel_read = 4
chan_mask = 0
chan_mask |= (0x1 << channel_read)
adc_mode = 0
# tdc_mode = 0
lookback = 4
samples = 10
clock = 1
triggers=400
pulser=0
delay=30
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def thresholdset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x1)
data += struct.pack('H',chan_mask)
data += struct.pack('H',dvalue)
return data
def gainset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x0)
data += struct.pack('H',channel)
data += struct.pack('H',dvalue)
return data
def dump_settings():
command_id = 7
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',command_id)
return data
def readSPI():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',10)
return data
def readSensors():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',11)
return data
def main():
global dcvoltages
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
global rundir
serial_ports = [
p[0]
for p in serial.tools.list_ports.comports()
if 'ttyUSB' in p[0]
]
print serial_ports
SERIALPORTROC = serial_ports[3]
SERIALPORTHV = serial_ports[2]
SERIALPORTCAL = serial_ports[1]
print SERIALPORTROC
SERIALRATE = 921600
TIMEOUT = 1
PREFIX = "run"
fout = 0
mode = 0
samples = 0
triggers = 0
topdir = os.path.dirname(os.path.realpath(__file__))
# if len(sys.argv) > 1:
# rundir = sys.argv[1]
# else:
rundir = os.path.join(topdir,'data/noise_180425_json_singles')
if not os.path.exists(rundir):
os.makedirs(rundir)
files = glob.glob(rundir + "/" + PREFIX + "_*.txt")
lastrun = -1
for fn in files:
num = int(fn.split("_")[-1].split(".txt")[0])
if num > lastrun:
lastrun = num
print "Waiting for ARM to connect"
print "=========================="
serroc = serial.Serial(SERIALPORTROC,SERIALRATE, timeout = TIMEOUT)
sercal = serial.Serial(SERIALPORTCAL,SERIALRATE, timeout = TIMEOUT)
serhv = serial.Serial(SERIALPORTHV,SERIALRATE, timeout = TIMEOUT)
print "ARMs connected"
samples=2000
# keyboard input loop
try:
channels = [4,5,94,95]
gainsCal = [63,63,63,63]
gainsHV = [64,65,64,65]
threshCal = [83,78,84,79]
threshHV = [81,81,86,84]
i_channel_read = 0 # id of channel to be read. this will run from 0 to len(channels)
dcvoltages=[]
gainsettings=[]
threshsettings=[]
calenvdata=[]
hvenvdata=[]
for i in range (16):
gainsettings.append(0)
threshsettings.append(0)
if (lastrun+1) % 1 == 0: ## Define how often thresholds and temps will be read
cmd = readSPI()
serroc.write(cmd)
serroc.readline()
serroc.readline()
strawmask = 0xFF
# iterate over all declared channels
for ichannel in range( len(channels) ):
strawnumber = channels[ichannel]
for iside in range(3):
if iside == 0: #zero the cal side
cmd=gainset(strawnumber,0)
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
elif iside == 1: #zero the hv side
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,0)
serroc.write(cmd)
print serroc.readline()
elif iside == 2: #set both to default
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
filename = 'threshdata.txt'
mode=1
# if strawnumber==2 or strawnumber==4:
# mode = 0
if strawnumber>90:
cmd = readstrawcmdthresh(strawnumber-80,samples,mode)
else:
cmd = readstrawcmdthresh(strawnumber,samples,mode)
if strawnumber==0:
sercal.write(cmd)
else:
serhv.write(cmd)
fout = open(os.path.join(rundir,filename),"w")
if strawnumber==0:
serialread(sercal)
else:
serialread(serhv)
plotfile(filename,samples,1,1,0)
print "==========================="
cmd = readSensors()
serroc.write(cmd)
serialread(serroc)
print calenvdata
print hvenvdata
timestamp=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
run_settings={'gain':gainsettings, 'threshold':threshsettings, 'threshmv':dcvoltages}
env_data = {'time':timestamp,'calenv':calenvdata, 'hvenv':hvenvdata}
if (lastrun+1)%3==0:
tdc_mode=0 #coincidence
elif (lastrun+1)%3==1:
tdc_mode=1 #Cal
elif (lastrun+1)%3==2:
tdc_mode=2 #HV
if (lastrun+1)% (3*len(channels))<3:
i_channel_read = 0
elif (lastrun+1)% (3*len(channels))<6:
i_channel_read = 1
elif (lastrun+1)% (3*len(channels))<9:
i_channel_read = 2
else:
i_channel_read = 3
channel_read = channels[i_channel_read]
print 'i_channel_read = ', i_channel_read
print 'tdc mode = ', tdc_mode
if channel_read>16:
channel_read -= 80
cmd = readstrawcmd(tdc_mode, channel_read)
serhv.write(cmd)
print "OPENING FILE"
next_runname = PREFIX + "_" + str(lastrun+1) + ".txt"
filename = next_runname
fout = open(os.path.join(rundir,filename),"w")
json.dump(run_settings, fout)
json.dump(env_data,fout)
fout.write("\n")
serialread(serhv)
serroc.close()
serhv.close()
sercal.close()
print 'all done in this run'
lastrun += 1
except Exception, e:
print type(e),e
finally:
print 'Ending...'
if __name__ == "__main__":
main()
| calenvdata = outline.split()[-3:] | conditional_block |
readpanel.py | import sys
import glob
import os
import json
import struct
import time
import threading
import re
import Queue
import serial
import serial.tools.list_ports
import readline
import numpy as np
|
def showdata(y):
SampleRate = 20
nplot = 200
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Data")
ax1.set_xlabel('ns')
ax1.set_ylabel('ADC counts')
x=range(0,SampleRate*len(y),SampleRate)
# y2 = y[:]
# for i in range(len(y)):
# y[i] = y[i] + np.cos((x[i]-10200)/2400.)*95-1290
ax1.plot(x,y)
ax1.get_yaxis().get_major_formatter().set_useOffset(False)
# ax1.plot(x,y2)
# leg = ax1.legend()
plt.show()
plt.close(fig)
def plotfile(filename,samples,triggers,display,plot):
global dcvoltages
fout = open(os.path.join(rundir,filename),"r")
allts = []
deltats = []
data = fout.read()
if len(data) < 12:
print "No data found"
return
data = data.split('\n')
data = filter(lambda x: len(x)>0, data)
data = [(int(i,16) & 0xFFF) for i in data]
print len(data),"total words, expected",(samples+12)*triggers
T1 = []
adc1 = []
deltaT1 = []
deltaT2 = []
if True:
gtimeold = -1
htimeold = -1
ctimeold = -1
latestold = 0
for itrig in range(triggers):
if (itrig+1)*(samples+12) > len(data):
break
tdata = data[itrig*(samples+12):(itrig+1)*(samples+12)]
channel = (tdata[3] >> 6) & 0x3F
gtime = ((tdata[0]<<24) + (tdata[1]<<12) + (tdata[2]<<0))
htime = (((tdata[3]&0xF)<<24) + (tdata[4]<<12) + (tdata[5]<<0))
ctime = (((tdata[6]&0xF)<<24) + (tdata[7]<<12) + (tdata[8]<<0))
htime = (htime & 0x3FFFFF00) + (0xFF - (htime&0xFF));
ctime = (ctime & 0x3FFFFF00) + (0xFF - (ctime&0xFF));
hvfired = 0
calfired = 0
if htime > 0 and htime != htimeold:
hvfired = 1
if ctime > 0 and ctime != ctimeold:
calfired = 1
adc1 = tdata[12:]
T1.append(htime)
if hvfired:
hval = htime
else:
hval = " X "
if calfired:
cval = ctime
else:
cval = " X "
if hvfired and calfired:
hvcaldeltat = (ctime-htime)*15.625*10**-3 # in ns
else:
hvcaldeltat = " X "
if gtimeold < 0:
gtimeold = gtime
htimeold = htime
ctimeold = ctime
if (htime >= ctime or not hvfired) and hvfired:
latestold = htime
elif (ctime >= htime or not calfired) and calfired:
latestold = ctime
mindeltat = " X "
else:
if (htime >= ctime or not hvfired) and calfired:
mindeltat = ctime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = htime
elif (ctime >= htime or not calfired) and hvfired:
mindeltat = htime - latestold
if gtime != gtimeold:
mindeltat += (gtime-gtimeold)*2**28
mindeltat *= 15.625*10**-6 # in us
latestold = ctime
else:
mindeltat = " X "
if display:
# print "Channel",channel,"- time since last:",mindeltat,"deltat:",hvcaldeltat,"gtime:",gtime,"htime:",hval,"ctime:",cval
dcvoltage = (-750. + np.mean(adc1)*1500./4096.)/10
print "mean ped=",np.mean(adc1)," rms=",np.std(adc1), " voltage=",dcvoltage
dcvoltages.append(dcvoltage)
if plot:
showdata(adc1)
adc1 = []
gtimeold = gtime
htimeold = htime
ctimeold = ctime
def serialread(ser):
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
outputmode = 0
while True:
# Print data from serial to screen
try:
data = ser.readline()
if data:
outdata = data.split("\n")
for outline in outdata:
# normal printout
if outputmode == 0:
if outline.startswith("start"):
outputmode = 1
else:
if len(outline) > 0:
print " ",outline
if outline.find("gain/threshold")>0:
gainsettings[int(outline.split()[1])] = int(outline.split()[5])
threshsettings[int(outline.split()[1])] = int(outline.split()[6])
if outline.startswith("CAL")>0:
calenvdata = outline.split()[-3:]
if outline.startswith("HV")>0:
hvenvdata = outline.split()[-3:]
if outline.startswith("FAILED") or outline.startswith("SUCCESS") or outline.startswith("Channel 15") or outline.startswith("HV"):
return 0
# data taking mode
elif outputmode == 1:
if outline.startswith("end"):
outputmode = 0
if fout:
fout.close()
print "File closed"
fout = 0
else:
if fout:
# print "RECIEVED",len(outline)
fout.write(outline.replace(" ","\n"))
except serial.serialutil.SerialException:
ser.close()
print "ARM disconnected"
print ""
ser = None
def get_key_value(keys, flag, default = "0"):
index = 1
while True:
if index >= len(keys) - 1:
return default
if keys[index] == ("-" + flag):
return keys[index+1]
index += 1
def readstrawcmdthresh(strawnumber,samples,clock):
chan_mask = (0x1 << strawnumber)
if strawnumber == 0:
chan_mask = 0xFFFF
adc_mode = 0
tdc_mode = 1
lookback = 2
triggers=1
pulser=1
delay=1
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def readstrawcmd(tdc_mode, channel_read):
# chan_mask = 0xC030 #channels 4,5,14,15
# chan_mask = 0x30 #channels 4,5
# channel_read = 4
chan_mask = 0
chan_mask |= (0x1 << channel_read)
adc_mode = 0
# tdc_mode = 0
lookback = 4
samples = 10
clock = 1
triggers=400
pulser=0
delay=30
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',20)
data += struct.pack('B',10)
data += struct.pack('B',adc_mode)
data += struct.pack('B',tdc_mode)
data += struct.pack('H',lookback)
data += struct.pack('H',samples)
data += struct.pack('I',triggers)
data += struct.pack('H',chan_mask)
data += struct.pack('B',clock)
data += struct.pack('B',pulser)
data += struct.pack('H',delay)
return data
def thresholdset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x1)
data += struct.pack('H',chan_mask)
data += struct.pack('H',dvalue)
return data
def gainset(channel,dvalue):
chan_mask=0
chan_mask |= (0x1 << channel)
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',0x8)
data += struct.pack('B',0x0)
data += struct.pack('H',channel)
data += struct.pack('H',dvalue)
return data
def dump_settings():
command_id = 7
data = ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',command_id)
return data
def readSPI():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',10)
return data
def readSensors():
data= ""
data += struct.pack('B',0xaa)
data += struct.pack('B',0xaa)
data += struct.pack('B',4)
data += struct.pack('B',11)
return data
def main():
global dcvoltages
global fout
global mode
global gainsettings
global threshsettings
global calenvdata
global hvenvdata
global rundir
serial_ports = [
p[0]
for p in serial.tools.list_ports.comports()
if 'ttyUSB' in p[0]
]
print serial_ports
SERIALPORTROC = serial_ports[3]
SERIALPORTHV = serial_ports[2]
SERIALPORTCAL = serial_ports[1]
print SERIALPORTROC
SERIALRATE = 921600
TIMEOUT = 1
PREFIX = "run"
fout = 0
mode = 0
samples = 0
triggers = 0
topdir = os.path.dirname(os.path.realpath(__file__))
# if len(sys.argv) > 1:
# rundir = sys.argv[1]
# else:
rundir = os.path.join(topdir,'data/noise_180425_json_singles')
if not os.path.exists(rundir):
os.makedirs(rundir)
files = glob.glob(rundir + "/" + PREFIX + "_*.txt")
lastrun = -1
for fn in files:
num = int(fn.split("_")[-1].split(".txt")[0])
if num > lastrun:
lastrun = num
print "Waiting for ARM to connect"
print "=========================="
serroc = serial.Serial(SERIALPORTROC,SERIALRATE, timeout = TIMEOUT)
sercal = serial.Serial(SERIALPORTCAL,SERIALRATE, timeout = TIMEOUT)
serhv = serial.Serial(SERIALPORTHV,SERIALRATE, timeout = TIMEOUT)
print "ARMs connected"
samples=2000
# keyboard input loop
try:
channels = [4,5,94,95]
gainsCal = [63,63,63,63]
gainsHV = [64,65,64,65]
threshCal = [83,78,84,79]
threshHV = [81,81,86,84]
i_channel_read = 0 # id of channel to be read. this will run from 0 to len(channels)
dcvoltages=[]
gainsettings=[]
threshsettings=[]
calenvdata=[]
hvenvdata=[]
for i in range (16):
gainsettings.append(0)
threshsettings.append(0)
if (lastrun+1) % 1 == 0: ## Define how often thresholds and temps will be read
cmd = readSPI()
serroc.write(cmd)
serroc.readline()
serroc.readline()
strawmask = 0xFF
# iterate over all declared channels
for ichannel in range( len(channels) ):
strawnumber = channels[ichannel]
for iside in range(3):
if iside == 0: #zero the cal side
cmd=gainset(strawnumber,0)
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
elif iside == 1: #zero the hv side
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,0)
serroc.write(cmd)
print serroc.readline()
elif iside == 2: #set both to default
cmd=gainset(strawnumber,gainsCal[ichannel])
serroc.write(cmd)
print serroc.readline()
cmd=gainset(strawnumber+96,gainsHV[ichannel])
serroc.write(cmd)
print serroc.readline()
filename = 'threshdata.txt'
mode=1
# if strawnumber==2 or strawnumber==4:
# mode = 0
if strawnumber>90:
cmd = readstrawcmdthresh(strawnumber-80,samples,mode)
else:
cmd = readstrawcmdthresh(strawnumber,samples,mode)
if strawnumber==0:
sercal.write(cmd)
else:
serhv.write(cmd)
fout = open(os.path.join(rundir,filename),"w")
if strawnumber==0:
serialread(sercal)
else:
serialread(serhv)
plotfile(filename,samples,1,1,0)
print "==========================="
cmd = readSensors()
serroc.write(cmd)
serialread(serroc)
print calenvdata
print hvenvdata
timestamp=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
run_settings={'gain':gainsettings, 'threshold':threshsettings, 'threshmv':dcvoltages}
env_data = {'time':timestamp,'calenv':calenvdata, 'hvenv':hvenvdata}
if (lastrun+1)%3==0:
tdc_mode=0 #coincidence
elif (lastrun+1)%3==1:
tdc_mode=1 #Cal
elif (lastrun+1)%3==2:
tdc_mode=2 #HV
if (lastrun+1)% (3*len(channels))<3:
i_channel_read = 0
elif (lastrun+1)% (3*len(channels))<6:
i_channel_read = 1
elif (lastrun+1)% (3*len(channels))<9:
i_channel_read = 2
else:
i_channel_read = 3
channel_read = channels[i_channel_read]
print 'i_channel_read = ', i_channel_read
print 'tdc mode = ', tdc_mode
if channel_read>16:
channel_read -= 80
cmd = readstrawcmd(tdc_mode, channel_read)
serhv.write(cmd)
print "OPENING FILE"
next_runname = PREFIX + "_" + str(lastrun+1) + ".txt"
filename = next_runname
fout = open(os.path.join(rundir,filename),"w")
json.dump(run_settings, fout)
json.dump(env_data,fout)
fout.write("\n")
serialread(serhv)
serroc.close()
serhv.close()
sercal.close()
print 'all done in this run'
lastrun += 1
except Exception, e:
print type(e),e
finally:
print 'Ending...'
if __name__ == "__main__":
main() | import matplotlib
import matplotlib.pyplot as plt
import math
import rlcompleter
import datetime
| random_line_split |
light_character.py | """Main module."""
import itertools
import math
import os
from PIL import Image
LIGHTHOUSES_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'lighthouses'
)
COLOURS = {
'R': (255, 0, 0, 1),
'G': (0, 255, 0, 1),
'W': (255, 255, 255, 1),
'Y': (255, 255, 0, 1),
'Off': (0, 0, 0, 0)
}
def save_characteristic_as_image(
characteristic, size, write_buffer, base_img=None
):
on_img, off_img = load_base_images(base_img)
size = on_img.size if on_img is not None else size
frames, durations = states_to_frames(
size, collapse_states(characteristic_to_light_states(characteristic)),
on_img, off_img
)
frames = [frame.convert('RGB') for frame in frames]
if len(frames) > 1:
save_options = {
"format": "GIF",
"save_all": True,
"append_images": frames[1:],
"duration": durations,
"loop": 0
}
if base_img is None:
# If this is just a block light, these settings allow "Off"
# to be fully transparent
# Leaving them in place for images with lighthouses
# can cause odd effects, due to combining palettes.
save_options.update(
{
"transparency": 0,
"optimize": False,
"disposal": 3
}
)
frames[0].save(
write_buffer, **save_options
)
else:
frames[0].save(write_buffer, format="GIF")
return write_buffer
def load_base_images(base_img):
"""
Return the two base images needed to create a lighthouse animation.
base_img is either
- A full/relative path from the run context
- The name of a directory under lighthouses here
"""
if base_img is not None:
if not os.path.exists(base_img):
base_img = os.path.join(LIGHTHOUSES_DIR, base_img)
return (
Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),
Image.open(os.path.join(base_img, 'off.gif'))
)
return None, None
def characteristic_to_light_states(description):
"""
Given a light characteristic, return a list of 2-tuples representing the
state of light at any given time.
A fixed light is the given colour, permanently
>>> characteristic_to_light_states('F. R')
[('R', 1)]
"""
fragments = description.split()
pattern_type, groups = parse_pattern(fragments.pop(0))
colour, fragments = get_colour_code(fragments)
try:
period = parse_period(fragments)
except IndexError:
if must_have_period(pattern_type, groups):
raise
period = None
if period is not None and cannot_have_period(pattern_type, groups):
raise ValueError('Period is not allowed in this type of light')
return TYPES[pattern_type](groups, colour, period)
def get_colour_code(fragments):
if len(fragments) == 0 or fragments[0] not in COLOURS.keys():
return 'W', fragments
return fragments[0], fragments[1:]
def parse_period(fragments):
"""
Given the split up characteristic, return the period in milliseconds
The period is specified in seconds
>>> parse_period(['2'])
2000
The letter 's' to mark the units may be present
>>> parse_period(['3s'])
3000
It may be separated from the number by a space
>>> parse_period(['4','s'])
4000
A Quick flash can only have a period if it has groups
>>> parse_period(['3s'])
3000
"""
period_spec = fragments[-1]
# The last term is the cycle period,
# it may or may not have 's' for seconds
# The 's' may or may not be attached to the number
if period_spec == 's':
period_spec = fragments[-2]
if period_spec[-1] == 's':
period_spec = period_spec[:-1]
return int(float(period_spec) * 1000)
def cannot_have_period(pattern_type, groups):
return pattern_type == 'f' or (pattern_type == 'q' and groups == [1])
def must_have_period(pattern_type, groups):
return not(cannot_have_period(pattern_type, groups))
def | (pattern):
"""
Crack a pattern definition into its type and any grouping.
A pattern consists of the pattern type (e.g. flashing, occulting)
and optionally a group designation in parentheses.
The pattern definition could just be the type
>>> parse_pattern('Fl')
('fl', [1])
It could have optional dots marking the abbreviation,
these can be discarded
>>> parse_pattern('L.Fl.')
('lfl', [1])
It could have grouping information in parentheses
>>> parse_pattern('Fl(2)')
('fl', [2])
The group could be a composite group.
>>> parse_pattern('Oc(2+1)')
('oc', [2, 1])
"""
pattern_type, _, group_spec = pattern.partition('(')
# Groups are separated by '+' in a composite pattern.
groups = [
int(group) for group in group_spec[:-1].split('+')
] if group_spec else [1]
# Some light lists use dots, some don't, just throw them away
return pattern_type.lower().replace('.', ''), groups
def collapse_states(states):
"""
Given a list of light states, collapse any adjacent entries that have the
same state.
If there are no adjacent matching states, there is no change to the output
>>> collapse_states([('R',1), ('Y', 1), ('R', 1)])
[('R', 1), ('Y', 1), ('R', 1)]
Adjacent states are collapsed, summing their durations
>>> collapse_states([('R',1), ('R', 1), ('Y', 1)])
[('R', 2), ('Y', 1)]
>>> collapse_states([('R',1), ('R', 2), ('R', 3), ('Y', 1)])
[('R', 6), ('Y', 1)]
"""
new_states = states[:1]
for state in states[1:]:
last_state = new_states[-1]
if state[0] == last_state[0]:
new_states[-1] = (state[0], last_state[1] + state[1])
else:
new_states.append(state)
return new_states
def states_to_frames(size, states, fg, off_img):
def create_frame(colour):
if colour == 'Off' and fg is not None:
return off_img
colour_img = Image.new('RGBA', size, color=COLOURS[colour])
if fg is not None:
colour_img.alpha_composite(fg)
return colour_img
return [
create_frame(state[0])
for state in states
], [state[1] for state in states]
def light_sequence(
groups, colour1, colour2, total_period, colour1_period, colour2_period
):
flash_period = colour1_period + colour2_period
group_states = [
single_flash(
group, colour1, colour2, colour1_period, colour2_period
) for group in groups
]
# When there are multiple groups,
# the remainder is shared equally between each of them.
# If the remainder is not perfectly divisible by the number of groups,
# the final period swallows up the spare.
# Being as this is calculated in milliseconds, this is imperceptible.
remainder = total_period - (flash_period * sum(groups))
remainder_share = math.floor(remainder/len(groups))
final_remainder = remainder - (remainder_share * (len(groups)-1))
for group_state in group_states[:-1]:
group_state.append((colour2, remainder_share))
group_states[-1].append((colour2, final_remainder))
return list(itertools.chain.from_iterable(group_states))
def single_flash(flash_count, colour1, colour2, period1, period2):
return [(colour1, period1), (colour2, period2)] * flash_count
def fixed(_groups, colour, _period):
"""
The Fixed pattern is simply an always-on light in the given colour.
groups and period are irrelevant.
"""
return [(colour, 1)]
def flash(groups, colour, period):
"""
A flash is a single colour displayed for a short period, followed by
a longer period of darkness
A single flash of a given colour is a 1 second flash
>>> flash([1], 'R', 5000)
[('R', 1000), ('Off', 4000)]
Grouped flashes have a shorter duration
>>> flash([3], 'R', 10000)
[('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\
('Off', 1000), ('Off', 5500)]
Composite groups are separated by an even period of darkness
>>> flash([3, 1], 'R', 10000)
[('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\
('Off', 1000), ('Off', 2000), ('R', 500), ('Off', 1000), ('Off', 2000)]
The total duration of all states matches the requested period
>>> sum((state[1] for state in flash([1], 'R', 5000))) == 5000
True
"""
if groups == [1]:
if period <= 2000:
raise ValueError(
"The cycle period for a flash must be longer than 2 seconds"
)
return [
(colour, 1000),
('Off', period-1000)
]
return light_sequence(groups, colour, 'Off', period, 500, 1000)
def long_flash(groups, colour, period):
"""A Long flash is at least 2 seconds"""
if groups == [1]:
return [
(colour, 2000),
('Off', period - 2000)
]
return light_sequence(groups, colour, 'Off', period, 2000, 3000)
def isophase(_groups, colour, period):
"""
isophase is a pattern with equal dark and light. There are no groups.
"""
# Whole numbers are required, so odd numbers are dealt with by loading
# the spare into the off period.
# As this is in milliseconds, this will be imperceptible.
# It is also unlikely, as the top-level input is in seconds
# and has been multiplied up to milliseconds before reaching this
# function
return [
(colour, math.floor(period/2)),
('Off', math.ceil(period/2))
]
def occulting(groups, colour, period):
"""
An occulting pattern is the opposite of a flash - dark with longer light
"""
if groups == [1]:
return [
('Off', 1000),
(colour, period - 1000)
]
return light_sequence(groups, 'Off', colour, period, 500, 1000)
def quick(groups, colour, period):
"""
A Quick flash is more than 50 per minute.
"""
# The cycle period cannot be longer than 1.2s (60/50)
# or shorter than 0.5s
if groups == [1]:
if period is not None:
raise ValueError(
"Quick Flash cycle periods must be longer than 0.5 seconds"
)
return [
(colour, 250),
('Off', 750)
]
return light_sequence(groups, 'Off', colour, period, 250, 500)
TYPES = {
'f': fixed,
'fl': flash,
'q': quick,
'lfl': long_flash,
'iso': isophase,
'oc': occulting
}
| parse_pattern | identifier_name |
light_character.py | """Main module."""
import itertools
import math
import os
from PIL import Image
LIGHTHOUSES_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'lighthouses'
)
COLOURS = {
'R': (255, 0, 0, 1),
'G': (0, 255, 0, 1),
'W': (255, 255, 255, 1),
'Y': (255, 255, 0, 1),
'Off': (0, 0, 0, 0)
}
def save_characteristic_as_image(
characteristic, size, write_buffer, base_img=None
):
on_img, off_img = load_base_images(base_img)
size = on_img.size if on_img is not None else size
frames, durations = states_to_frames(
size, collapse_states(characteristic_to_light_states(characteristic)),
on_img, off_img
)
frames = [frame.convert('RGB') for frame in frames]
if len(frames) > 1:
save_options = {
"format": "GIF",
"save_all": True,
"append_images": frames[1:],
"duration": durations,
"loop": 0
}
if base_img is None:
# If this is just a block light, these settings allow "Off"
# to be fully transparent
# Leaving them in place for images with lighthouses
# can cause odd effects, due to combining palettes.
save_options.update(
{
"transparency": 0,
"optimize": False,
"disposal": 3
}
)
frames[0].save(
write_buffer, **save_options
)
else:
frames[0].save(write_buffer, format="GIF")
return write_buffer
def load_base_images(base_img):
"""
Return the two base images needed to create a lighthouse animation.
base_img is either
- A full/relative path from the run context
- The name of a directory under lighthouses here
"""
if base_img is not None:
if not os.path.exists(base_img):
base_img = os.path.join(LIGHTHOUSES_DIR, base_img)
return (
Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),
Image.open(os.path.join(base_img, 'off.gif'))
)
return None, None
def characteristic_to_light_states(description):
"""
Given a light characteristic, return a list of 2-tuples representing the
state of light at any given time.
A fixed light is the given colour, permanently
>>> characteristic_to_light_states('F. R')
[('R', 1)]
"""
fragments = description.split()
pattern_type, groups = parse_pattern(fragments.pop(0))
colour, fragments = get_colour_code(fragments)
try:
period = parse_period(fragments)
except IndexError:
if must_have_period(pattern_type, groups):
raise
period = None
if period is not None and cannot_have_period(pattern_type, groups):
raise ValueError('Period is not allowed in this type of light')
return TYPES[pattern_type](groups, colour, period)
def get_colour_code(fragments):
if len(fragments) == 0 or fragments[0] not in COLOURS.keys():
return 'W', fragments
return fragments[0], fragments[1:]
def parse_period(fragments):
"""
Given the split up characteristic, return the period in milliseconds
The period is specified in seconds
>>> parse_period(['2'])
2000
The letter 's' to mark the units may be present
>>> parse_period(['3s'])
3000
It may be separated from the number by a space
>>> parse_period(['4','s'])
4000
A Quick flash can only have a period if it has groups
>>> parse_period(['3s'])
3000
"""
period_spec = fragments[-1]
# The last term is the cycle period,
# it may or may not have 's' for seconds
# The 's' may or may not be attached to the number
if period_spec == 's':
period_spec = fragments[-2]
if period_spec[-1] == 's':
period_spec = period_spec[:-1]
return int(float(period_spec) * 1000)
def cannot_have_period(pattern_type, groups):
return pattern_type == 'f' or (pattern_type == 'q' and groups == [1])
def must_have_period(pattern_type, groups):
return not(cannot_have_period(pattern_type, groups))
def parse_pattern(pattern):
"""
Crack a pattern definition into its type and any grouping.
A pattern consists of the pattern type (e.g. flashing, occulting)
and optionally a group designation in parentheses.
The pattern definition could just be the type
>>> parse_pattern('Fl')
('fl', [1])
It could have optional dots marking the abbreviation,
these can be discarded
>>> parse_pattern('L.Fl.')
('lfl', [1])
It could have grouping information in parentheses
>>> parse_pattern('Fl(2)')
('fl', [2])
The group could be a composite group.
>>> parse_pattern('Oc(2+1)')
('oc', [2, 1])
"""
pattern_type, _, group_spec = pattern.partition('(')
# Groups are separated by '+' in a composite pattern.
groups = [
int(group) for group in group_spec[:-1].split('+')
] if group_spec else [1]
# Some light lists use dots, some don't, just throw them away
return pattern_type.lower().replace('.', ''), groups
def collapse_states(states):
"""
Given a list of light states, collapse any adjacent entries that have the
same state.
If there are no adjacent matching states, there is no change to the output
>>> collapse_states([('R',1), ('Y', 1), ('R', 1)])
[('R', 1), ('Y', 1), ('R', 1)]
Adjacent states are collapsed, summing their durations
>>> collapse_states([('R',1), ('R', 1), ('Y', 1)])
[('R', 2), ('Y', 1)]
>>> collapse_states([('R',1), ('R', 2), ('R', 3), ('Y', 1)])
[('R', 6), ('Y', 1)]
"""
new_states = states[:1]
for state in states[1:]:
last_state = new_states[-1]
if state[0] == last_state[0]:
new_states[-1] = (state[0], last_state[1] + state[1])
else:
new_states.append(state)
return new_states
def states_to_frames(size, states, fg, off_img):
def create_frame(colour):
if colour == 'Off' and fg is not None:
return off_img
colour_img = Image.new('RGBA', size, color=COLOURS[colour])
if fg is not None:
colour_img.alpha_composite(fg)
return colour_img
return [
create_frame(state[0])
for state in states
], [state[1] for state in states]
def light_sequence(
groups, colour1, colour2, total_period, colour1_period, colour2_period
):
flash_period = colour1_period + colour2_period
group_states = [
single_flash(
group, colour1, colour2, colour1_period, colour2_period
) for group in groups
]
# When there are multiple groups,
# the remainder is shared equally between each of them.
# If the remainder is not perfectly divisible by the number of groups,
# the final period swallows up the spare.
# Being as this is calculated in milliseconds, this is imperceptible.
remainder = total_period - (flash_period * sum(groups))
remainder_share = math.floor(remainder/len(groups))
final_remainder = remainder - (remainder_share * (len(groups)-1))
for group_state in group_states[:-1]:
group_state.append((colour2, remainder_share))
group_states[-1].append((colour2, final_remainder))
return list(itertools.chain.from_iterable(group_states))
def single_flash(flash_count, colour1, colour2, period1, period2):
return [(colour1, period1), (colour2, period2)] * flash_count
def fixed(_groups, colour, _period):
"""
The Fixed pattern is simply an always-on light in the given colour.
groups and period are irrelevant.
"""
return [(colour, 1)]
def flash(groups, colour, period):
"""
A flash is a single colour displayed for a short period, followed by
a longer period of darkness
A single flash of a given colour is a 1 second flash
>>> flash([1], 'R', 5000)
[('R', 1000), ('Off', 4000)]
Grouped flashes have a shorter duration
>>> flash([3], 'R', 10000)
[('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\
('Off', 1000), ('Off', 5500)]
Composite groups are separated by an even period of darkness
>>> flash([3, 1], 'R', 10000)
[('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\
('Off', 1000), ('Off', 2000), ('R', 500), ('Off', 1000), ('Off', 2000)]
The total duration of all states matches the requested period
>>> sum((state[1] for state in flash([1], 'R', 5000))) == 5000
True
"""
if groups == [1]:
|
return light_sequence(groups, colour, 'Off', period, 500, 1000)
def long_flash(groups, colour, period):
"""A Long flash is at least 2 seconds"""
if groups == [1]:
return [
(colour, 2000),
('Off', period - 2000)
]
return light_sequence(groups, colour, 'Off', period, 2000, 3000)
def isophase(_groups, colour, period):
"""
isophase is a pattern with equal dark and light. There are no groups.
"""
# Whole numbers are required, so odd numbers are dealt with by loading
# the spare into the off period.
# As this is in milliseconds, this will be imperceptible.
# It is also unlikely, as the top-level input is in seconds
# and has been multiplied up to milliseconds before reaching this
# function
return [
(colour, math.floor(period/2)),
('Off', math.ceil(period/2))
]
def occulting(groups, colour, period):
"""
An occulting pattern is the opposite of a flash - dark with longer light
"""
if groups == [1]:
return [
('Off', 1000),
(colour, period - 1000)
]
return light_sequence(groups, 'Off', colour, period, 500, 1000)
def quick(groups, colour, period):
"""
A Quick flash is more than 50 per minute.
"""
# The cycle period cannot be longer than 1.2s (60/50)
# or shorter than 0.5s
if groups == [1]:
if period is not None:
raise ValueError(
"Quick Flash cycle periods must be longer than 0.5 seconds"
)
return [
(colour, 250),
('Off', 750)
]
return light_sequence(groups, 'Off', colour, period, 250, 500)
TYPES = {
'f': fixed,
'fl': flash,
'q': quick,
'lfl': long_flash,
'iso': isophase,
'oc': occulting
}
| if period <= 2000:
raise ValueError(
"The cycle period for a flash must be longer than 2 seconds"
)
return [
(colour, 1000),
('Off', period-1000)
] | conditional_block |
light_character.py | """Main module."""
import itertools
import math
import os
from PIL import Image
LIGHTHOUSES_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'lighthouses'
)
COLOURS = {
'R': (255, 0, 0, 1),
'G': (0, 255, 0, 1),
'W': (255, 255, 255, 1),
'Y': (255, 255, 0, 1),
'Off': (0, 0, 0, 0)
}
def save_characteristic_as_image(
characteristic, size, write_buffer, base_img=None
):
on_img, off_img = load_base_images(base_img)
size = on_img.size if on_img is not None else size
frames, durations = states_to_frames(
size, collapse_states(characteristic_to_light_states(characteristic)),
on_img, off_img
)
frames = [frame.convert('RGB') for frame in frames]
if len(frames) > 1:
save_options = {
"format": "GIF",
"save_all": True,
"append_images": frames[1:],
"duration": durations,
"loop": 0
}
if base_img is None:
# If this is just a block light, these settings allow "Off"
# to be fully transparent
# Leaving them in place for images with lighthouses
# can cause odd effects, due to combining palettes.
save_options.update(
{
"transparency": 0,
"optimize": False,
"disposal": 3
}
)
frames[0].save(
write_buffer, **save_options
)
else:
frames[0].save(write_buffer, format="GIF")
return write_buffer
def load_base_images(base_img):
"""
Return the two base images needed to create a lighthouse animation.
base_img is either
- A full/relative path from the run context
- The name of a directory under lighthouses here
"""
if base_img is not None:
if not os.path.exists(base_img):
base_img = os.path.join(LIGHTHOUSES_DIR, base_img)
return (
Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),
Image.open(os.path.join(base_img, 'off.gif'))
)
return None, None
def characteristic_to_light_states(description):
"""
Given a light characteristic, return a list of 2-tuples representing the
state of light at any given time.
A fixed light is the given colour, permanently
>>> characteristic_to_light_states('F. R')
[('R', 1)]
"""
fragments = description.split()
pattern_type, groups = parse_pattern(fragments.pop(0))
colour, fragments = get_colour_code(fragments)
try:
period = parse_period(fragments)
except IndexError:
if must_have_period(pattern_type, groups):
raise
period = None
if period is not None and cannot_have_period(pattern_type, groups):
raise ValueError('Period is not allowed in this type of light')
return TYPES[pattern_type](groups, colour, period)
def get_colour_code(fragments):
if len(fragments) == 0 or fragments[0] not in COLOURS.keys():
return 'W', fragments
return fragments[0], fragments[1:]
def parse_period(fragments):
"""
Given the split up characteristic, return the period in milliseconds
The period is specified in seconds
>>> parse_period(['2'])
2000
The letter 's' to mark the units may be present
>>> parse_period(['3s'])
3000
It may be separated from the number by a space
>>> parse_period(['4','s'])
4000
A Quick flash can only have a period if it has groups
>>> parse_period(['3s'])
3000
"""
period_spec = fragments[-1]
# The last term is the cycle period,
# it may or may not have 's' for seconds
# The 's' may or may not be attached to the number
if period_spec == 's':
period_spec = fragments[-2]
if period_spec[-1] == 's':
period_spec = period_spec[:-1]
return int(float(period_spec) * 1000)
def cannot_have_period(pattern_type, groups):
return pattern_type == 'f' or (pattern_type == 'q' and groups == [1])
def must_have_period(pattern_type, groups):
return not(cannot_have_period(pattern_type, groups))
def parse_pattern(pattern):
"""
Crack a pattern definition into its type and any grouping.
A pattern consists of the pattern type (e.g. flashing, occulting)
and optionally a group designation in parentheses.
The pattern definition could just be the type
>>> parse_pattern('Fl')
('fl', [1])
It could have optional dots marking the abbreviation,
these can be discarded
>>> parse_pattern('L.Fl.')
('lfl', [1])
It could have grouping information in parentheses
>>> parse_pattern('Fl(2)')
('fl', [2])
The group could be a composite group.
>>> parse_pattern('Oc(2+1)')
('oc', [2, 1])
"""
pattern_type, _, group_spec = pattern.partition('(')
# Groups are separated by '+' in a composite pattern.
groups = [
int(group) for group in group_spec[:-1].split('+')
] if group_spec else [1]
# Some light lists use dots, some don't, just throw them away
return pattern_type.lower().replace('.', ''), groups
def collapse_states(states):
"""
Given a list of light states, collapse any adjacent entries that have the
same state.
If there are no adjacent matching states, there is no change to the output
>>> collapse_states([('R',1), ('Y', 1), ('R', 1)])
[('R', 1), ('Y', 1), ('R', 1)]
Adjacent states are collapsed, summing their durations
>>> collapse_states([('R',1), ('R', 1), ('Y', 1)])
[('R', 2), ('Y', 1)]
>>> collapse_states([('R',1), ('R', 2), ('R', 3), ('Y', 1)])
[('R', 6), ('Y', 1)]
"""
new_states = states[:1]
for state in states[1:]:
last_state = new_states[-1]
if state[0] == last_state[0]:
new_states[-1] = (state[0], last_state[1] + state[1])
else:
new_states.append(state)
return new_states
def states_to_frames(size, states, fg, off_img):
def create_frame(colour):
if colour == 'Off' and fg is not None:
return off_img
colour_img = Image.new('RGBA', size, color=COLOURS[colour])
if fg is not None:
colour_img.alpha_composite(fg)
return colour_img
return [
create_frame(state[0])
for state in states
], [state[1] for state in states]
def light_sequence(
groups, colour1, colour2, total_period, colour1_period, colour2_period
):
flash_period = colour1_period + colour2_period
group_states = [
single_flash(
group, colour1, colour2, colour1_period, colour2_period
) for group in groups
]
# When there are multiple groups,
# the remainder is shared equally between each of them.
# If the remainder is not perfectly divisible by the number of groups,
# the final period swallows up the spare.
# Being as this is calculated in milliseconds, this is imperceptible.
remainder = total_period - (flash_period * sum(groups))
remainder_share = math.floor(remainder/len(groups))
final_remainder = remainder - (remainder_share * (len(groups)-1))
for group_state in group_states[:-1]:
group_state.append((colour2, remainder_share))
group_states[-1].append((colour2, final_remainder))
return list(itertools.chain.from_iterable(group_states))
def single_flash(flash_count, colour1, colour2, period1, period2):
return [(colour1, period1), (colour2, period2)] * flash_count
def fixed(_groups, colour, _period):
"""
The Fixed pattern is simply an always-on light in the given colour.
groups and period are irrelevant.
"""
return [(colour, 1)]
def flash(groups, colour, period):
"""
A flash is a single colour displayed for a short period, followed by
a longer period of darkness
A single flash of a given colour is a 1 second flash
>>> flash([1], 'R', 5000)
[('R', 1000), ('Off', 4000)]
Grouped flashes have a shorter duration
>>> flash([3], 'R', 10000) | Composite groups are separated by an even period of darkness
>>> flash([3, 1], 'R', 10000)
[('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\
('Off', 1000), ('Off', 2000), ('R', 500), ('Off', 1000), ('Off', 2000)]
The total duration of all states matches the requested period
>>> sum((state[1] for state in flash([1], 'R', 5000))) == 5000
True
"""
if groups == [1]:
if period <= 2000:
raise ValueError(
"The cycle period for a flash must be longer than 2 seconds"
)
return [
(colour, 1000),
('Off', period-1000)
]
return light_sequence(groups, colour, 'Off', period, 500, 1000)
def long_flash(groups, colour, period):
"""A Long flash is at least 2 seconds"""
if groups == [1]:
return [
(colour, 2000),
('Off', period - 2000)
]
return light_sequence(groups, colour, 'Off', period, 2000, 3000)
def isophase(_groups, colour, period):
"""
isophase is a pattern with equal dark and light. There are no groups.
"""
# Whole numbers are required, so odd numbers are dealt with by loading
# the spare into the off period.
# As this is in milliseconds, this will be imperceptible.
# It is also unlikely, as the top-level input is in seconds
# and has been multiplied up to milliseconds before reaching this
# function
return [
(colour, math.floor(period/2)),
('Off', math.ceil(period/2))
]
def occulting(groups, colour, period):
"""
An occulting pattern is the opposite of a flash - dark with longer light
"""
if groups == [1]:
return [
('Off', 1000),
(colour, period - 1000)
]
return light_sequence(groups, 'Off', colour, period, 500, 1000)
def quick(groups, colour, period):
"""
A Quick flash is more than 50 per minute.
"""
# The cycle period cannot be longer than 1.2s (60/50)
# or shorter than 0.5s
if groups == [1]:
if period is not None:
raise ValueError(
"Quick Flash cycle periods must be longer than 0.5 seconds"
)
return [
(colour, 250),
('Off', 750)
]
return light_sequence(groups, 'Off', colour, period, 250, 500)
TYPES = {
'f': fixed,
'fl': flash,
'q': quick,
'lfl': long_flash,
'iso': isophase,
'oc': occulting
} | [('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\
('Off', 1000), ('Off', 5500)]
| random_line_split |
light_character.py | """Main module."""
import itertools
import math
import os
from PIL import Image
LIGHTHOUSES_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'lighthouses'
)
COLOURS = {
'R': (255, 0, 0, 1),
'G': (0, 255, 0, 1),
'W': (255, 255, 255, 1),
'Y': (255, 255, 0, 1),
'Off': (0, 0, 0, 0)
}
def save_characteristic_as_image(
characteristic, size, write_buffer, base_img=None
):
on_img, off_img = load_base_images(base_img)
size = on_img.size if on_img is not None else size
frames, durations = states_to_frames(
size, collapse_states(characteristic_to_light_states(characteristic)),
on_img, off_img
)
frames = [frame.convert('RGB') for frame in frames]
if len(frames) > 1:
save_options = {
"format": "GIF",
"save_all": True,
"append_images": frames[1:],
"duration": durations,
"loop": 0
}
if base_img is None:
# If this is just a block light, these settings allow "Off"
# to be fully transparent
# Leaving them in place for images with lighthouses
# can cause odd effects, due to combining palettes.
save_options.update(
{
"transparency": 0,
"optimize": False,
"disposal": 3
}
)
frames[0].save(
write_buffer, **save_options
)
else:
frames[0].save(write_buffer, format="GIF")
return write_buffer
def load_base_images(base_img):
"""
Return the two base images needed to create a lighthouse animation.
base_img is either
- A full/relative path from the run context
- The name of a directory under lighthouses here
"""
if base_img is not None:
if not os.path.exists(base_img):
base_img = os.path.join(LIGHTHOUSES_DIR, base_img)
return (
Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),
Image.open(os.path.join(base_img, 'off.gif'))
)
return None, None
def characteristic_to_light_states(description):
"""
Given a light characteristic, return a list of 2-tuples representing the
state of light at any given time.
A fixed light is the given colour, permanently
>>> characteristic_to_light_states('F. R')
[('R', 1)]
"""
fragments = description.split()
pattern_type, groups = parse_pattern(fragments.pop(0))
colour, fragments = get_colour_code(fragments)
try:
period = parse_period(fragments)
except IndexError:
if must_have_period(pattern_type, groups):
raise
period = None
if period is not None and cannot_have_period(pattern_type, groups):
raise ValueError('Period is not allowed in this type of light')
return TYPES[pattern_type](groups, colour, period)
def get_colour_code(fragments):
if len(fragments) == 0 or fragments[0] not in COLOURS.keys():
return 'W', fragments
return fragments[0], fragments[1:]
def parse_period(fragments):
|
def cannot_have_period(pattern_type, groups):
return pattern_type == 'f' or (pattern_type == 'q' and groups == [1])
def must_have_period(pattern_type, groups):
return not(cannot_have_period(pattern_type, groups))
def parse_pattern(pattern):
"""
Crack a pattern definition into its type and any grouping.
A pattern consists of the pattern type (e.g. flashing, occulting)
and optionally a group designation in parentheses.
The pattern definition could just be the type
>>> parse_pattern('Fl')
('fl', [1])
It could have optional dots marking the abbreviation,
these can be discarded
>>> parse_pattern('L.Fl.')
('lfl', [1])
It could have grouping information in parentheses
>>> parse_pattern('Fl(2)')
('fl', [2])
The group could be a composite group.
>>> parse_pattern('Oc(2+1)')
('oc', [2, 1])
"""
pattern_type, _, group_spec = pattern.partition('(')
# Groups are separated by '+' in a composite pattern.
groups = [
int(group) for group in group_spec[:-1].split('+')
] if group_spec else [1]
# Some light lists use dots, some don't, just throw them away
return pattern_type.lower().replace('.', ''), groups
def collapse_states(states):
"""
Given a list of light states, collapse any adjacent entries that have the
same state.
If there are no adjacent matching states, there is no change to the output
>>> collapse_states([('R',1), ('Y', 1), ('R', 1)])
[('R', 1), ('Y', 1), ('R', 1)]
Adjacent states are collapsed, summing their durations
>>> collapse_states([('R',1), ('R', 1), ('Y', 1)])
[('R', 2), ('Y', 1)]
>>> collapse_states([('R',1), ('R', 2), ('R', 3), ('Y', 1)])
[('R', 6), ('Y', 1)]
"""
new_states = states[:1]
for state in states[1:]:
last_state = new_states[-1]
if state[0] == last_state[0]:
new_states[-1] = (state[0], last_state[1] + state[1])
else:
new_states.append(state)
return new_states
def states_to_frames(size, states, fg, off_img):
def create_frame(colour):
if colour == 'Off' and fg is not None:
return off_img
colour_img = Image.new('RGBA', size, color=COLOURS[colour])
if fg is not None:
colour_img.alpha_composite(fg)
return colour_img
return [
create_frame(state[0])
for state in states
], [state[1] for state in states]
def light_sequence(
    groups, colour1, colour2, total_period, colour1_period, colour2_period
):
    """Build the full state list for a (possibly composite) group pattern.

    Each group emits its flashes, then the time left in the cycle is
    split evenly between the groups as a colour2 gap. If the remainder
    is not perfectly divisible by the number of groups, the final gap
    absorbs the spare; at millisecond resolution this is imperceptible.
    """
    per_flash = colour1_period + colour2_period
    spare = total_period - per_flash * sum(groups)
    share = math.floor(spare / len(groups))

    sequence = []
    last = len(groups) - 1
    for index, group in enumerate(groups):
        sequence.extend(
            single_flash(group, colour1, colour2, colour1_period, colour2_period)
        )
        if index == last:
            # The final gap swallows anything not evenly divisible.
            sequence.append((colour2, spare - share * last))
        else:
            sequence.append((colour2, share))
    return sequence
def single_flash(flash_count, colour1, colour2, period1, period2):
    """Return flash_count repetitions of a colour1-then-colour2 pair."""
    states = []
    for _ in range(flash_count):
        states.append((colour1, period1))
        states.append((colour2, period2))
    return states
def fixed(_groups, colour, _period):
    """A fixed light: permanently on in the given colour.

    The group and period arguments exist only for interface parity with
    the other pattern builders and are ignored.
    """
    return [(colour, 1)]
def flash(groups, colour, period):
    """A flash: a short burst of colour followed by longer darkness.

    An ungrouped flash is a one-second burst; grouped flashes are
    shorter (500ms on, 1000ms off) and delegate to light_sequence so
    the total duration still matches the requested cycle period.

    >>> flash([1], 'R', 5000)
    [('R', 1000), ('Off', 4000)]
    >>> sum(duration for _, duration in flash([1], 'R', 5000)) == 5000
    True
    """
    if groups != [1]:
        return light_sequence(groups, colour, 'Off', period, 500, 1000)
    if period <= 2000:
        raise ValueError(
            "The cycle period for a flash must be longer than 2 seconds"
        )
    return [(colour, 1000), ('Off', period - 1000)]
def long_flash(groups, colour, period):
    """A long flash shows the colour for at least two seconds."""
    if groups != [1]:
        return light_sequence(groups, colour, 'Off', period, 2000, 3000)
    return [(colour, 2000), ('Off', period - 2000)]
def isophase(_groups, colour, period):
    """Equal periods of light and dark; groups do not apply.

    Whole milliseconds are required, so an odd period puts the spare
    millisecond into the dark phase. At this resolution (the top-level
    input is in seconds, multiplied up before reaching here) the
    difference is imperceptible.
    """
    dark = math.ceil(period / 2)
    return [(colour, period - dark), ('Off', dark)]
def occulting(groups, colour, period):
    """The opposite of a flash: brief darkness against a lit background."""
    if groups != [1]:
        return light_sequence(groups, 'Off', colour, period, 500, 1000)
    return [('Off', 1000), (colour, period - 1000)]
def quick(groups, colour, period):
    """
    A Quick flash is more than 50 per minute.

    An ungrouped quick flash has an implied one-second cycle (0.25s on,
    0.75s off), so a cycle period may only be supplied for grouped
    patterns (see parse_period).

    >>> quick([1], 'R', None)
    [('R', 250), ('Off', 750)]
    """
    # The cycle period cannot be longer than 1.2s (60/50)
    # or shorter than 0.5s
    if groups == [1]:
        if period is not None:
            # The previous message claimed a minimum-length check that was
            # never performed; the actual rule is "no period when ungrouped".
            raise ValueError(
                "An ungrouped Quick Flash does not take a cycle period"
            )
        return [
            (colour, 250),
            ('Off', 750)
        ]
    # Quick is a flash-type pattern: colour first, then darkness.
    # (The colours were previously passed in occulting order by mistake,
    # producing dark flashes on a lit background.)
    return light_sequence(groups, colour, 'Off', period, 250, 500)
# Dispatch table mapping the normalised pattern abbreviation (as returned
# by parse_pattern) to the builder that expands it into light states.
TYPES = {
    'f': fixed,         # Fixed
    'fl': flash,        # Flashing
    'q': quick,         # Quick flashing
    'lfl': long_flash,  # Long flash
    'iso': isophase,    # Isophase
    'oc': occulting     # Occulting
}
| """
Given the split up characteristic, return the period in milliseconds
The period is specified in seconds
>>> parse_period(['2'])
2000
The letter 's' to mark the units may be present
>>> parse_period(['3s'])
3000
It may be separated from the number by a space
>>> parse_period(['4','s'])
4000
A Quick flash can only have a period if it has groups
>>> parse_period(['3s'])
3000
"""
period_spec = fragments[-1]
# The last term is the cycle period,
# it may or may not have 's' for seconds
# The 's' may or may not be attached to the number
if period_spec == 's':
period_spec = fragments[-2]
if period_spec[-1] == 's':
period_spec = period_spec[:-1]
return int(float(period_spec) * 1000) | identifier_body |
adapter_test.go | // Copyright (c) 2012-today The upper.io/db authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//go:generate bash -c "sed s/ADAPTER/postgresql/g ../internal/sqladapter/testing/adapter.go.tpl > generated_test.go"
package postgresql
import (
"database/sql"
"database/sql/driver"
"fmt"
"math/rand"
"os"
"sync"
"testing"
"github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"upper.io/db.v3"
"upper.io/db.v3/internal/sqladapter"
"upper.io/db.v3/lib/sqlbuilder"
)
// Time zone applied to every test connection so timestamp round-trips
// behave deterministically.
const (
    testTimeZone = "Canada/Eastern"
)

// Connection settings for the test database, read from the environment
// so the suite can be pointed at any PostgreSQL instance.
var settings = ConnectionURL{
    Database: os.Getenv("DB_NAME"),
    User:     os.Getenv("DB_USERNAME"),
    Password: os.Getenv("DB_PASSWORD"),
    Host:     os.Getenv("DB_HOST") + ":" + os.Getenv("DB_PORT"),
    Options: map[string]string{
        "timezone": testTimeZone,
    },
}
// tearUp (re)creates every table the test suite relies on, dropping any
// leftovers from previous runs. It returns the first error encountered
// while executing the DDL batch.
func tearUp() error {
    sess := mustOpen()
    defer sess.Close()

    batch := []string{
        `DROP TABLE IF EXISTS artist`,
        `CREATE TABLE artist (
    id serial primary key,
    name varchar(60)
)`,

        `DROP TABLE IF EXISTS publication`,
        `CREATE TABLE publication (
    id serial primary key,
    title varchar(80),
    author_id integer
)`,

        `DROP TABLE IF EXISTS review`,
        `CREATE TABLE review (
    id serial primary key,
    publication_id integer,
    name varchar(80),
    comments text,
    created timestamp without time zone
)`,

        `DROP TABLE IF EXISTS data_types`,
        `CREATE TABLE data_types (
    id serial primary key,
    _uint integer,
    _uint8 integer,
    _uint16 integer,
    _uint32 integer,
    _uint64 integer,
    _int integer,
    _int8 integer,
    _int16 integer,
    _int32 integer,
    _int64 integer,
    _float32 numeric(10,6),
    _float64 numeric(10,6),
    _bool boolean,
    _string text,
    _date timestamp with time zone,
    _nildate timestamp without time zone null,
    _ptrdate timestamp without time zone,
    _defaultdate timestamp without time zone DEFAULT now(),
    _time bigint
)`,

        `DROP TABLE IF EXISTS stats_test`,
        `CREATE TABLE stats_test (
    id serial primary key,
    numeric integer,
    value integer
)`,

        `DROP TABLE IF EXISTS composite_keys`,
        `CREATE TABLE composite_keys (
    code varchar(255) default '',
    user_id varchar(255) default '',
    some_val varchar(255) default '',
    primary key (code, user_id)
)`,

        `DROP TABLE IF EXISTS option_types`,
        `CREATE TABLE option_types (
    id serial primary key,
    name varchar(255) default '',
    tags varchar(64)[],
    settings jsonb
)`,

        `DROP TABLE IF EXISTS test_schema.test`,
        `DROP SCHEMA IF EXISTS test_schema`,
        `CREATE SCHEMA test_schema`,
        `CREATE TABLE test_schema.test (id integer)`,

        `DROP TABLE IF EXISTS pg_types`,
        `CREATE TABLE pg_types (
    id serial primary key,
    auto_integer_array integer[],
    auto_string_array text[],
    auto_integer_array_ptr integer[],
    auto_string_array_ptr text[],
    integer_array integer[],
    string_value varchar(255),
    auto_jsonb jsonb,
    auto_jsonb_map jsonb,
    auto_jsonb_array jsonb,
    custom_jsonb jsonb,
    auto_jsonb_ptr jsonb,
    integer_valuer_value smallint[],
    string_array text[],
    field1 int,
    field2 varchar(64),
    field3 decimal
)`,

        `DROP TABLE IF EXISTS issue_370`,
        `CREATE TABLE issue_370 (
    id UUID PRIMARY KEY,
    name VARCHAR(25)
)`,

        `DROP TABLE IF EXISTS issue_370_2`,
        `CREATE TABLE issue_370_2 (
    id INTEGER[3] PRIMARY KEY,
    name VARCHAR(25)
)`,
    }

    // The underlying *sql.DB does not change between statements, so the
    // type assertion is hoisted out of the loop instead of being redone
    // for every DDL statement.
    driver := sess.Driver().(*sql.DB)
    for _, s := range batch {
        if _, err := driver.Exec(s); err != nil {
            return err
        }
    }

    return nil
}
// customJSONB is a user-defined struct stored in a jsonb column through
// the driver.Valuer / sql.Scanner interfaces.
type customJSONB struct {
    N string  `json:"name"`
    V float64 `json:"value"`
}

// Value encodes the struct as jsonb for writing to the database.
func (c customJSONB) Value() (driver.Value, error) {
    return EncodeJSONB(c)
}

// Scan decodes a jsonb value read from the database into the struct.
func (c *customJSONB) Scan(src interface{}) error {
    return DecodeJSONB(c, src)
}

// Compile-time checks that customJSONB satisfies both interfaces.
var (
    _ = driver.Valuer(&customJSONB{})
    _ = sql.Scanner(&customJSONB{})
)
// testPostgreSQLTypes round-trips a battery of PGType fixtures through
// the pg_types table via Collection.Insert, InsertInto/Returning and a
// batch inserter, asserting every value survives the trip intact. The
// fixture order is shuffled on each of 100 iterations to shake out
// ordering-dependent marshalling bugs.
func testPostgreSQLTypes(t *testing.T, sess sqlbuilder.Database) {
    type PGType struct {
        ID int64 `db:"id,omitempty"`

        IntegerArray Int64Array  `db:"integer_array"`
        StringValue  *string     `db:"string_value,omitempty"`
        StringArray  StringArray `db:"string_array"`

        Field1 *int64   `db:"field1,omitempty"`
        Field2 *string  `db:"field2,omitempty"`
        Field3 *float64 `db:"field3,omitempty"`

        AutoIntegerArray Int64Array  `db:"auto_integer_array"`
        AutoStringArray  StringArray `db:"auto_string_array"`
        AutoJSONB        JSONB       `db:"auto_jsonb"`
        AutoJSONBMap     JSONBMap    `db:"auto_jsonb_map"`
        AutoJSONBArray   JSONBArray  `db:"auto_jsonb_array"`
        CustomJSONB      customJSONB `db:"custom_jsonb"`

        AutoIntegerArrayPtr *Int64Array  `db:"auto_integer_array_ptr,omitempty"`
        AutoStringArrayPtr  *StringArray `db:"auto_string_array_ptr,omitempty"`
        AutoJSONBPtr        *JSONB       `db:"auto_jsonb_ptr,omitempty"`
    }

    field1 := int64(10)
    field2 := string("ten")
    field3 := float64(10.0)

    testValue := "Hello world!"

    origPgTypeTests := []PGType{
        PGType{
            Field1: &field1,
            Field2: &field2,
            Field3: &field3,
        },
        PGType{
            IntegerArray: []int64{1, 2, 3, 4},
        },
        PGType{
            AutoIntegerArray:    Int64Array{1, 2, 3, 4},
            AutoIntegerArrayPtr: nil,
        },
        PGType{
            AutoJSONBMap: JSONBMap{
                "Hello": "world",
                "Roses": "red",
            },
            AutoJSONBArray: JSONBArray{float64(1), float64(2), float64(3), float64(4)},
        },
        PGType{
            AutoIntegerArray:    nil,
            AutoIntegerArrayPtr: &Int64Array{4, 5, 6, 7},
        },
        PGType{
            AutoJSONBMap:   JSONBMap{},
            AutoJSONBArray: JSONBArray{},
        },
        PGType{
            AutoJSONBMap:   JSONBMap(nil),
            AutoJSONBArray: JSONBArray(nil),
        },
        PGType{
            AutoStringArray:    StringArray{"aaa", "bbb", "ccc"},
            AutoStringArrayPtr: nil,
        },
        PGType{
            AutoStringArray:    nil,
            AutoStringArrayPtr: &StringArray{"ddd", "eee", "ffff"},
        },
        PGType{
            AutoJSONB:    JSONB{map[string]interface{}{"hello": "world!"}},
            AutoJSONBPtr: nil,
        },
        PGType{
            AutoJSONB:    JSONB{nil},
            AutoJSONBPtr: &JSONB{[]interface{}{float64(9), float64(9), float64(9)}},
        },
        PGType{
            IntegerArray: []int64{1, 2, 3, 4},
            StringArray:  []string{"a", "boo", "bar"},
        },
        PGType{
            Field2: &field2,
            Field3: &field3,
        },
        PGType{
            IntegerArray: []int64{},
        },
        PGType{
            StringArray: []string{},
        },
        PGType{
            IntegerArray: []int64{},
            StringArray:  []string{},
        },
        PGType{},
        PGType{
            IntegerArray: []int64{1},
            StringArray:  []string{"a"},
        },
        PGType{
            IntegerArray: []int64{0, 0, 0, 0},
            StringValue:  &testValue,
            CustomJSONB: customJSONB{
                N: "Hello",
            },
            StringArray: []string{"", "", "", ``, `""`},
        },
        PGType{
            StringValue: &testValue,
        },
        PGType{
            Field1: &field1,
            CustomJSONB: customJSONB{
                V: 4.4,
            },
        },
        PGType{
            StringArray: []string{"a", "boo", "bar"},
        },
        PGType{
            StringArray: []string{"a", "boo", "bar", `""`},
            CustomJSONB: customJSONB{},
        },
        PGType{
            IntegerArray: []int64{0},
            StringArray:  []string{""},
        },
        PGType{
            CustomJSONB: customJSONB{
                N: "Peter",
                V: 5.56,
            },
        },
    }

    for i := 0; i < 100; i++ {
        // Shuffle the fixtures into a fresh random order.
        pgTypeTests := make([]PGType, len(origPgTypeTests))
        perm := rand.Perm(len(origPgTypeTests))
        for i, v := range perm {
            pgTypeTests[v] = origPgTypeTests[i]
        }

        // Round-trip via Collection.Insert / Find.
        for i := range pgTypeTests {
            id, err := sess.Collection("pg_types").Insert(pgTypeTests[i])
            assert.NoError(t, err)

            var actual PGType
            err = sess.Collection("pg_types").Find(id).One(&actual)
            assert.NoError(t, err)

            expected := pgTypeTests[i]
            expected.ID = id.(int64)
            assert.Equal(t, expected, actual)
        }

        // Round-trip via the SQL builder with RETURNING.
        for i := range pgTypeTests {
            row, err := sess.InsertInto("pg_types").Values(pgTypeTests[i]).Returning("id").QueryRow()
            assert.NoError(t, err)

            var id int64
            err = row.Scan(&id)
            assert.NoError(t, err)

            var actual PGType
            err = sess.Collection("pg_types").Find(id).One(&actual)
            assert.NoError(t, err)

            expected := pgTypeTests[i]
            expected.ID = id
            assert.Equal(t, expected, actual)

            var actual2 PGType
            err = sess.SelectFrom("pg_types").Where("id = ?", id).One(&actual2)
            assert.NoError(t, err)
            assert.Equal(t, expected, actual2)
        }

        // Multi-value insert in a single statement (result discarded).
        inserter := sess.InsertInto("pg_types")
        for i := range pgTypeTests {
            inserter = inserter.Values(pgTypeTests[i])
        }
        _, err := inserter.Exec()
        assert.NoError(t, err)

        err = sess.Collection("pg_types").Truncate()
        assert.NoError(t, err)

        // Batch insert, fed from a goroutine.
        batch := sess.InsertInto("pg_types").Batch(50)
        go func() {
            defer batch.Done()
            for i := range pgTypeTests {
                batch.Values(pgTypeTests[i])
            }
        }()

        err = batch.Wait()
        assert.NoError(t, err)

        var values []PGType
        // Order by primary key: without an explicit ORDER BY, PostgreSQL
        // does not guarantee row order, so the positional comparison
        // against insertion order below would be flaky. The serial ids
        // follow insertion order, so ordering by id restores it.
        err = sess.SelectFrom("pg_types").OrderBy("id").All(&values)
        assert.NoError(t, err)
        for i := range values {
            expected := pgTypeTests[i]
            expected.ID = values[i].ID
            assert.Equal(t, expected, values[i])
        }
    }
}
// TestOptionTypes round-trips the auto-wrapped option types (StringArray,
// JSONBMap, and pointers to them) through the option_types table,
// covering non-empty, empty and nil values plus an in-place update.
func TestOptionTypes(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    optionTypes := sess.Collection("option_types")
    err := optionTypes.Truncate()
    assert.NoError(t, err)

    // TODO: lets do some benchmarking on these auto-wrapped option types..
    // TODO: add nullable jsonb field mapped to a []string

    // A struct with wrapped option types defined in the struct tags
    // for postgres string array and jsonb types
    type optionType struct {
        ID       int64       `db:"id,omitempty"`
        Name     string      `db:"name"`
        Tags     StringArray `db:"tags"`
        Settings JSONBMap    `db:"settings"`
    }

    // Item 1
    item1 := optionType{
        Name:     "Food",
        Tags:     []string{"toronto", "pizza"},
        Settings: JSONBMap{"a": 1, "b": 2},
    }
    id, err := optionTypes.Insert(item1)
    assert.NoError(t, err)
    if pk, ok := id.(int64); !ok || pk == 0 {
        t.Fatalf("Expecting an ID.")
    }
    var item1Chk optionType
    err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
    assert.NoError(t, err)
    // jsonb numbers decode back as float64.
    assert.Equal(t, float64(1), item1Chk.Settings["a"])
    assert.Equal(t, "toronto", item1Chk.Tags[0])

    // Item 1 B
    item1b := &optionType{
        Name:     "Golang",
        Tags:     []string{"love", "it"},
        Settings: map[string]interface{}{"go": 1, "lang": 2},
    }
    id, err = optionTypes.Insert(item1b)
    assert.NoError(t, err)
    if pk, ok := id.(int64); !ok || pk == 0 {
        t.Fatalf("Expecting an ID.")
    }
    var item1bChk optionType
    err = optionTypes.Find(db.Cond{"id": id}).One(&item1bChk)
    assert.NoError(t, err)
    assert.Equal(t, float64(1), item1bChk.Settings["go"])
    assert.Equal(t, "love", item1bChk.Tags[0])

    // Item 1 C: empty (but non-nil) tags and settings round-trip as empty.
    item1c := &optionType{
        Name: "Sup", Tags: []string{}, Settings: map[string]interface{}{},
    }
    id, err = optionTypes.Insert(item1c)
    assert.NoError(t, err)
    if pk, ok := id.(int64); !ok || pk == 0 {
        t.Fatalf("Expecting an ID.")
    }
    var item1cChk optionType
    err = optionTypes.Find(db.Cond{"id": id}).One(&item1cChk)
    assert.NoError(t, err)
    assert.Zero(t, len(item1cChk.Tags))
    assert.Zero(t, len(item1cChk.Settings))

    // An option type to pointer jsonb field
    type optionType2 struct {
        ID       int64       `db:"id,omitempty"`
        Name     string      `db:"name"`
        Tags     StringArray `db:"tags"`
        Settings *JSONBMap   `db:"settings"`
    }
    item2 := optionType2{
        Name: "JS", Tags: []string{"hi", "bye"}, Settings: nil,
    }
    id, err = optionTypes.Insert(item2)
    assert.NoError(t, err)
    if pk, ok := id.(int64); !ok || pk == 0 {
        t.Fatalf("Expecting an ID.")
    }
    var item2Chk optionType2
    res := optionTypes.Find(db.Cond{"id": id})
    err = res.One(&item2Chk)
    assert.NoError(t, err)
    assert.Equal(t, id.(int64), item2Chk.ID)
    assert.Equal(t, item2Chk.Name, item2.Name)
    assert.Equal(t, item2Chk.Tags[0], item2.Tags[0])
    assert.Equal(t, len(item2Chk.Tags), len(item2.Tags))

    // Update the value
    m := JSONBMap{}
    m["lang"] = "javascript"
    m["num"] = 31337
    item2.Settings = &m
    err = res.Update(item2)
    assert.NoError(t, err)
    err = res.One(&item2Chk)
    assert.NoError(t, err)
    assert.Equal(t, float64(31337), (*item2Chk.Settings)["num"].(float64))
    assert.Equal(t, "javascript", (*item2Chk.Settings)["lang"])

    // An option type to pointer string array field
    type optionType3 struct {
        ID       int64        `db:"id,omitempty"`
        Name     string       `db:"name"`
        Tags     *StringArray `db:"tags"`
        Settings JSONBMap     `db:"settings"`
    }
    item3 := optionType3{
        Name:     "Julia",
        Tags:     nil,
        Settings: JSONBMap{"girl": true, "lang": true},
    }
    id, err = optionTypes.Insert(item3)
    assert.NoError(t, err)
    if pk, ok := id.(int64); !ok || pk == 0 {
        t.Fatalf("Expecting an ID.")
    }
    var item3Chk optionType2
    err = optionTypes.Find(db.Cond{"id": id}).One(&item3Chk)
    assert.NoError(t, err)
}
// Settings is a jsonb-backed struct used by TestOptionTypeJsonbStruct;
// it implements sql.Scanner and driver.Valuer via the package's JSONB
// helpers.
type Settings struct {
    Name string `json:"name"`
    Num  int64  `json:"num"`
}

// Scan decodes a jsonb database value into the struct.
func (s *Settings) Scan(src interface{}) error {
    return DecodeJSONB(s, src)
}

// Value encodes the struct as jsonb for storage.
func (s Settings) Value() (driver.Value, error) {
    return EncodeJSONB(s)
}
// TestOptionTypeJsonbStruct round-trips a custom Scanner/Valuer struct
// (Settings) stored in a jsonb column alongside a string array.
func TestOptionTypeJsonbStruct(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    optionTypes := sess.Collection("option_types")
    err := optionTypes.Truncate()
    assert.NoError(t, err)

    type OptionType struct {
        ID       int64       `db:"id,omitempty"`
        Name     string      `db:"name"`
        Tags     StringArray `db:"tags"`
        Settings Settings    `db:"settings"`
    }

    item1 := &OptionType{
        Name:     "Hi",
        Tags:     []string{"aah", "ok"},
        Settings: Settings{Name: "a", Num: 123},
    }
    id, err := optionTypes.Insert(item1)
    assert.NoError(t, err)
    if pk, ok := id.(int64); !ok || pk == 0 {
        t.Fatalf("Expecting an ID.")
    }

    var item1Chk OptionType
    err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
    assert.NoError(t, err)
    assert.Equal(t, 2, len(item1Chk.Tags))
    assert.Equal(t, "aah", item1Chk.Tags[0])
    assert.Equal(t, "a", item1Chk.Settings.Name)
    assert.Equal(t, int64(123), item1Chk.Settings.Num)
}
// TestSchemaCollection verifies that a table living in a non-default
// schema ("test_schema.test") can be written to and read back.
func TestSchemaCollection(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    col := sess.Collection("test_schema.test")

    _, err := col.Insert(map[string]int{"id": 9})
    assert.Equal(t, nil, err)

    var rows []map[string]int
    err = col.Find().All(&rows)
    assert.Nil(t, err)
    assert.Equal(t, 1, len(rows))
    assert.Equal(t, 9, rows[0]["id"])
}
// TestMaxOpenConns_Issue340 issues more concurrent queries than the
// connection pool allows, checking that queries queue rather than fail.
func TestMaxOpenConns_Issue340(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    sess.SetMaxOpenConns(5)

    var wg sync.WaitGroup
    for i := 0; i < 30; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            _, err := sess.Exec(fmt.Sprintf(`SELECT pg_sleep(1.%d)`, i))
            if err != nil {
                // t.Fatal must not be called from a goroutine other than
                // the one running the test (FailNow calls runtime.Goexit
                // on the wrong goroutine and the test keeps running);
                // t.Error is the goroutine-safe way to fail.
                t.Error(err)
            }
        }(i)
    }
    wg.Wait()

    // Restore an unlimited pool so later tests are unaffected.
    sess.SetMaxOpenConns(0)
}
// TestUUIDInsert_Issue370 checks InsertReturning and Find against tables
// whose primary key is a UUID (pointer and value forms) or an integer
// array (issue_370_2).
func TestUUIDInsert_Issue370(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    // Pointer-to-UUID primary key.
    {
        type itemT struct {
            ID   *uuid.UUID `db:"id"`
            Name string     `db:"name"`
        }

        newUUID := uuid.NewV4()

        item1 := itemT{
            ID:   &newUUID,
            Name: "Jonny",
        }

        col := sess.Collection("issue_370")
        err := col.Truncate()
        assert.NoError(t, err)

        err = col.InsertReturning(&item1)
        assert.NoError(t, err)

        var item2 itemT
        err = col.Find(item1.ID).One(&item2)
        assert.NoError(t, err)
        assert.Equal(t, item1.Name, item2.Name)

        var item3 itemT
        err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
        assert.NoError(t, err)
        assert.Equal(t, item1.Name, item3.Name)
    }

    // UUID-by-value primary key.
    {
        type itemT struct {
            ID   uuid.UUID `db:"id"`
            Name string    `db:"name"`
        }

        item1 := itemT{
            ID:   uuid.NewV4(),
            Name: "Jonny",
        }

        col := sess.Collection("issue_370")
        err := col.Truncate()
        assert.NoError(t, err)

        err = col.InsertReturning(&item1)
        assert.NoError(t, err)

        var item2 itemT
        err = col.Find(item1.ID).One(&item2)
        assert.NoError(t, err)
        assert.Equal(t, item1.Name, item2.Name)

        var item3 itemT
        err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
        assert.NoError(t, err)
        assert.Equal(t, item1.Name, item3.Name)
    }

    // Integer-array primary key.
    {
        type itemT struct {
            ID   Int64Array `db:"id"`
            Name string     `db:"name"`
        }

        item1 := itemT{
            ID:   Int64Array{1, 2, 3},
            Name: "Vojtech",
        }

        col := sess.Collection("issue_370_2")
        err := col.Truncate()
        assert.NoError(t, err)

        err = col.InsertReturning(&item1)
        assert.NoError(t, err)

        var item2 itemT
        err = col.Find(item1.ID).One(&item2)
        assert.NoError(t, err)
        assert.Equal(t, item1.Name, item2.Name)

        var item3 itemT
        err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
        assert.NoError(t, err)
        assert.Equal(t, item1.Name, item3.Name)
    }
}
// TestEscapeQuestionMark ensures a literal '?' (PostgreSQL's jsonb
// key-exists operator) can be written as '??' while ordinary '?'
// placeholders in the same query still bind arguments.
func TestEscapeQuestionMark(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    var val bool

    {
        // Literal jsonb and key on the left, one bound argument.
        res, err := sess.QueryRow(`SELECT '{"mykey":["val1", "val2"]}'::jsonb->'mykey' ?? ?`, "val2")
        assert.NoError(t, err)

        err = res.Scan(&val)
        assert.NoError(t, err)
        assert.Equal(t, true, val)
    }

    {
        // The jsonb document itself is bound as an argument.
        res, err := sess.QueryRow(`SELECT ?::jsonb->'mykey' ?? ?`, `{"mykey":["val1", "val2"]}`, `val2`)
        assert.NoError(t, err)

        err = res.Scan(&val)
        assert.NoError(t, err)
        assert.Equal(t, true, val)
    }

    {
        // Document, key and value are all bound arguments.
        res, err := sess.QueryRow(`SELECT ?::jsonb->? ?? ?`, `{"mykey":["val1", "val2"]}`, `mykey`, `val2`)
        assert.NoError(t, err)

        err = res.Scan(&val)
        assert.NoError(t, err)
        assert.Equal(t, true, val)
    }
}
// TestTextMode_Issue391 runs the type round-trip suite using the
// driver's default (text) parameter mode.
func TestTextMode_Issue391(t *testing.T) {
    sess := mustOpen()
    defer sess.Close()

    testPostgreSQLTypes(t, sess)
}
// TestBinaryMode_Issue391 runs the type round-trip suite with lib/pq's
// binary_parameters mode enabled.
func TestBinaryMode_Issue391(t *testing.T) {
    settingsWithBinaryMode := settings
    // Copying the ConnectionURL struct still shares the Options map with
    // the package-level settings, so mutating it here would leak
    // binary_parameters into every other test. Clone the map first.
    options := make(map[string]string, len(settings.Options)+1)
    for k, v := range settings.Options {
        options[k] = v
    }
    options["binary_parameters"] = "yes"
    settingsWithBinaryMode.Options = options

    sess, err := Open(settingsWithBinaryMode)
    if err != nil {
        t.Fatal(err)
    }
    defer sess.Close()

    testPostgreSQLTypes(t, sess)
}
// getStats gathers server-side statistics used by the cleanup check;
// currently just the number of prepared statements on the connection.
func getStats(sess sqlbuilder.Database) (map[string]int, error) {
    row := sess.Driver().(*sql.DB).QueryRow(`SELECT count(1) AS value FROM pg_prepared_statements`)

    var count int
    if err := row.Scan(&count); err != nil {
        return nil, err
    }

    return map[string]int{"pg_prepared_statements_count": count}, nil
}
func | (sess sqlbuilder.Database) (err error) {
var stats map[string]int
stats, err = getStats(sess)
if err != nil {
return err
}
if activeStatements := sqladapter.NumActiveStatements(); activeStatements > 128 {
return fmt.Errorf("Expecting active statements to be at most 128, got %d", activeStatements)
}
sess.ClearCache()
stats, err = getStats(sess)
if err != nil {
return err
}
if stats["pg_prepared_statements_count"] != 0 {
return fmt.Errorf(`Expecting "Prepared_stmt_count" to be 0, got %d`, stats["Prepared_stmt_count"])
}
return nil
}
| cleanUpCheck | identifier_name |
adapter_test.go | // Copyright (c) 2012-today The upper.io/db authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//go:generate bash -c "sed s/ADAPTER/postgresql/g ../internal/sqladapter/testing/adapter.go.tpl > generated_test.go"
package postgresql
import (
"database/sql"
"database/sql/driver"
"fmt"
"math/rand"
"os"
"sync"
"testing"
"github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"upper.io/db.v3"
"upper.io/db.v3/internal/sqladapter"
"upper.io/db.v3/lib/sqlbuilder"
)
const (
testTimeZone = "Canada/Eastern"
)
var settings = ConnectionURL{
Database: os.Getenv("DB_NAME"),
User: os.Getenv("DB_USERNAME"),
Password: os.Getenv("DB_PASSWORD"),
Host: os.Getenv("DB_HOST") + ":" + os.Getenv("DB_PORT"),
Options: map[string]string{
"timezone": testTimeZone,
},
}
func tearUp() error {
sess := mustOpen()
defer sess.Close()
batch := []string{
`DROP TABLE IF EXISTS artist`,
`CREATE TABLE artist (
id serial primary key,
name varchar(60)
)`,
`DROP TABLE IF EXISTS publication`,
`CREATE TABLE publication (
id serial primary key,
title varchar(80),
author_id integer
)`,
`DROP TABLE IF EXISTS review`,
`CREATE TABLE review (
id serial primary key,
publication_id integer,
name varchar(80),
comments text,
created timestamp without time zone
)`,
`DROP TABLE IF EXISTS data_types`,
`CREATE TABLE data_types (
id serial primary key,
_uint integer,
_uint8 integer,
_uint16 integer,
_uint32 integer,
_uint64 integer,
_int integer,
_int8 integer,
_int16 integer,
_int32 integer,
_int64 integer,
_float32 numeric(10,6),
_float64 numeric(10,6),
_bool boolean,
_string text,
_date timestamp with time zone,
_nildate timestamp without time zone null,
_ptrdate timestamp without time zone,
_defaultdate timestamp without time zone DEFAULT now(),
_time bigint
)`,
`DROP TABLE IF EXISTS stats_test`,
`CREATE TABLE stats_test (
id serial primary key,
numeric integer,
value integer
)`,
`DROP TABLE IF EXISTS composite_keys`,
`CREATE TABLE composite_keys (
code varchar(255) default '',
user_id varchar(255) default '',
some_val varchar(255) default '',
primary key (code, user_id)
)`,
`DROP TABLE IF EXISTS option_types`,
`CREATE TABLE option_types (
id serial primary key,
name varchar(255) default '',
tags varchar(64)[],
settings jsonb
)`,
`DROP TABLE IF EXISTS test_schema.test`,
`DROP SCHEMA IF EXISTS test_schema`,
`CREATE SCHEMA test_schema`,
`CREATE TABLE test_schema.test (id integer)`,
`DROP TABLE IF EXISTS pg_types`,
`CREATE TABLE pg_types (
id serial primary key,
auto_integer_array integer[],
auto_string_array text[],
auto_integer_array_ptr integer[],
auto_string_array_ptr text[],
integer_array integer[],
string_value varchar(255),
auto_jsonb jsonb,
auto_jsonb_map jsonb,
auto_jsonb_array jsonb,
custom_jsonb jsonb,
auto_jsonb_ptr jsonb,
integer_valuer_value smallint[],
string_array text[],
field1 int,
field2 varchar(64),
field3 decimal
)`,
`DROP TABLE IF EXISTS issue_370`,
`CREATE TABLE issue_370 (
id UUID PRIMARY KEY,
name VARCHAR(25)
)`,
`DROP TABLE IF EXISTS issue_370_2`,
`CREATE TABLE issue_370_2 (
id INTEGER[3] PRIMARY KEY,
name VARCHAR(25)
)`,
}
for _, s := range batch {
driver := sess.Driver().(*sql.DB)
if _, err := driver.Exec(s); err != nil {
return err
}
}
return nil
}
type customJSONB struct {
N string `json:"name"`
V float64 `json:"value"`
}
func (c customJSONB) Value() (driver.Value, error) {
return EncodeJSONB(c)
}
func (c *customJSONB) Scan(src interface{}) error {
return DecodeJSONB(c, src)
}
var (
_ = driver.Valuer(&customJSONB{})
_ = sql.Scanner(&customJSONB{})
)
func testPostgreSQLTypes(t *testing.T, sess sqlbuilder.Database) {
type PGType struct {
ID int64 `db:"id,omitempty"`
IntegerArray Int64Array `db:"integer_array"`
StringValue *string `db:"string_value,omitempty"`
StringArray StringArray `db:"string_array"`
Field1 *int64 `db:"field1,omitempty"`
Field2 *string `db:"field2,omitempty"`
Field3 *float64 `db:"field3,omitempty"`
AutoIntegerArray Int64Array `db:"auto_integer_array"`
AutoStringArray StringArray `db:"auto_string_array"`
AutoJSONB JSONB `db:"auto_jsonb"`
AutoJSONBMap JSONBMap `db:"auto_jsonb_map"`
AutoJSONBArray JSONBArray `db:"auto_jsonb_array"`
CustomJSONB customJSONB `db:"custom_jsonb"`
AutoIntegerArrayPtr *Int64Array `db:"auto_integer_array_ptr,omitempty"`
AutoStringArrayPtr *StringArray `db:"auto_string_array_ptr,omitempty"`
AutoJSONBPtr *JSONB `db:"auto_jsonb_ptr,omitempty"`
}
field1 := int64(10)
field2 := string("ten")
field3 := float64(10.0)
testValue := "Hello world!"
origPgTypeTests := []PGType{
PGType{
Field1: &field1,
Field2: &field2,
Field3: &field3,
},
PGType{
IntegerArray: []int64{1, 2, 3, 4},
},
PGType{
AutoIntegerArray: Int64Array{1, 2, 3, 4},
AutoIntegerArrayPtr: nil,
},
PGType{
AutoJSONBMap: JSONBMap{
"Hello": "world",
"Roses": "red",
},
AutoJSONBArray: JSONBArray{float64(1), float64(2), float64(3), float64(4)},
},
PGType{
AutoIntegerArray: nil,
AutoIntegerArrayPtr: &Int64Array{4, 5, 6, 7},
},
PGType{
AutoJSONBMap: JSONBMap{},
AutoJSONBArray: JSONBArray{},
},
PGType{
AutoJSONBMap: JSONBMap(nil),
AutoJSONBArray: JSONBArray(nil),
},
PGType{
AutoStringArray: StringArray{"aaa", "bbb", "ccc"},
AutoStringArrayPtr: nil,
},
PGType{
AutoStringArray: nil,
AutoStringArrayPtr: &StringArray{"ddd", "eee", "ffff"},
},
PGType{
AutoJSONB: JSONB{map[string]interface{}{"hello": "world!"}},
AutoJSONBPtr: nil,
},
PGType{
AutoJSONB: JSONB{nil},
AutoJSONBPtr: &JSONB{[]interface{}{float64(9), float64(9), float64(9)}},
},
PGType{
IntegerArray: []int64{1, 2, 3, 4},
StringArray: []string{"a", "boo", "bar"},
},
PGType{
Field2: &field2,
Field3: &field3,
},
PGType{
IntegerArray: []int64{},
},
PGType{
StringArray: []string{}, | PGType{},
PGType{
IntegerArray: []int64{1},
StringArray: []string{"a"},
},
PGType{
IntegerArray: []int64{0, 0, 0, 0},
StringValue: &testValue,
CustomJSONB: customJSONB{
N: "Hello",
},
StringArray: []string{"", "", "", ``, `""`},
},
PGType{
StringValue: &testValue,
},
PGType{
Field1: &field1,
CustomJSONB: customJSONB{
V: 4.4,
},
},
PGType{
StringArray: []string{"a", "boo", "bar"},
},
PGType{
StringArray: []string{"a", "boo", "bar", `""`},
CustomJSONB: customJSONB{},
},
PGType{
IntegerArray: []int64{0},
StringArray: []string{""},
},
PGType{
CustomJSONB: customJSONB{
N: "Peter",
V: 5.56,
},
},
}
for i := 0; i < 100; i++ {
pgTypeTests := make([]PGType, len(origPgTypeTests))
perm := rand.Perm(len(origPgTypeTests))
for i, v := range perm {
pgTypeTests[v] = origPgTypeTests[i]
}
for i := range pgTypeTests {
id, err := sess.Collection("pg_types").Insert(pgTypeTests[i])
assert.NoError(t, err)
var actual PGType
err = sess.Collection("pg_types").Find(id).One(&actual)
assert.NoError(t, err)
expected := pgTypeTests[i]
expected.ID = id.(int64)
assert.Equal(t, expected, actual)
}
for i := range pgTypeTests {
row, err := sess.InsertInto("pg_types").Values(pgTypeTests[i]).Returning("id").QueryRow()
assert.NoError(t, err)
var id int64
err = row.Scan(&id)
assert.NoError(t, err)
var actual PGType
err = sess.Collection("pg_types").Find(id).One(&actual)
assert.NoError(t, err)
expected := pgTypeTests[i]
expected.ID = id
assert.Equal(t, expected, actual)
var actual2 PGType
err = sess.SelectFrom("pg_types").Where("id = ?", id).One(&actual2)
assert.NoError(t, err)
assert.Equal(t, expected, actual2)
}
inserter := sess.InsertInto("pg_types")
for i := range pgTypeTests {
inserter = inserter.Values(pgTypeTests[i])
}
_, err := inserter.Exec()
assert.NoError(t, err)
err = sess.Collection("pg_types").Truncate()
assert.NoError(t, err)
batch := sess.InsertInto("pg_types").Batch(50)
go func() {
defer batch.Done()
for i := range pgTypeTests {
batch.Values(pgTypeTests[i])
}
}()
err = batch.Wait()
assert.NoError(t, err)
var values []PGType
err = sess.SelectFrom("pg_types").All(&values)
assert.NoError(t, err)
for i := range values {
expected := pgTypeTests[i]
expected.ID = values[i].ID
assert.Equal(t, expected, values[i])
}
}
}
func TestOptionTypes(t *testing.T) {
sess := mustOpen()
defer sess.Close()
optionTypes := sess.Collection("option_types")
err := optionTypes.Truncate()
assert.NoError(t, err)
// TODO: lets do some benchmarking on these auto-wrapped option types..
// TODO: add nullable jsonb field mapped to a []string
// A struct with wrapped option types defined in the struct tags
// for postgres string array and jsonb types
type optionType struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings JSONBMap `db:"settings"`
}
// Item 1
item1 := optionType{
Name: "Food",
Tags: []string{"toronto", "pizza"},
Settings: JSONBMap{"a": 1, "b": 2},
}
id, err := optionTypes.Insert(item1)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1Chk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
assert.NoError(t, err)
assert.Equal(t, float64(1), item1Chk.Settings["a"])
assert.Equal(t, "toronto", item1Chk.Tags[0])
// Item 1 B
item1b := &optionType{
Name: "Golang",
Tags: []string{"love", "it"},
Settings: map[string]interface{}{"go": 1, "lang": 2},
}
id, err = optionTypes.Insert(item1b)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1bChk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1bChk)
assert.NoError(t, err)
assert.Equal(t, float64(1), item1bChk.Settings["go"])
assert.Equal(t, "love", item1bChk.Tags[0])
// Item 1 C
item1c := &optionType{
Name: "Sup", Tags: []string{}, Settings: map[string]interface{}{},
}
id, err = optionTypes.Insert(item1c)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1cChk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1cChk)
assert.NoError(t, err)
assert.Zero(t, len(item1cChk.Tags))
assert.Zero(t, len(item1cChk.Settings))
// An option type to pointer jsonb field
type optionType2 struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings *JSONBMap `db:"settings"`
}
item2 := optionType2{
Name: "JS", Tags: []string{"hi", "bye"}, Settings: nil,
}
id, err = optionTypes.Insert(item2)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item2Chk optionType2
res := optionTypes.Find(db.Cond{"id": id})
err = res.One(&item2Chk)
assert.NoError(t, err)
assert.Equal(t, id.(int64), item2Chk.ID)
assert.Equal(t, item2Chk.Name, item2.Name)
assert.Equal(t, item2Chk.Tags[0], item2.Tags[0])
assert.Equal(t, len(item2Chk.Tags), len(item2.Tags))
// Update the value
m := JSONBMap{}
m["lang"] = "javascript"
m["num"] = 31337
item2.Settings = &m
err = res.Update(item2)
assert.NoError(t, err)
err = res.One(&item2Chk)
assert.NoError(t, err)
assert.Equal(t, float64(31337), (*item2Chk.Settings)["num"].(float64))
assert.Equal(t, "javascript", (*item2Chk.Settings)["lang"])
// An option type to pointer string array field
type optionType3 struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags *StringArray `db:"tags"`
Settings JSONBMap `db:"settings"`
}
item3 := optionType3{
Name: "Julia",
Tags: nil,
Settings: JSONBMap{"girl": true, "lang": true},
}
id, err = optionTypes.Insert(item3)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item3Chk optionType2
err = optionTypes.Find(db.Cond{"id": id}).One(&item3Chk)
assert.NoError(t, err)
}
type Settings struct {
Name string `json:"name"`
Num int64 `json:"num"`
}
func (s *Settings) Scan(src interface{}) error {
return DecodeJSONB(s, src)
}
func (s Settings) Value() (driver.Value, error) {
return EncodeJSONB(s)
}
func TestOptionTypeJsonbStruct(t *testing.T) {
sess := mustOpen()
defer sess.Close()
optionTypes := sess.Collection("option_types")
err := optionTypes.Truncate()
assert.NoError(t, err)
type OptionType struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings Settings `db:"settings"`
}
item1 := &OptionType{
Name: "Hi",
Tags: []string{"aah", "ok"},
Settings: Settings{Name: "a", Num: 123},
}
id, err := optionTypes.Insert(item1)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1Chk OptionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
assert.NoError(t, err)
assert.Equal(t, 2, len(item1Chk.Tags))
assert.Equal(t, "aah", item1Chk.Tags[0])
assert.Equal(t, "a", item1Chk.Settings.Name)
assert.Equal(t, int64(123), item1Chk.Settings.Num)
}
func TestSchemaCollection(t *testing.T) {
sess := mustOpen()
defer sess.Close()
col := sess.Collection("test_schema.test")
_, err := col.Insert(map[string]int{"id": 9})
assert.Equal(t, nil, err)
var dump []map[string]int
err = col.Find().All(&dump)
assert.Nil(t, err)
assert.Equal(t, 1, len(dump))
assert.Equal(t, 9, dump[0]["id"])
}
func TestMaxOpenConns_Issue340(t *testing.T) {
sess := mustOpen()
defer sess.Close()
sess.SetMaxOpenConns(5)
var wg sync.WaitGroup
for i := 0; i < 30; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
_, err := sess.Exec(fmt.Sprintf(`SELECT pg_sleep(1.%d)`, i))
if err != nil {
t.Fatal(err)
}
}(i)
}
wg.Wait()
sess.SetMaxOpenConns(0)
}
func TestUUIDInsert_Issue370(t *testing.T) {
sess := mustOpen()
defer sess.Close()
{
type itemT struct {
ID *uuid.UUID `db:"id"`
Name string `db:"name"`
}
newUUID := uuid.NewV4()
item1 := itemT{
ID: &newUUID,
Name: "Jonny",
}
col := sess.Collection("issue_370")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
{
type itemT struct {
ID uuid.UUID `db:"id"`
Name string `db:"name"`
}
item1 := itemT{
ID: uuid.NewV4(),
Name: "Jonny",
}
col := sess.Collection("issue_370")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
{
type itemT struct {
ID Int64Array `db:"id"`
Name string `db:"name"`
}
item1 := itemT{
ID: Int64Array{1, 2, 3},
Name: "Vojtech",
}
col := sess.Collection("issue_370_2")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
}
func TestEscapeQuestionMark(t *testing.T) {
sess := mustOpen()
defer sess.Close()
var val bool
{
res, err := sess.QueryRow(`SELECT '{"mykey":["val1", "val2"]}'::jsonb->'mykey' ?? ?`, "val2")
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
{
res, err := sess.QueryRow(`SELECT ?::jsonb->'mykey' ?? ?`, `{"mykey":["val1", "val2"]}`, `val2`)
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
{
res, err := sess.QueryRow(`SELECT ?::jsonb->? ?? ?`, `{"mykey":["val1", "val2"]}`, `mykey`, `val2`)
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
}
func TestTextMode_Issue391(t *testing.T) {
sess := mustOpen()
defer sess.Close()
testPostgreSQLTypes(t, sess)
}
func TestBinaryMode_Issue391(t *testing.T) {
settingsWithBinaryMode := settings
settingsWithBinaryMode.Options["binary_parameters"] = "yes"
sess, err := Open(settingsWithBinaryMode)
if err != nil {
t.Fatal(err)
}
defer sess.Close()
testPostgreSQLTypes(t, sess)
}
func getStats(sess sqlbuilder.Database) (map[string]int, error) {
stats := make(map[string]int)
row := sess.Driver().(*sql.DB).QueryRow(`SELECT count(1) AS value FROM pg_prepared_statements`)
var value int
err := row.Scan(&value)
if err != nil {
return nil, err
}
stats["pg_prepared_statements_count"] = value
return stats, nil
}
func cleanUpCheck(sess sqlbuilder.Database) (err error) {
var stats map[string]int
stats, err = getStats(sess)
if err != nil {
return err
}
if activeStatements := sqladapter.NumActiveStatements(); activeStatements > 128 {
return fmt.Errorf("Expecting active statements to be at most 128, got %d", activeStatements)
}
sess.ClearCache()
stats, err = getStats(sess)
if err != nil {
return err
}
if stats["pg_prepared_statements_count"] != 0 {
return fmt.Errorf(`Expecting "Prepared_stmt_count" to be 0, got %d`, stats["Prepared_stmt_count"])
}
return nil
} | },
PGType{
IntegerArray: []int64{},
StringArray: []string{},
}, | random_line_split |
adapter_test.go | // Copyright (c) 2012-today The upper.io/db authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//go:generate bash -c "sed s/ADAPTER/postgresql/g ../internal/sqladapter/testing/adapter.go.tpl > generated_test.go"
package postgresql
import (
"database/sql"
"database/sql/driver"
"fmt"
"math/rand"
"os"
"sync"
"testing"
"github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"upper.io/db.v3"
"upper.io/db.v3/internal/sqladapter"
"upper.io/db.v3/lib/sqlbuilder"
)
const (
testTimeZone = "Canada/Eastern"
)
var settings = ConnectionURL{
Database: os.Getenv("DB_NAME"),
User: os.Getenv("DB_USERNAME"),
Password: os.Getenv("DB_PASSWORD"),
Host: os.Getenv("DB_HOST") + ":" + os.Getenv("DB_PORT"),
Options: map[string]string{
"timezone": testTimeZone,
},
}
func tearUp() error {
sess := mustOpen()
defer sess.Close()
batch := []string{
`DROP TABLE IF EXISTS artist`,
`CREATE TABLE artist (
id serial primary key,
name varchar(60)
)`,
`DROP TABLE IF EXISTS publication`,
`CREATE TABLE publication (
id serial primary key,
title varchar(80),
author_id integer
)`,
`DROP TABLE IF EXISTS review`,
`CREATE TABLE review (
id serial primary key,
publication_id integer,
name varchar(80),
comments text,
created timestamp without time zone
)`,
`DROP TABLE IF EXISTS data_types`,
`CREATE TABLE data_types (
id serial primary key,
_uint integer,
_uint8 integer,
_uint16 integer,
_uint32 integer,
_uint64 integer,
_int integer,
_int8 integer,
_int16 integer,
_int32 integer,
_int64 integer,
_float32 numeric(10,6),
_float64 numeric(10,6),
_bool boolean,
_string text,
_date timestamp with time zone,
_nildate timestamp without time zone null,
_ptrdate timestamp without time zone,
_defaultdate timestamp without time zone DEFAULT now(),
_time bigint
)`,
`DROP TABLE IF EXISTS stats_test`,
`CREATE TABLE stats_test (
id serial primary key,
numeric integer,
value integer
)`,
`DROP TABLE IF EXISTS composite_keys`,
`CREATE TABLE composite_keys (
code varchar(255) default '',
user_id varchar(255) default '',
some_val varchar(255) default '',
primary key (code, user_id)
)`,
`DROP TABLE IF EXISTS option_types`,
`CREATE TABLE option_types (
id serial primary key,
name varchar(255) default '',
tags varchar(64)[],
settings jsonb
)`,
`DROP TABLE IF EXISTS test_schema.test`,
`DROP SCHEMA IF EXISTS test_schema`,
`CREATE SCHEMA test_schema`,
`CREATE TABLE test_schema.test (id integer)`,
`DROP TABLE IF EXISTS pg_types`,
`CREATE TABLE pg_types (
id serial primary key,
auto_integer_array integer[],
auto_string_array text[],
auto_integer_array_ptr integer[],
auto_string_array_ptr text[],
integer_array integer[],
string_value varchar(255),
auto_jsonb jsonb,
auto_jsonb_map jsonb,
auto_jsonb_array jsonb,
custom_jsonb jsonb,
auto_jsonb_ptr jsonb,
integer_valuer_value smallint[],
string_array text[],
field1 int,
field2 varchar(64),
field3 decimal
)`,
`DROP TABLE IF EXISTS issue_370`,
`CREATE TABLE issue_370 (
id UUID PRIMARY KEY,
name VARCHAR(25)
)`,
`DROP TABLE IF EXISTS issue_370_2`,
`CREATE TABLE issue_370_2 (
id INTEGER[3] PRIMARY KEY,
name VARCHAR(25)
)`,
}
for _, s := range batch {
driver := sess.Driver().(*sql.DB)
if _, err := driver.Exec(s); err != nil {
return err
}
}
return nil
}
type customJSONB struct {
N string `json:"name"`
V float64 `json:"value"`
}
func (c customJSONB) Value() (driver.Value, error) {
return EncodeJSONB(c)
}
func (c *customJSONB) Scan(src interface{}) error {
return DecodeJSONB(c, src)
}
var (
_ = driver.Valuer(&customJSONB{})
_ = sql.Scanner(&customJSONB{})
)
func testPostgreSQLTypes(t *testing.T, sess sqlbuilder.Database) {
type PGType struct {
ID int64 `db:"id,omitempty"`
IntegerArray Int64Array `db:"integer_array"`
StringValue *string `db:"string_value,omitempty"`
StringArray StringArray `db:"string_array"`
Field1 *int64 `db:"field1,omitempty"`
Field2 *string `db:"field2,omitempty"`
Field3 *float64 `db:"field3,omitempty"`
AutoIntegerArray Int64Array `db:"auto_integer_array"`
AutoStringArray StringArray `db:"auto_string_array"`
AutoJSONB JSONB `db:"auto_jsonb"`
AutoJSONBMap JSONBMap `db:"auto_jsonb_map"`
AutoJSONBArray JSONBArray `db:"auto_jsonb_array"`
CustomJSONB customJSONB `db:"custom_jsonb"`
AutoIntegerArrayPtr *Int64Array `db:"auto_integer_array_ptr,omitempty"`
AutoStringArrayPtr *StringArray `db:"auto_string_array_ptr,omitempty"`
AutoJSONBPtr *JSONB `db:"auto_jsonb_ptr,omitempty"`
}
field1 := int64(10)
field2 := string("ten")
field3 := float64(10.0)
testValue := "Hello world!"
origPgTypeTests := []PGType{
PGType{
Field1: &field1,
Field2: &field2,
Field3: &field3,
},
PGType{
IntegerArray: []int64{1, 2, 3, 4},
},
PGType{
AutoIntegerArray: Int64Array{1, 2, 3, 4},
AutoIntegerArrayPtr: nil,
},
PGType{
AutoJSONBMap: JSONBMap{
"Hello": "world",
"Roses": "red",
},
AutoJSONBArray: JSONBArray{float64(1), float64(2), float64(3), float64(4)},
},
PGType{
AutoIntegerArray: nil,
AutoIntegerArrayPtr: &Int64Array{4, 5, 6, 7},
},
PGType{
AutoJSONBMap: JSONBMap{},
AutoJSONBArray: JSONBArray{},
},
PGType{
AutoJSONBMap: JSONBMap(nil),
AutoJSONBArray: JSONBArray(nil),
},
PGType{
AutoStringArray: StringArray{"aaa", "bbb", "ccc"},
AutoStringArrayPtr: nil,
},
PGType{
AutoStringArray: nil,
AutoStringArrayPtr: &StringArray{"ddd", "eee", "ffff"},
},
PGType{
AutoJSONB: JSONB{map[string]interface{}{"hello": "world!"}},
AutoJSONBPtr: nil,
},
PGType{
AutoJSONB: JSONB{nil},
AutoJSONBPtr: &JSONB{[]interface{}{float64(9), float64(9), float64(9)}},
},
PGType{
IntegerArray: []int64{1, 2, 3, 4},
StringArray: []string{"a", "boo", "bar"},
},
PGType{
Field2: &field2,
Field3: &field3,
},
PGType{
IntegerArray: []int64{},
},
PGType{
StringArray: []string{},
},
PGType{
IntegerArray: []int64{},
StringArray: []string{},
},
PGType{},
PGType{
IntegerArray: []int64{1},
StringArray: []string{"a"},
},
PGType{
IntegerArray: []int64{0, 0, 0, 0},
StringValue: &testValue,
CustomJSONB: customJSONB{
N: "Hello",
},
StringArray: []string{"", "", "", ``, `""`},
},
PGType{
StringValue: &testValue,
},
PGType{
Field1: &field1,
CustomJSONB: customJSONB{
V: 4.4,
},
},
PGType{
StringArray: []string{"a", "boo", "bar"},
},
PGType{
StringArray: []string{"a", "boo", "bar", `""`},
CustomJSONB: customJSONB{},
},
PGType{
IntegerArray: []int64{0},
StringArray: []string{""},
},
PGType{
CustomJSONB: customJSONB{
N: "Peter",
V: 5.56,
},
},
}
for i := 0; i < 100; i++ {
pgTypeTests := make([]PGType, len(origPgTypeTests))
perm := rand.Perm(len(origPgTypeTests))
for i, v := range perm {
pgTypeTests[v] = origPgTypeTests[i]
}
for i := range pgTypeTests {
id, err := sess.Collection("pg_types").Insert(pgTypeTests[i])
assert.NoError(t, err)
var actual PGType
err = sess.Collection("pg_types").Find(id).One(&actual)
assert.NoError(t, err)
expected := pgTypeTests[i]
expected.ID = id.(int64)
assert.Equal(t, expected, actual)
}
for i := range pgTypeTests {
row, err := sess.InsertInto("pg_types").Values(pgTypeTests[i]).Returning("id").QueryRow()
assert.NoError(t, err)
var id int64
err = row.Scan(&id)
assert.NoError(t, err)
var actual PGType
err = sess.Collection("pg_types").Find(id).One(&actual)
assert.NoError(t, err)
expected := pgTypeTests[i]
expected.ID = id
assert.Equal(t, expected, actual)
var actual2 PGType
err = sess.SelectFrom("pg_types").Where("id = ?", id).One(&actual2)
assert.NoError(t, err)
assert.Equal(t, expected, actual2)
}
inserter := sess.InsertInto("pg_types")
for i := range pgTypeTests {
inserter = inserter.Values(pgTypeTests[i])
}
_, err := inserter.Exec()
assert.NoError(t, err)
err = sess.Collection("pg_types").Truncate()
assert.NoError(t, err)
batch := sess.InsertInto("pg_types").Batch(50)
go func() {
defer batch.Done()
for i := range pgTypeTests {
batch.Values(pgTypeTests[i])
}
}()
err = batch.Wait()
assert.NoError(t, err)
var values []PGType
err = sess.SelectFrom("pg_types").All(&values)
assert.NoError(t, err)
for i := range values {
expected := pgTypeTests[i]
expected.ID = values[i].ID
assert.Equal(t, expected, values[i])
}
}
}
func TestOptionTypes(t *testing.T) {
sess := mustOpen()
defer sess.Close()
optionTypes := sess.Collection("option_types")
err := optionTypes.Truncate()
assert.NoError(t, err)
// TODO: lets do some benchmarking on these auto-wrapped option types..
// TODO: add nullable jsonb field mapped to a []string
// A struct with wrapped option types defined in the struct tags
// for postgres string array and jsonb types
type optionType struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings JSONBMap `db:"settings"`
}
// Item 1
item1 := optionType{
Name: "Food",
Tags: []string{"toronto", "pizza"},
Settings: JSONBMap{"a": 1, "b": 2},
}
id, err := optionTypes.Insert(item1)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1Chk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
assert.NoError(t, err)
assert.Equal(t, float64(1), item1Chk.Settings["a"])
assert.Equal(t, "toronto", item1Chk.Tags[0])
// Item 1 B
item1b := &optionType{
Name: "Golang",
Tags: []string{"love", "it"},
Settings: map[string]interface{}{"go": 1, "lang": 2},
}
id, err = optionTypes.Insert(item1b)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1bChk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1bChk)
assert.NoError(t, err)
assert.Equal(t, float64(1), item1bChk.Settings["go"])
assert.Equal(t, "love", item1bChk.Tags[0])
// Item 1 C
item1c := &optionType{
Name: "Sup", Tags: []string{}, Settings: map[string]interface{}{},
}
id, err = optionTypes.Insert(item1c)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1cChk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1cChk)
assert.NoError(t, err)
assert.Zero(t, len(item1cChk.Tags))
assert.Zero(t, len(item1cChk.Settings))
// An option type to pointer jsonb field
type optionType2 struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings *JSONBMap `db:"settings"`
}
item2 := optionType2{
Name: "JS", Tags: []string{"hi", "bye"}, Settings: nil,
}
id, err = optionTypes.Insert(item2)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item2Chk optionType2
res := optionTypes.Find(db.Cond{"id": id})
err = res.One(&item2Chk)
assert.NoError(t, err)
assert.Equal(t, id.(int64), item2Chk.ID)
assert.Equal(t, item2Chk.Name, item2.Name)
assert.Equal(t, item2Chk.Tags[0], item2.Tags[0])
assert.Equal(t, len(item2Chk.Tags), len(item2.Tags))
// Update the value
m := JSONBMap{}
m["lang"] = "javascript"
m["num"] = 31337
item2.Settings = &m
err = res.Update(item2)
assert.NoError(t, err)
err = res.One(&item2Chk)
assert.NoError(t, err)
assert.Equal(t, float64(31337), (*item2Chk.Settings)["num"].(float64))
assert.Equal(t, "javascript", (*item2Chk.Settings)["lang"])
// An option type to pointer string array field
type optionType3 struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags *StringArray `db:"tags"`
Settings JSONBMap `db:"settings"`
}
item3 := optionType3{
Name: "Julia",
Tags: nil,
Settings: JSONBMap{"girl": true, "lang": true},
}
id, err = optionTypes.Insert(item3)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item3Chk optionType2
err = optionTypes.Find(db.Cond{"id": id}).One(&item3Chk)
assert.NoError(t, err)
}
type Settings struct {
Name string `json:"name"`
Num int64 `json:"num"`
}
func (s *Settings) Scan(src interface{}) error {
return DecodeJSONB(s, src)
}
func (s Settings) Value() (driver.Value, error) {
return EncodeJSONB(s)
}
func TestOptionTypeJsonbStruct(t *testing.T) {
sess := mustOpen()
defer sess.Close()
optionTypes := sess.Collection("option_types")
err := optionTypes.Truncate()
assert.NoError(t, err)
type OptionType struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings Settings `db:"settings"`
}
item1 := &OptionType{
Name: "Hi",
Tags: []string{"aah", "ok"},
Settings: Settings{Name: "a", Num: 123},
}
id, err := optionTypes.Insert(item1)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1Chk OptionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
assert.NoError(t, err)
assert.Equal(t, 2, len(item1Chk.Tags))
assert.Equal(t, "aah", item1Chk.Tags[0])
assert.Equal(t, "a", item1Chk.Settings.Name)
assert.Equal(t, int64(123), item1Chk.Settings.Num)
}
func TestSchemaCollection(t *testing.T) {
sess := mustOpen()
defer sess.Close()
col := sess.Collection("test_schema.test")
_, err := col.Insert(map[string]int{"id": 9})
assert.Equal(t, nil, err)
var dump []map[string]int
err = col.Find().All(&dump)
assert.Nil(t, err)
assert.Equal(t, 1, len(dump))
assert.Equal(t, 9, dump[0]["id"])
}
func TestMaxOpenConns_Issue340(t *testing.T) {
sess := mustOpen()
defer sess.Close()
sess.SetMaxOpenConns(5)
var wg sync.WaitGroup
for i := 0; i < 30; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
_, err := sess.Exec(fmt.Sprintf(`SELECT pg_sleep(1.%d)`, i))
if err != nil {
t.Fatal(err)
}
}(i)
}
wg.Wait()
sess.SetMaxOpenConns(0)
}
func TestUUIDInsert_Issue370(t *testing.T) {
sess := mustOpen()
defer sess.Close()
{
type itemT struct {
ID *uuid.UUID `db:"id"`
Name string `db:"name"`
}
newUUID := uuid.NewV4()
item1 := itemT{
ID: &newUUID,
Name: "Jonny",
}
col := sess.Collection("issue_370")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
{
type itemT struct {
ID uuid.UUID `db:"id"`
Name string `db:"name"`
}
item1 := itemT{
ID: uuid.NewV4(),
Name: "Jonny",
}
col := sess.Collection("issue_370")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
{
type itemT struct {
ID Int64Array `db:"id"`
Name string `db:"name"`
}
item1 := itemT{
ID: Int64Array{1, 2, 3},
Name: "Vojtech",
}
col := sess.Collection("issue_370_2")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
}
func TestEscapeQuestionMark(t *testing.T) {
sess := mustOpen()
defer sess.Close()
var val bool
{
res, err := sess.QueryRow(`SELECT '{"mykey":["val1", "val2"]}'::jsonb->'mykey' ?? ?`, "val2")
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
{
res, err := sess.QueryRow(`SELECT ?::jsonb->'mykey' ?? ?`, `{"mykey":["val1", "val2"]}`, `val2`)
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
{
res, err := sess.QueryRow(`SELECT ?::jsonb->? ?? ?`, `{"mykey":["val1", "val2"]}`, `mykey`, `val2`)
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
}
func TestTextMode_Issue391(t *testing.T) {
sess := mustOpen()
defer sess.Close()
testPostgreSQLTypes(t, sess)
}
func TestBinaryMode_Issue391(t *testing.T) {
settingsWithBinaryMode := settings
settingsWithBinaryMode.Options["binary_parameters"] = "yes"
sess, err := Open(settingsWithBinaryMode)
if err != nil {
t.Fatal(err)
}
defer sess.Close()
testPostgreSQLTypes(t, sess)
}
func getStats(sess sqlbuilder.Database) (map[string]int, error) {
stats := make(map[string]int)
row := sess.Driver().(*sql.DB).QueryRow(`SELECT count(1) AS value FROM pg_prepared_statements`)
var value int
err := row.Scan(&value)
if err != nil {
return nil, err
}
stats["pg_prepared_statements_count"] = value
return stats, nil
}
func cleanUpCheck(sess sqlbuilder.Database) (err error) {
var stats map[string]int
stats, err = getStats(sess)
if err != nil {
return err
}
if activeStatements := sqladapter.NumActiveStatements(); activeStatements > 128 |
sess.ClearCache()
stats, err = getStats(sess)
if err != nil {
return err
}
if stats["pg_prepared_statements_count"] != 0 {
return fmt.Errorf(`Expecting "Prepared_stmt_count" to be 0, got %d`, stats["Prepared_stmt_count"])
}
return nil
}
| {
return fmt.Errorf("Expecting active statements to be at most 128, got %d", activeStatements)
} | conditional_block |
adapter_test.go | // Copyright (c) 2012-today The upper.io/db authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//go:generate bash -c "sed s/ADAPTER/postgresql/g ../internal/sqladapter/testing/adapter.go.tpl > generated_test.go"
package postgresql
import (
"database/sql"
"database/sql/driver"
"fmt"
"math/rand"
"os"
"sync"
"testing"
"github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"upper.io/db.v3"
"upper.io/db.v3/internal/sqladapter"
"upper.io/db.v3/lib/sqlbuilder"
)
const (
testTimeZone = "Canada/Eastern"
)
var settings = ConnectionURL{
Database: os.Getenv("DB_NAME"),
User: os.Getenv("DB_USERNAME"),
Password: os.Getenv("DB_PASSWORD"),
Host: os.Getenv("DB_HOST") + ":" + os.Getenv("DB_PORT"),
Options: map[string]string{
"timezone": testTimeZone,
},
}
func tearUp() error {
sess := mustOpen()
defer sess.Close()
batch := []string{
`DROP TABLE IF EXISTS artist`,
`CREATE TABLE artist (
id serial primary key,
name varchar(60)
)`,
`DROP TABLE IF EXISTS publication`,
`CREATE TABLE publication (
id serial primary key,
title varchar(80),
author_id integer
)`,
`DROP TABLE IF EXISTS review`,
`CREATE TABLE review (
id serial primary key,
publication_id integer,
name varchar(80),
comments text,
created timestamp without time zone
)`,
`DROP TABLE IF EXISTS data_types`,
`CREATE TABLE data_types (
id serial primary key,
_uint integer,
_uint8 integer,
_uint16 integer,
_uint32 integer,
_uint64 integer,
_int integer,
_int8 integer,
_int16 integer,
_int32 integer,
_int64 integer,
_float32 numeric(10,6),
_float64 numeric(10,6),
_bool boolean,
_string text,
_date timestamp with time zone,
_nildate timestamp without time zone null,
_ptrdate timestamp without time zone,
_defaultdate timestamp without time zone DEFAULT now(),
_time bigint
)`,
`DROP TABLE IF EXISTS stats_test`,
`CREATE TABLE stats_test (
id serial primary key,
numeric integer,
value integer
)`,
`DROP TABLE IF EXISTS composite_keys`,
`CREATE TABLE composite_keys (
code varchar(255) default '',
user_id varchar(255) default '',
some_val varchar(255) default '',
primary key (code, user_id)
)`,
`DROP TABLE IF EXISTS option_types`,
`CREATE TABLE option_types (
id serial primary key,
name varchar(255) default '',
tags varchar(64)[],
settings jsonb
)`,
`DROP TABLE IF EXISTS test_schema.test`,
`DROP SCHEMA IF EXISTS test_schema`,
`CREATE SCHEMA test_schema`,
`CREATE TABLE test_schema.test (id integer)`,
`DROP TABLE IF EXISTS pg_types`,
`CREATE TABLE pg_types (
id serial primary key,
auto_integer_array integer[],
auto_string_array text[],
auto_integer_array_ptr integer[],
auto_string_array_ptr text[],
integer_array integer[],
string_value varchar(255),
auto_jsonb jsonb,
auto_jsonb_map jsonb,
auto_jsonb_array jsonb,
custom_jsonb jsonb,
auto_jsonb_ptr jsonb,
integer_valuer_value smallint[],
string_array text[],
field1 int,
field2 varchar(64),
field3 decimal
)`,
`DROP TABLE IF EXISTS issue_370`,
`CREATE TABLE issue_370 (
id UUID PRIMARY KEY,
name VARCHAR(25)
)`,
`DROP TABLE IF EXISTS issue_370_2`,
`CREATE TABLE issue_370_2 (
id INTEGER[3] PRIMARY KEY,
name VARCHAR(25)
)`,
}
for _, s := range batch {
driver := sess.Driver().(*sql.DB)
if _, err := driver.Exec(s); err != nil {
return err
}
}
return nil
}
type customJSONB struct {
N string `json:"name"`
V float64 `json:"value"`
}
func (c customJSONB) Value() (driver.Value, error) {
return EncodeJSONB(c)
}
func (c *customJSONB) Scan(src interface{}) error {
return DecodeJSONB(c, src)
}
var (
_ = driver.Valuer(&customJSONB{})
_ = sql.Scanner(&customJSONB{})
)
func testPostgreSQLTypes(t *testing.T, sess sqlbuilder.Database) {
type PGType struct {
ID int64 `db:"id,omitempty"`
IntegerArray Int64Array `db:"integer_array"`
StringValue *string `db:"string_value,omitempty"`
StringArray StringArray `db:"string_array"`
Field1 *int64 `db:"field1,omitempty"`
Field2 *string `db:"field2,omitempty"`
Field3 *float64 `db:"field3,omitempty"`
AutoIntegerArray Int64Array `db:"auto_integer_array"`
AutoStringArray StringArray `db:"auto_string_array"`
AutoJSONB JSONB `db:"auto_jsonb"`
AutoJSONBMap JSONBMap `db:"auto_jsonb_map"`
AutoJSONBArray JSONBArray `db:"auto_jsonb_array"`
CustomJSONB customJSONB `db:"custom_jsonb"`
AutoIntegerArrayPtr *Int64Array `db:"auto_integer_array_ptr,omitempty"`
AutoStringArrayPtr *StringArray `db:"auto_string_array_ptr,omitempty"`
AutoJSONBPtr *JSONB `db:"auto_jsonb_ptr,omitempty"`
}
field1 := int64(10)
field2 := string("ten")
field3 := float64(10.0)
testValue := "Hello world!"
origPgTypeTests := []PGType{
PGType{
Field1: &field1,
Field2: &field2,
Field3: &field3,
},
PGType{
IntegerArray: []int64{1, 2, 3, 4},
},
PGType{
AutoIntegerArray: Int64Array{1, 2, 3, 4},
AutoIntegerArrayPtr: nil,
},
PGType{
AutoJSONBMap: JSONBMap{
"Hello": "world",
"Roses": "red",
},
AutoJSONBArray: JSONBArray{float64(1), float64(2), float64(3), float64(4)},
},
PGType{
AutoIntegerArray: nil,
AutoIntegerArrayPtr: &Int64Array{4, 5, 6, 7},
},
PGType{
AutoJSONBMap: JSONBMap{},
AutoJSONBArray: JSONBArray{},
},
PGType{
AutoJSONBMap: JSONBMap(nil),
AutoJSONBArray: JSONBArray(nil),
},
PGType{
AutoStringArray: StringArray{"aaa", "bbb", "ccc"},
AutoStringArrayPtr: nil,
},
PGType{
AutoStringArray: nil,
AutoStringArrayPtr: &StringArray{"ddd", "eee", "ffff"},
},
PGType{
AutoJSONB: JSONB{map[string]interface{}{"hello": "world!"}},
AutoJSONBPtr: nil,
},
PGType{
AutoJSONB: JSONB{nil},
AutoJSONBPtr: &JSONB{[]interface{}{float64(9), float64(9), float64(9)}},
},
PGType{
IntegerArray: []int64{1, 2, 3, 4},
StringArray: []string{"a", "boo", "bar"},
},
PGType{
Field2: &field2,
Field3: &field3,
},
PGType{
IntegerArray: []int64{},
},
PGType{
StringArray: []string{},
},
PGType{
IntegerArray: []int64{},
StringArray: []string{},
},
PGType{},
PGType{
IntegerArray: []int64{1},
StringArray: []string{"a"},
},
PGType{
IntegerArray: []int64{0, 0, 0, 0},
StringValue: &testValue,
CustomJSONB: customJSONB{
N: "Hello",
},
StringArray: []string{"", "", "", ``, `""`},
},
PGType{
StringValue: &testValue,
},
PGType{
Field1: &field1,
CustomJSONB: customJSONB{
V: 4.4,
},
},
PGType{
StringArray: []string{"a", "boo", "bar"},
},
PGType{
StringArray: []string{"a", "boo", "bar", `""`},
CustomJSONB: customJSONB{},
},
PGType{
IntegerArray: []int64{0},
StringArray: []string{""},
},
PGType{
CustomJSONB: customJSONB{
N: "Peter",
V: 5.56,
},
},
}
for i := 0; i < 100; i++ {
pgTypeTests := make([]PGType, len(origPgTypeTests))
perm := rand.Perm(len(origPgTypeTests))
for i, v := range perm {
pgTypeTests[v] = origPgTypeTests[i]
}
for i := range pgTypeTests {
id, err := sess.Collection("pg_types").Insert(pgTypeTests[i])
assert.NoError(t, err)
var actual PGType
err = sess.Collection("pg_types").Find(id).One(&actual)
assert.NoError(t, err)
expected := pgTypeTests[i]
expected.ID = id.(int64)
assert.Equal(t, expected, actual)
}
for i := range pgTypeTests {
row, err := sess.InsertInto("pg_types").Values(pgTypeTests[i]).Returning("id").QueryRow()
assert.NoError(t, err)
var id int64
err = row.Scan(&id)
assert.NoError(t, err)
var actual PGType
err = sess.Collection("pg_types").Find(id).One(&actual)
assert.NoError(t, err)
expected := pgTypeTests[i]
expected.ID = id
assert.Equal(t, expected, actual)
var actual2 PGType
err = sess.SelectFrom("pg_types").Where("id = ?", id).One(&actual2)
assert.NoError(t, err)
assert.Equal(t, expected, actual2)
}
inserter := sess.InsertInto("pg_types")
for i := range pgTypeTests {
inserter = inserter.Values(pgTypeTests[i])
}
_, err := inserter.Exec()
assert.NoError(t, err)
err = sess.Collection("pg_types").Truncate()
assert.NoError(t, err)
batch := sess.InsertInto("pg_types").Batch(50)
go func() {
defer batch.Done()
for i := range pgTypeTests {
batch.Values(pgTypeTests[i])
}
}()
err = batch.Wait()
assert.NoError(t, err)
var values []PGType
err = sess.SelectFrom("pg_types").All(&values)
assert.NoError(t, err)
for i := range values {
expected := pgTypeTests[i]
expected.ID = values[i].ID
assert.Equal(t, expected, values[i])
}
}
}
func TestOptionTypes(t *testing.T) {
sess := mustOpen()
defer sess.Close()
optionTypes := sess.Collection("option_types")
err := optionTypes.Truncate()
assert.NoError(t, err)
// TODO: lets do some benchmarking on these auto-wrapped option types..
// TODO: add nullable jsonb field mapped to a []string
// A struct with wrapped option types defined in the struct tags
// for postgres string array and jsonb types
type optionType struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings JSONBMap `db:"settings"`
}
// Item 1
item1 := optionType{
Name: "Food",
Tags: []string{"toronto", "pizza"},
Settings: JSONBMap{"a": 1, "b": 2},
}
id, err := optionTypes.Insert(item1)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1Chk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
assert.NoError(t, err)
assert.Equal(t, float64(1), item1Chk.Settings["a"])
assert.Equal(t, "toronto", item1Chk.Tags[0])
// Item 1 B
item1b := &optionType{
Name: "Golang",
Tags: []string{"love", "it"},
Settings: map[string]interface{}{"go": 1, "lang": 2},
}
id, err = optionTypes.Insert(item1b)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1bChk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1bChk)
assert.NoError(t, err)
assert.Equal(t, float64(1), item1bChk.Settings["go"])
assert.Equal(t, "love", item1bChk.Tags[0])
// Item 1 C
item1c := &optionType{
Name: "Sup", Tags: []string{}, Settings: map[string]interface{}{},
}
id, err = optionTypes.Insert(item1c)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1cChk optionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1cChk)
assert.NoError(t, err)
assert.Zero(t, len(item1cChk.Tags))
assert.Zero(t, len(item1cChk.Settings))
// An option type to pointer jsonb field
type optionType2 struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings *JSONBMap `db:"settings"`
}
item2 := optionType2{
Name: "JS", Tags: []string{"hi", "bye"}, Settings: nil,
}
id, err = optionTypes.Insert(item2)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item2Chk optionType2
res := optionTypes.Find(db.Cond{"id": id})
err = res.One(&item2Chk)
assert.NoError(t, err)
assert.Equal(t, id.(int64), item2Chk.ID)
assert.Equal(t, item2Chk.Name, item2.Name)
assert.Equal(t, item2Chk.Tags[0], item2.Tags[0])
assert.Equal(t, len(item2Chk.Tags), len(item2.Tags))
// Update the value
m := JSONBMap{}
m["lang"] = "javascript"
m["num"] = 31337
item2.Settings = &m
err = res.Update(item2)
assert.NoError(t, err)
err = res.One(&item2Chk)
assert.NoError(t, err)
assert.Equal(t, float64(31337), (*item2Chk.Settings)["num"].(float64))
assert.Equal(t, "javascript", (*item2Chk.Settings)["lang"])
// An option type to pointer string array field
type optionType3 struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags *StringArray `db:"tags"`
Settings JSONBMap `db:"settings"`
}
item3 := optionType3{
Name: "Julia",
Tags: nil,
Settings: JSONBMap{"girl": true, "lang": true},
}
id, err = optionTypes.Insert(item3)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item3Chk optionType2
err = optionTypes.Find(db.Cond{"id": id}).One(&item3Chk)
assert.NoError(t, err)
}
type Settings struct {
Name string `json:"name"`
Num int64 `json:"num"`
}
func (s *Settings) Scan(src interface{}) error {
return DecodeJSONB(s, src)
}
func (s Settings) Value() (driver.Value, error) {
return EncodeJSONB(s)
}
func TestOptionTypeJsonbStruct(t *testing.T) {
sess := mustOpen()
defer sess.Close()
optionTypes := sess.Collection("option_types")
err := optionTypes.Truncate()
assert.NoError(t, err)
type OptionType struct {
ID int64 `db:"id,omitempty"`
Name string `db:"name"`
Tags StringArray `db:"tags"`
Settings Settings `db:"settings"`
}
item1 := &OptionType{
Name: "Hi",
Tags: []string{"aah", "ok"},
Settings: Settings{Name: "a", Num: 123},
}
id, err := optionTypes.Insert(item1)
assert.NoError(t, err)
if pk, ok := id.(int64); !ok || pk == 0 {
t.Fatalf("Expecting an ID.")
}
var item1Chk OptionType
err = optionTypes.Find(db.Cond{"id": id}).One(&item1Chk)
assert.NoError(t, err)
assert.Equal(t, 2, len(item1Chk.Tags))
assert.Equal(t, "aah", item1Chk.Tags[0])
assert.Equal(t, "a", item1Chk.Settings.Name)
assert.Equal(t, int64(123), item1Chk.Settings.Num)
}
func TestSchemaCollection(t *testing.T) {
sess := mustOpen()
defer sess.Close()
col := sess.Collection("test_schema.test")
_, err := col.Insert(map[string]int{"id": 9})
assert.Equal(t, nil, err)
var dump []map[string]int
err = col.Find().All(&dump)
assert.Nil(t, err)
assert.Equal(t, 1, len(dump))
assert.Equal(t, 9, dump[0]["id"])
}
func TestMaxOpenConns_Issue340(t *testing.T) {
sess := mustOpen()
defer sess.Close()
sess.SetMaxOpenConns(5)
var wg sync.WaitGroup
for i := 0; i < 30; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
_, err := sess.Exec(fmt.Sprintf(`SELECT pg_sleep(1.%d)`, i))
if err != nil {
t.Fatal(err)
}
}(i)
}
wg.Wait()
sess.SetMaxOpenConns(0)
}
func TestUUIDInsert_Issue370(t *testing.T) {
sess := mustOpen()
defer sess.Close()
{
type itemT struct {
ID *uuid.UUID `db:"id"`
Name string `db:"name"`
}
newUUID := uuid.NewV4()
item1 := itemT{
ID: &newUUID,
Name: "Jonny",
}
col := sess.Collection("issue_370")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
{
type itemT struct {
ID uuid.UUID `db:"id"`
Name string `db:"name"`
}
item1 := itemT{
ID: uuid.NewV4(),
Name: "Jonny",
}
col := sess.Collection("issue_370")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
{
type itemT struct {
ID Int64Array `db:"id"`
Name string `db:"name"`
}
item1 := itemT{
ID: Int64Array{1, 2, 3},
Name: "Vojtech",
}
col := sess.Collection("issue_370_2")
err := col.Truncate()
assert.NoError(t, err)
err = col.InsertReturning(&item1)
assert.NoError(t, err)
var item2 itemT
err = col.Find(item1.ID).One(&item2)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item2.Name)
var item3 itemT
err = col.Find(db.Cond{"id": item1.ID}).One(&item3)
assert.NoError(t, err)
assert.Equal(t, item1.Name, item3.Name)
}
}
func TestEscapeQuestionMark(t *testing.T) {
sess := mustOpen()
defer sess.Close()
var val bool
{
res, err := sess.QueryRow(`SELECT '{"mykey":["val1", "val2"]}'::jsonb->'mykey' ?? ?`, "val2")
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
{
res, err := sess.QueryRow(`SELECT ?::jsonb->'mykey' ?? ?`, `{"mykey":["val1", "val2"]}`, `val2`)
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
{
res, err := sess.QueryRow(`SELECT ?::jsonb->? ?? ?`, `{"mykey":["val1", "val2"]}`, `mykey`, `val2`)
assert.NoError(t, err)
err = res.Scan(&val)
assert.NoError(t, err)
assert.Equal(t, true, val)
}
}
func TestTextMode_Issue391(t *testing.T) {
sess := mustOpen()
defer sess.Close()
testPostgreSQLTypes(t, sess)
}
func TestBinaryMode_Issue391(t *testing.T) {
settingsWithBinaryMode := settings
settingsWithBinaryMode.Options["binary_parameters"] = "yes"
sess, err := Open(settingsWithBinaryMode)
if err != nil {
t.Fatal(err)
}
defer sess.Close()
testPostgreSQLTypes(t, sess)
}
func getStats(sess sqlbuilder.Database) (map[string]int, error) {
stats := make(map[string]int)
row := sess.Driver().(*sql.DB).QueryRow(`SELECT count(1) AS value FROM pg_prepared_statements`)
var value int
err := row.Scan(&value)
if err != nil {
return nil, err
}
stats["pg_prepared_statements_count"] = value
return stats, nil
}
func cleanUpCheck(sess sqlbuilder.Database) (err error) | {
var stats map[string]int
stats, err = getStats(sess)
if err != nil {
return err
}
if activeStatements := sqladapter.NumActiveStatements(); activeStatements > 128 {
return fmt.Errorf("Expecting active statements to be at most 128, got %d", activeStatements)
}
sess.ClearCache()
stats, err = getStats(sess)
if err != nil {
return err
}
if stats["pg_prepared_statements_count"] != 0 {
return fmt.Errorf(`Expecting "Prepared_stmt_count" to be 0, got %d`, stats["Prepared_stmt_count"])
}
return nil
} | identifier_body | |
test_util.rs | use crate::runtime::Runtime;
use crate::{event::LogEvent, Event};
use futures::{compat::Stream01CompatExt, stream, SinkExt, Stream, StreamExt, TryStreamExt};
use futures01::{
future, stream as stream01, sync::mpsc, try_ready, Async, Future, Poll, Stream as Stream01,
};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use std::{
collections::HashMap,
convert::Infallible,
fs::File,
io::Read,
iter, mem,
net::{Shutdown, SocketAddr},
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::Arc,
};
use tokio::{
io::{AsyncRead, AsyncWrite, Result as IoResult},
net::{TcpListener, TcpStream},
sync::oneshot,
task::JoinHandle,
};
use tokio01::util::FutureExt;
use tokio_util::codec::{Encoder, FramedRead, FramedWrite, LinesCodec};
#[macro_export]
macro_rules! assert_downcast_matches {
($e:expr, $t:ty, $v:pat) => {{
match $e.downcast_ref::<$t>() {
Some($v) => (),
got => panic!("assertion failed: got wrong error variant {:?}", got),
}
}};
}
static NEXT_PORT: AtomicUsize = AtomicUsize::new(1234);
pub fn next_addr() -> SocketAddr {
use std::net::{IpAddr, Ipv4Addr};
let port = NEXT_PORT.fetch_add(1, Ordering::AcqRel) as u16;
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)
}
pub fn trace_init() {
let env = std::env::var("TEST_LOG").unwrap_or_else(|_| "off".to_string());
let subscriber = tracing_subscriber::FmtSubscriber::builder()
.with_env_filter(env)
.finish();
let _ = tracing_log::LogTracer::init();
let _ = tracing::dispatcher::set_global_default(tracing::Dispatch::new(subscriber));
}
pub async fn send_lines(
addr: SocketAddr,
lines: impl IntoIterator<Item = String>,
) -> Result<(), Infallible> {
send_encodable(addr, LinesCodec::new(), lines).await
}
pub async fn send_encodable<I, E: From<std::io::Error> + std::fmt::Debug>(
addr: SocketAddr,
encoder: impl Encoder<I, Error = E>,
lines: impl IntoIterator<Item = I>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut sink = FramedWrite::new(stream, encoder);
let mut lines = stream::iter(lines.into_iter()).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub async fn send_lines_tls(
addr: SocketAddr,
host: String,
lines: impl Iterator<Item = String>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut connector = SslConnector::builder(SslMethod::tls()).unwrap();
connector.set_verify(SslVerifyMode::NONE);
let config = connector.build().configure().unwrap();
let stream = tokio_openssl::connect(config, &host, stream).await.unwrap();
let mut sink = FramedWrite::new(stream, LinesCodec::new());
let mut lines = stream::iter(lines).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut().get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub fn temp_file() -> PathBuf {
let path = std::env::temp_dir();
let file_name = random_string(16);
path.join(file_name + ".log")
}
pub fn temp_dir() -> PathBuf {
let path = std::env::temp_dir();
let dir_name = random_string(16);
path.join(dir_name)
}
pub fn random_lines_with_stream(
len: usize,
count: usize,
) -> (Vec<String>, impl Stream01<Item = Event, Error = ()>) {
let lines = (0..count).map(|_| random_string(len)).collect::<Vec<_>>();
let stream = stream01::iter_ok(lines.clone().into_iter().map(Event::from));
(lines, stream)
}
pub fn random_events_with_stream(
len: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || Event::from(random_string(len)))
}
pub fn random_nested_events_with_stream(
len: usize,
breadth: usize,
depth: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || {
let mut log = LogEvent::default();
let tree = random_pseudonested_map(len, breadth, depth);
for (k, v) in tree.into_iter() {
log.insert(k, v);
}
Event::Log(log)
})
}
pub fn random_string(len: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(len)
.collect::<String>()
}
pub fn random_lines(len: usize) -> impl Iterator<Item = String> {
std::iter::repeat(()).map(move |_| random_string(len))
}
pub fn random_map(max_size: usize, field_len: usize) -> HashMap<String, String> {
let size = thread_rng().gen_range(0, max_size);
(0..size)
.map(move |_| (random_string(field_len), random_string(field_len)))
.collect()
}
pub fn random_maps(
max_size: usize,
field_len: usize,
) -> impl Iterator<Item = HashMap<String, String>> {
iter::repeat(()).map(move |_| random_map(max_size, field_len))
}
pub fn collect_n<T>(mut rx: mpsc::Receiver<T>, n: usize) -> impl Future<Item = Vec<T>, Error = ()> {
let mut events = Vec::new();
future::poll_fn(move || {
while events.len() < n {
let e = try_ready!(rx.poll()).unwrap();
events.push(e);
}
Ok(Async::Ready(mem::replace(&mut events, Vec::new())))
})
}
pub fn lines_from_file<P: AsRef<Path>>(path: P) -> Vec<String> {
trace!(message = "Reading file.", path = %path.as_ref().display());
let mut file = File::open(path).unwrap();
let mut output = String::new();
file.read_to_string(&mut output).unwrap();
output.lines().map(|s| s.to_owned()).collect()
}
pub fn wait_for(mut f: impl FnMut() -> bool) {
let wait = std::time::Duration::from_millis(5);
let limit = std::time::Duration::from_secs(5);
let mut attempts = 0;
while !f() {
std::thread::sleep(wait);
attempts += 1;
if attempts * wait > limit {
panic!("timed out while waiting");
}
}
}
pub fn block_on<F, R, E>(future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let mut rt = runtime();
rt.block_on(future)
}
pub fn block_on_std<F>(future: F) -> F::Output
where
F: std::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let mut rt = runtime();
rt.block_on_std(future)
}
pub fn runtime() -> Runtime {
Runtime::single_threaded().unwrap()
}
pub fn wait_for_tcp(addr: SocketAddr) {
wait_for(|| std::net::TcpStream::connect(addr).is_ok())
}
pub fn wait_for_atomic_usize<T, F>(val: T, unblock: F)
where
T: AsRef<AtomicUsize>,
F: Fn(usize) -> bool,
{
let val = val.as_ref();
wait_for(|| unblock(val.load(Ordering::SeqCst)))
}
pub fn shutdown_on_idle(runtime: Runtime) {
block_on(
runtime
.shutdown_on_idle()
.timeout(std::time::Duration::from_secs(10)),
)
.unwrap()
}
#[derive(Debug)]
pub struct CollectN<S>
where
S: Stream01,
{
stream: Option<S>,
remaining: usize,
items: Option<Vec<S::Item>>,
}
impl<S: Stream01> CollectN<S> {
pub fn new(s: S, n: usize) -> Self {
Self {
stream: Some(s),
remaining: n,
items: Some(Vec::new()),
}
}
}
impl<S> Future for CollectN<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
let stream = self.stream.take();
if stream.is_none() {
panic!("Stream is missing");
}
let mut stream = stream.unwrap();
loop {
if self.remaining == 0 {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
match stream.poll() {
Ok(Async::Ready(Some(e))) => {
self.items.as_mut().unwrap().push(e);
self.remaining -= 1;
}
Ok(Async::Ready(None)) => {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
Ok(Async::NotReady) => {
self.stream.replace(stream);
return Ok(Async::NotReady);
}
Err(e) => {
return Err(e);
}
}
}
}
}
#[derive(Debug)]
pub struct CollectCurrent<S>
where
S: Stream01,
{
stream: Option<S>,
}
impl<S: Stream01> CollectCurrent<S> {
pub fn new(s: S) -> Self {
Self { stream: Some(s) }
}
}
impl<S> Future for CollectCurrent<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
if let Some(mut stream) = self.stream.take() {
let mut items = vec![];
loop {
match stream.poll() {
Ok(Async::Ready(Some(e))) => items.push(e),
Ok(Async::Ready(None)) | Ok(Async::NotReady) => {
return Ok(Async::Ready((stream, items)));
}
Err(e) => {
return Err(e);
}
}
}
} else {
panic!("Future already completed");
}
}
}
pub struct CountReceiver<T> {
count: Arc<AtomicUsize>,
trigger: oneshot::Sender<()>,
handle: JoinHandle<Vec<T>>,
}
impl<T: Send + 'static> CountReceiver<T> {
pub fn count(&self) -> usize {
self.count.load(Ordering::Relaxed)
}
pub async fn wait(self) -> Vec<T> {
let _ = self.trigger.send(());
self.handle.await.unwrap()
}
fn new<F, Fut>(make_fut: F) -> CountReceiver<T>
where
F: FnOnce(Arc<AtomicUsize>, oneshot::Receiver<()>) -> Fut,
Fut: std::future::Future<Output = Vec<T>> + Send + 'static,
{
let count = Arc::new(AtomicUsize::new(0));
let (trigger, tripwire) = oneshot::channel();
CountReceiver {
count: Arc::clone(&count),
trigger,
handle: tokio::spawn(make_fut(count, tripwire)),
}
}
}
impl CountReceiver<String> {
pub fn receive_lines(addr: SocketAddr) -> CountReceiver<String> {
CountReceiver::new(|count, tripwire| async move {
let mut listener = TcpListener::bind(addr).await.unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
#[cfg(all(feature = "tokio/uds", unix))]
pub fn receive_lines_unix<P>(path: P) -> CountReceiver<String>
where
P: AsRef<Path> + Send + 'static,
|
async fn receive_lines_stream<S, T>(
stream: S,
count: Arc<AtomicUsize>,
tripwire: oneshot::Receiver<()>,
) -> Vec<String>
where
S: Stream<Item = IoResult<T>>,
T: AsyncWrite + AsyncRead,
{
stream
.take_until(tripwire)
.map_ok(|socket| FramedRead::new(socket, LinesCodec::new()))
.map(|x| x.unwrap())
.flatten()
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<String>>()
.await
}
}
impl CountReceiver<Event> {
pub fn receive_events<S>(stream: S) -> CountReceiver<Event>
where
S: Stream01<Item = Event> + Send + 'static,
<S as Stream01>::Error: std::fmt::Debug,
{
CountReceiver::new(|count, tripwire| async move {
stream
.compat()
.take_until(tripwire)
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<Event>>()
.await
})
}
}
fn random_events_with_stream_generic<F>(
count: usize,
generator: F,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>)
where
F: Fn() -> Event,
{
let events = (0..count).map(|_| generator()).collect::<Vec<_>>();
let stream = stream01::iter_ok(events.clone().into_iter());
(events, stream)
}
fn random_pseudonested_map(len: usize, breadth: usize, depth: usize) -> HashMap<String, String> {
if breadth == 0 || depth == 0 {
return HashMap::new();
}
if depth == 1 {
let mut leaf = HashMap::new();
leaf.insert(random_string(len), random_string(len));
return leaf;
}
let mut tree = HashMap::new();
for _ in 0..breadth {
let prefix = random_string(len);
let subtree = random_pseudonested_map(len, breadth, depth - 1);
let subtree: HashMap<String, String> = subtree
.into_iter()
.map(|(mut key, value)| {
key.insert(0, '.');
key.insert_str(0, &prefix[..]);
(key, value)
})
.collect();
for (key, value) in subtree.into_iter() {
tree.insert(key, value);
}
}
tree
}
| {
CountReceiver::new(|count, tripwire| async move {
let mut listener = tokio::net::UnixListener::bind(path).unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
} | identifier_body |
test_util.rs | use crate::runtime::Runtime;
use crate::{event::LogEvent, Event};
use futures::{compat::Stream01CompatExt, stream, SinkExt, Stream, StreamExt, TryStreamExt};
use futures01::{
future, stream as stream01, sync::mpsc, try_ready, Async, Future, Poll, Stream as Stream01,
};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use std::{
collections::HashMap,
convert::Infallible,
fs::File,
io::Read,
iter, mem,
net::{Shutdown, SocketAddr},
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::Arc,
};
use tokio::{
io::{AsyncRead, AsyncWrite, Result as IoResult},
net::{TcpListener, TcpStream},
sync::oneshot,
task::JoinHandle,
};
use tokio01::util::FutureExt;
use tokio_util::codec::{Encoder, FramedRead, FramedWrite, LinesCodec};
#[macro_export]
macro_rules! assert_downcast_matches {
($e:expr, $t:ty, $v:pat) => {{
match $e.downcast_ref::<$t>() {
Some($v) => (),
got => panic!("assertion failed: got wrong error variant {:?}", got),
}
}};
}
static NEXT_PORT: AtomicUsize = AtomicUsize::new(1234);
pub fn next_addr() -> SocketAddr {
use std::net::{IpAddr, Ipv4Addr};
let port = NEXT_PORT.fetch_add(1, Ordering::AcqRel) as u16;
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)
}
pub fn trace_init() {
let env = std::env::var("TEST_LOG").unwrap_or_else(|_| "off".to_string());
let subscriber = tracing_subscriber::FmtSubscriber::builder()
.with_env_filter(env)
.finish();
let _ = tracing_log::LogTracer::init();
let _ = tracing::dispatcher::set_global_default(tracing::Dispatch::new(subscriber));
}
pub async fn send_lines(
addr: SocketAddr,
lines: impl IntoIterator<Item = String>,
) -> Result<(), Infallible> {
send_encodable(addr, LinesCodec::new(), lines).await
}
pub async fn send_encodable<I, E: From<std::io::Error> + std::fmt::Debug>(
addr: SocketAddr,
encoder: impl Encoder<I, Error = E>,
lines: impl IntoIterator<Item = I>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut sink = FramedWrite::new(stream, encoder);
let mut lines = stream::iter(lines.into_iter()).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub async fn send_lines_tls(
addr: SocketAddr,
host: String,
lines: impl Iterator<Item = String>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut connector = SslConnector::builder(SslMethod::tls()).unwrap();
connector.set_verify(SslVerifyMode::NONE);
let config = connector.build().configure().unwrap();
let stream = tokio_openssl::connect(config, &host, stream).await.unwrap();
let mut sink = FramedWrite::new(stream, LinesCodec::new());
let mut lines = stream::iter(lines).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut().get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub fn temp_file() -> PathBuf {
let path = std::env::temp_dir();
let file_name = random_string(16);
path.join(file_name + ".log")
}
pub fn temp_dir() -> PathBuf {
let path = std::env::temp_dir();
let dir_name = random_string(16);
path.join(dir_name)
}
pub fn random_lines_with_stream(
len: usize,
count: usize,
) -> (Vec<String>, impl Stream01<Item = Event, Error = ()>) {
let lines = (0..count).map(|_| random_string(len)).collect::<Vec<_>>();
let stream = stream01::iter_ok(lines.clone().into_iter().map(Event::from));
(lines, stream)
}
pub fn random_events_with_stream(
len: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || Event::from(random_string(len)))
}
pub fn random_nested_events_with_stream(
len: usize,
breadth: usize,
depth: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || {
let mut log = LogEvent::default();
let tree = random_pseudonested_map(len, breadth, depth);
for (k, v) in tree.into_iter() {
log.insert(k, v);
}
Event::Log(log)
})
}
pub fn random_string(len: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(len)
.collect::<String>()
}
pub fn random_lines(len: usize) -> impl Iterator<Item = String> {
std::iter::repeat(()).map(move |_| random_string(len))
}
pub fn random_map(max_size: usize, field_len: usize) -> HashMap<String, String> {
let size = thread_rng().gen_range(0, max_size);
(0..size)
.map(move |_| (random_string(field_len), random_string(field_len)))
.collect()
}
pub fn random_maps(
max_size: usize,
field_len: usize,
) -> impl Iterator<Item = HashMap<String, String>> {
iter::repeat(()).map(move |_| random_map(max_size, field_len))
}
pub fn collect_n<T>(mut rx: mpsc::Receiver<T>, n: usize) -> impl Future<Item = Vec<T>, Error = ()> {
let mut events = Vec::new();
future::poll_fn(move || {
while events.len() < n {
let e = try_ready!(rx.poll()).unwrap();
events.push(e);
}
Ok(Async::Ready(mem::replace(&mut events, Vec::new())))
})
}
pub fn lines_from_file<P: AsRef<Path>>(path: P) -> Vec<String> {
trace!(message = "Reading file.", path = %path.as_ref().display());
let mut file = File::open(path).unwrap();
let mut output = String::new();
file.read_to_string(&mut output).unwrap();
output.lines().map(|s| s.to_owned()).collect()
}
pub fn wait_for(mut f: impl FnMut() -> bool) {
let wait = std::time::Duration::from_millis(5);
let limit = std::time::Duration::from_secs(5);
let mut attempts = 0;
while !f() {
std::thread::sleep(wait);
attempts += 1;
if attempts * wait > limit {
panic!("timed out while waiting");
}
}
}
pub fn block_on<F, R, E>(future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let mut rt = runtime();
rt.block_on(future)
}
pub fn block_on_std<F>(future: F) -> F::Output
where
F: std::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let mut rt = runtime();
rt.block_on_std(future)
} | pub fn wait_for_tcp(addr: SocketAddr) {
wait_for(|| std::net::TcpStream::connect(addr).is_ok())
}
pub fn wait_for_atomic_usize<T, F>(val: T, unblock: F)
where
T: AsRef<AtomicUsize>,
F: Fn(usize) -> bool,
{
let val = val.as_ref();
wait_for(|| unblock(val.load(Ordering::SeqCst)))
}
pub fn shutdown_on_idle(runtime: Runtime) {
block_on(
runtime
.shutdown_on_idle()
.timeout(std::time::Duration::from_secs(10)),
)
.unwrap()
}
#[derive(Debug)]
pub struct CollectN<S>
where
S: Stream01,
{
stream: Option<S>,
remaining: usize,
items: Option<Vec<S::Item>>,
}
impl<S: Stream01> CollectN<S> {
pub fn new(s: S, n: usize) -> Self {
Self {
stream: Some(s),
remaining: n,
items: Some(Vec::new()),
}
}
}
impl<S> Future for CollectN<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
let stream = self.stream.take();
if stream.is_none() {
panic!("Stream is missing");
}
let mut stream = stream.unwrap();
loop {
if self.remaining == 0 {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
match stream.poll() {
Ok(Async::Ready(Some(e))) => {
self.items.as_mut().unwrap().push(e);
self.remaining -= 1;
}
Ok(Async::Ready(None)) => {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
Ok(Async::NotReady) => {
self.stream.replace(stream);
return Ok(Async::NotReady);
}
Err(e) => {
return Err(e);
}
}
}
}
}
#[derive(Debug)]
pub struct CollectCurrent<S>
where
S: Stream01,
{
stream: Option<S>,
}
impl<S: Stream01> CollectCurrent<S> {
pub fn new(s: S) -> Self {
Self { stream: Some(s) }
}
}
impl<S> Future for CollectCurrent<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
if let Some(mut stream) = self.stream.take() {
let mut items = vec![];
loop {
match stream.poll() {
Ok(Async::Ready(Some(e))) => items.push(e),
Ok(Async::Ready(None)) | Ok(Async::NotReady) => {
return Ok(Async::Ready((stream, items)));
}
Err(e) => {
return Err(e);
}
}
}
} else {
panic!("Future already completed");
}
}
}
pub struct CountReceiver<T> {
count: Arc<AtomicUsize>,
trigger: oneshot::Sender<()>,
handle: JoinHandle<Vec<T>>,
}
impl<T: Send + 'static> CountReceiver<T> {
pub fn count(&self) -> usize {
self.count.load(Ordering::Relaxed)
}
pub async fn wait(self) -> Vec<T> {
let _ = self.trigger.send(());
self.handle.await.unwrap()
}
fn new<F, Fut>(make_fut: F) -> CountReceiver<T>
where
F: FnOnce(Arc<AtomicUsize>, oneshot::Receiver<()>) -> Fut,
Fut: std::future::Future<Output = Vec<T>> + Send + 'static,
{
let count = Arc::new(AtomicUsize::new(0));
let (trigger, tripwire) = oneshot::channel();
CountReceiver {
count: Arc::clone(&count),
trigger,
handle: tokio::spawn(make_fut(count, tripwire)),
}
}
}
impl CountReceiver<String> {
pub fn receive_lines(addr: SocketAddr) -> CountReceiver<String> {
CountReceiver::new(|count, tripwire| async move {
let mut listener = TcpListener::bind(addr).await.unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
#[cfg(all(feature = "tokio/uds", unix))]
pub fn receive_lines_unix<P>(path: P) -> CountReceiver<String>
where
P: AsRef<Path> + Send + 'static,
{
CountReceiver::new(|count, tripwire| async move {
let mut listener = tokio::net::UnixListener::bind(path).unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
async fn receive_lines_stream<S, T>(
stream: S,
count: Arc<AtomicUsize>,
tripwire: oneshot::Receiver<()>,
) -> Vec<String>
where
S: Stream<Item = IoResult<T>>,
T: AsyncWrite + AsyncRead,
{
stream
.take_until(tripwire)
.map_ok(|socket| FramedRead::new(socket, LinesCodec::new()))
.map(|x| x.unwrap())
.flatten()
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<String>>()
.await
}
}
impl CountReceiver<Event> {
pub fn receive_events<S>(stream: S) -> CountReceiver<Event>
where
S: Stream01<Item = Event> + Send + 'static,
<S as Stream01>::Error: std::fmt::Debug,
{
CountReceiver::new(|count, tripwire| async move {
stream
.compat()
.take_until(tripwire)
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<Event>>()
.await
})
}
}
fn random_events_with_stream_generic<F>(
count: usize,
generator: F,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>)
where
F: Fn() -> Event,
{
let events = (0..count).map(|_| generator()).collect::<Vec<_>>();
let stream = stream01::iter_ok(events.clone().into_iter());
(events, stream)
}
fn random_pseudonested_map(len: usize, breadth: usize, depth: usize) -> HashMap<String, String> {
if breadth == 0 || depth == 0 {
return HashMap::new();
}
if depth == 1 {
let mut leaf = HashMap::new();
leaf.insert(random_string(len), random_string(len));
return leaf;
}
let mut tree = HashMap::new();
for _ in 0..breadth {
let prefix = random_string(len);
let subtree = random_pseudonested_map(len, breadth, depth - 1);
let subtree: HashMap<String, String> = subtree
.into_iter()
.map(|(mut key, value)| {
key.insert(0, '.');
key.insert_str(0, &prefix[..]);
(key, value)
})
.collect();
for (key, value) in subtree.into_iter() {
tree.insert(key, value);
}
}
tree
} |
pub fn runtime() -> Runtime {
Runtime::single_threaded().unwrap()
}
| random_line_split |
test_util.rs | use crate::runtime::Runtime;
use crate::{event::LogEvent, Event};
use futures::{compat::Stream01CompatExt, stream, SinkExt, Stream, StreamExt, TryStreamExt};
use futures01::{
future, stream as stream01, sync::mpsc, try_ready, Async, Future, Poll, Stream as Stream01,
};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use std::{
collections::HashMap,
convert::Infallible,
fs::File,
io::Read,
iter, mem,
net::{Shutdown, SocketAddr},
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::Arc,
};
use tokio::{
io::{AsyncRead, AsyncWrite, Result as IoResult},
net::{TcpListener, TcpStream},
sync::oneshot,
task::JoinHandle,
};
use tokio01::util::FutureExt;
use tokio_util::codec::{Encoder, FramedRead, FramedWrite, LinesCodec};
#[macro_export]
macro_rules! assert_downcast_matches {
($e:expr, $t:ty, $v:pat) => {{
match $e.downcast_ref::<$t>() {
Some($v) => (),
got => panic!("assertion failed: got wrong error variant {:?}", got),
}
}};
}
static NEXT_PORT: AtomicUsize = AtomicUsize::new(1234);
pub fn next_addr() -> SocketAddr {
use std::net::{IpAddr, Ipv4Addr};
let port = NEXT_PORT.fetch_add(1, Ordering::AcqRel) as u16;
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)
}
pub fn trace_init() {
let env = std::env::var("TEST_LOG").unwrap_or_else(|_| "off".to_string());
let subscriber = tracing_subscriber::FmtSubscriber::builder()
.with_env_filter(env)
.finish();
let _ = tracing_log::LogTracer::init();
let _ = tracing::dispatcher::set_global_default(tracing::Dispatch::new(subscriber));
}
pub async fn send_lines(
addr: SocketAddr,
lines: impl IntoIterator<Item = String>,
) -> Result<(), Infallible> {
send_encodable(addr, LinesCodec::new(), lines).await
}
pub async fn send_encodable<I, E: From<std::io::Error> + std::fmt::Debug>(
addr: SocketAddr,
encoder: impl Encoder<I, Error = E>,
lines: impl IntoIterator<Item = I>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut sink = FramedWrite::new(stream, encoder);
let mut lines = stream::iter(lines.into_iter()).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub async fn send_lines_tls(
addr: SocketAddr,
host: String,
lines: impl Iterator<Item = String>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut connector = SslConnector::builder(SslMethod::tls()).unwrap();
connector.set_verify(SslVerifyMode::NONE);
let config = connector.build().configure().unwrap();
let stream = tokio_openssl::connect(config, &host, stream).await.unwrap();
let mut sink = FramedWrite::new(stream, LinesCodec::new());
let mut lines = stream::iter(lines).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut().get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub fn temp_file() -> PathBuf {
let path = std::env::temp_dir();
let file_name = random_string(16);
path.join(file_name + ".log")
}
pub fn temp_dir() -> PathBuf {
let path = std::env::temp_dir();
let dir_name = random_string(16);
path.join(dir_name)
}
pub fn random_lines_with_stream(
len: usize,
count: usize,
) -> (Vec<String>, impl Stream01<Item = Event, Error = ()>) {
let lines = (0..count).map(|_| random_string(len)).collect::<Vec<_>>();
let stream = stream01::iter_ok(lines.clone().into_iter().map(Event::from));
(lines, stream)
}
pub fn random_events_with_stream(
len: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || Event::from(random_string(len)))
}
pub fn | (
len: usize,
breadth: usize,
depth: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || {
let mut log = LogEvent::default();
let tree = random_pseudonested_map(len, breadth, depth);
for (k, v) in tree.into_iter() {
log.insert(k, v);
}
Event::Log(log)
})
}
pub fn random_string(len: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(len)
.collect::<String>()
}
pub fn random_lines(len: usize) -> impl Iterator<Item = String> {
std::iter::repeat(()).map(move |_| random_string(len))
}
pub fn random_map(max_size: usize, field_len: usize) -> HashMap<String, String> {
let size = thread_rng().gen_range(0, max_size);
(0..size)
.map(move |_| (random_string(field_len), random_string(field_len)))
.collect()
}
pub fn random_maps(
max_size: usize,
field_len: usize,
) -> impl Iterator<Item = HashMap<String, String>> {
iter::repeat(()).map(move |_| random_map(max_size, field_len))
}
pub fn collect_n<T>(mut rx: mpsc::Receiver<T>, n: usize) -> impl Future<Item = Vec<T>, Error = ()> {
let mut events = Vec::new();
future::poll_fn(move || {
while events.len() < n {
let e = try_ready!(rx.poll()).unwrap();
events.push(e);
}
Ok(Async::Ready(mem::replace(&mut events, Vec::new())))
})
}
pub fn lines_from_file<P: AsRef<Path>>(path: P) -> Vec<String> {
trace!(message = "Reading file.", path = %path.as_ref().display());
let mut file = File::open(path).unwrap();
let mut output = String::new();
file.read_to_string(&mut output).unwrap();
output.lines().map(|s| s.to_owned()).collect()
}
pub fn wait_for(mut f: impl FnMut() -> bool) {
let wait = std::time::Duration::from_millis(5);
let limit = std::time::Duration::from_secs(5);
let mut attempts = 0;
while !f() {
std::thread::sleep(wait);
attempts += 1;
if attempts * wait > limit {
panic!("timed out while waiting");
}
}
}
pub fn block_on<F, R, E>(future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let mut rt = runtime();
rt.block_on(future)
}
pub fn block_on_std<F>(future: F) -> F::Output
where
F: std::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let mut rt = runtime();
rt.block_on_std(future)
}
pub fn runtime() -> Runtime {
Runtime::single_threaded().unwrap()
}
pub fn wait_for_tcp(addr: SocketAddr) {
wait_for(|| std::net::TcpStream::connect(addr).is_ok())
}
pub fn wait_for_atomic_usize<T, F>(val: T, unblock: F)
where
T: AsRef<AtomicUsize>,
F: Fn(usize) -> bool,
{
let val = val.as_ref();
wait_for(|| unblock(val.load(Ordering::SeqCst)))
}
pub fn shutdown_on_idle(runtime: Runtime) {
block_on(
runtime
.shutdown_on_idle()
.timeout(std::time::Duration::from_secs(10)),
)
.unwrap()
}
#[derive(Debug)]
pub struct CollectN<S>
where
S: Stream01,
{
stream: Option<S>,
remaining: usize,
items: Option<Vec<S::Item>>,
}
impl<S: Stream01> CollectN<S> {
pub fn new(s: S, n: usize) -> Self {
Self {
stream: Some(s),
remaining: n,
items: Some(Vec::new()),
}
}
}
impl<S> Future for CollectN<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
let stream = self.stream.take();
if stream.is_none() {
panic!("Stream is missing");
}
let mut stream = stream.unwrap();
loop {
if self.remaining == 0 {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
match stream.poll() {
Ok(Async::Ready(Some(e))) => {
self.items.as_mut().unwrap().push(e);
self.remaining -= 1;
}
Ok(Async::Ready(None)) => {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
Ok(Async::NotReady) => {
self.stream.replace(stream);
return Ok(Async::NotReady);
}
Err(e) => {
return Err(e);
}
}
}
}
}
#[derive(Debug)]
pub struct CollectCurrent<S>
where
S: Stream01,
{
stream: Option<S>,
}
impl<S: Stream01> CollectCurrent<S> {
pub fn new(s: S) -> Self {
Self { stream: Some(s) }
}
}
impl<S> Future for CollectCurrent<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
if let Some(mut stream) = self.stream.take() {
let mut items = vec![];
loop {
match stream.poll() {
Ok(Async::Ready(Some(e))) => items.push(e),
Ok(Async::Ready(None)) | Ok(Async::NotReady) => {
return Ok(Async::Ready((stream, items)));
}
Err(e) => {
return Err(e);
}
}
}
} else {
panic!("Future already completed");
}
}
}
pub struct CountReceiver<T> {
count: Arc<AtomicUsize>,
trigger: oneshot::Sender<()>,
handle: JoinHandle<Vec<T>>,
}
impl<T: Send + 'static> CountReceiver<T> {
pub fn count(&self) -> usize {
self.count.load(Ordering::Relaxed)
}
pub async fn wait(self) -> Vec<T> {
let _ = self.trigger.send(());
self.handle.await.unwrap()
}
fn new<F, Fut>(make_fut: F) -> CountReceiver<T>
where
F: FnOnce(Arc<AtomicUsize>, oneshot::Receiver<()>) -> Fut,
Fut: std::future::Future<Output = Vec<T>> + Send + 'static,
{
let count = Arc::new(AtomicUsize::new(0));
let (trigger, tripwire) = oneshot::channel();
CountReceiver {
count: Arc::clone(&count),
trigger,
handle: tokio::spawn(make_fut(count, tripwire)),
}
}
}
impl CountReceiver<String> {
pub fn receive_lines(addr: SocketAddr) -> CountReceiver<String> {
CountReceiver::new(|count, tripwire| async move {
let mut listener = TcpListener::bind(addr).await.unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
#[cfg(all(feature = "tokio/uds", unix))]
pub fn receive_lines_unix<P>(path: P) -> CountReceiver<String>
where
P: AsRef<Path> + Send + 'static,
{
CountReceiver::new(|count, tripwire| async move {
let mut listener = tokio::net::UnixListener::bind(path).unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
async fn receive_lines_stream<S, T>(
stream: S,
count: Arc<AtomicUsize>,
tripwire: oneshot::Receiver<()>,
) -> Vec<String>
where
S: Stream<Item = IoResult<T>>,
T: AsyncWrite + AsyncRead,
{
stream
.take_until(tripwire)
.map_ok(|socket| FramedRead::new(socket, LinesCodec::new()))
.map(|x| x.unwrap())
.flatten()
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<String>>()
.await
}
}
impl CountReceiver<Event> {
pub fn receive_events<S>(stream: S) -> CountReceiver<Event>
where
S: Stream01<Item = Event> + Send + 'static,
<S as Stream01>::Error: std::fmt::Debug,
{
CountReceiver::new(|count, tripwire| async move {
stream
.compat()
.take_until(tripwire)
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<Event>>()
.await
})
}
}
fn random_events_with_stream_generic<F>(
count: usize,
generator: F,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>)
where
F: Fn() -> Event,
{
let events = (0..count).map(|_| generator()).collect::<Vec<_>>();
let stream = stream01::iter_ok(events.clone().into_iter());
(events, stream)
}
fn random_pseudonested_map(len: usize, breadth: usize, depth: usize) -> HashMap<String, String> {
if breadth == 0 || depth == 0 {
return HashMap::new();
}
if depth == 1 {
let mut leaf = HashMap::new();
leaf.insert(random_string(len), random_string(len));
return leaf;
}
let mut tree = HashMap::new();
for _ in 0..breadth {
let prefix = random_string(len);
let subtree = random_pseudonested_map(len, breadth, depth - 1);
let subtree: HashMap<String, String> = subtree
.into_iter()
.map(|(mut key, value)| {
key.insert(0, '.');
key.insert_str(0, &prefix[..]);
(key, value)
})
.collect();
for (key, value) in subtree.into_iter() {
tree.insert(key, value);
}
}
tree
}
| random_nested_events_with_stream | identifier_name |
test_util.rs | use crate::runtime::Runtime;
use crate::{event::LogEvent, Event};
use futures::{compat::Stream01CompatExt, stream, SinkExt, Stream, StreamExt, TryStreamExt};
use futures01::{
future, stream as stream01, sync::mpsc, try_ready, Async, Future, Poll, Stream as Stream01,
};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use std::{
collections::HashMap,
convert::Infallible,
fs::File,
io::Read,
iter, mem,
net::{Shutdown, SocketAddr},
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::Arc,
};
use tokio::{
io::{AsyncRead, AsyncWrite, Result as IoResult},
net::{TcpListener, TcpStream},
sync::oneshot,
task::JoinHandle,
};
use tokio01::util::FutureExt;
use tokio_util::codec::{Encoder, FramedRead, FramedWrite, LinesCodec};
#[macro_export]
macro_rules! assert_downcast_matches {
($e:expr, $t:ty, $v:pat) => {{
match $e.downcast_ref::<$t>() {
Some($v) => (),
got => panic!("assertion failed: got wrong error variant {:?}", got),
}
}};
}
static NEXT_PORT: AtomicUsize = AtomicUsize::new(1234);
pub fn next_addr() -> SocketAddr {
use std::net::{IpAddr, Ipv4Addr};
let port = NEXT_PORT.fetch_add(1, Ordering::AcqRel) as u16;
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)
}
pub fn trace_init() {
let env = std::env::var("TEST_LOG").unwrap_or_else(|_| "off".to_string());
let subscriber = tracing_subscriber::FmtSubscriber::builder()
.with_env_filter(env)
.finish();
let _ = tracing_log::LogTracer::init();
let _ = tracing::dispatcher::set_global_default(tracing::Dispatch::new(subscriber));
}
pub async fn send_lines(
addr: SocketAddr,
lines: impl IntoIterator<Item = String>,
) -> Result<(), Infallible> {
send_encodable(addr, LinesCodec::new(), lines).await
}
pub async fn send_encodable<I, E: From<std::io::Error> + std::fmt::Debug>(
addr: SocketAddr,
encoder: impl Encoder<I, Error = E>,
lines: impl IntoIterator<Item = I>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut sink = FramedWrite::new(stream, encoder);
let mut lines = stream::iter(lines.into_iter()).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub async fn send_lines_tls(
addr: SocketAddr,
host: String,
lines: impl Iterator<Item = String>,
) -> Result<(), Infallible> {
let stream = TcpStream::connect(&addr).await.unwrap();
let mut connector = SslConnector::builder(SslMethod::tls()).unwrap();
connector.set_verify(SslVerifyMode::NONE);
let config = connector.build().configure().unwrap();
let stream = tokio_openssl::connect(config, &host, stream).await.unwrap();
let mut sink = FramedWrite::new(stream, LinesCodec::new());
let mut lines = stream::iter(lines).map(Ok);
sink.send_all(&mut lines).await.unwrap();
let stream = sink.get_mut().get_mut();
stream.shutdown(Shutdown::Both).unwrap();
Ok(())
}
pub fn temp_file() -> PathBuf {
let path = std::env::temp_dir();
let file_name = random_string(16);
path.join(file_name + ".log")
}
pub fn temp_dir() -> PathBuf {
let path = std::env::temp_dir();
let dir_name = random_string(16);
path.join(dir_name)
}
pub fn random_lines_with_stream(
len: usize,
count: usize,
) -> (Vec<String>, impl Stream01<Item = Event, Error = ()>) {
let lines = (0..count).map(|_| random_string(len)).collect::<Vec<_>>();
let stream = stream01::iter_ok(lines.clone().into_iter().map(Event::from));
(lines, stream)
}
pub fn random_events_with_stream(
len: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || Event::from(random_string(len)))
}
pub fn random_nested_events_with_stream(
len: usize,
breadth: usize,
depth: usize,
count: usize,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>) {
random_events_with_stream_generic(count, move || {
let mut log = LogEvent::default();
let tree = random_pseudonested_map(len, breadth, depth);
for (k, v) in tree.into_iter() {
log.insert(k, v);
}
Event::Log(log)
})
}
pub fn random_string(len: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(len)
.collect::<String>()
}
pub fn random_lines(len: usize) -> impl Iterator<Item = String> {
std::iter::repeat(()).map(move |_| random_string(len))
}
pub fn random_map(max_size: usize, field_len: usize) -> HashMap<String, String> {
let size = thread_rng().gen_range(0, max_size);
(0..size)
.map(move |_| (random_string(field_len), random_string(field_len)))
.collect()
}
pub fn random_maps(
max_size: usize,
field_len: usize,
) -> impl Iterator<Item = HashMap<String, String>> {
iter::repeat(()).map(move |_| random_map(max_size, field_len))
}
pub fn collect_n<T>(mut rx: mpsc::Receiver<T>, n: usize) -> impl Future<Item = Vec<T>, Error = ()> {
let mut events = Vec::new();
future::poll_fn(move || {
while events.len() < n {
let e = try_ready!(rx.poll()).unwrap();
events.push(e);
}
Ok(Async::Ready(mem::replace(&mut events, Vec::new())))
})
}
pub fn lines_from_file<P: AsRef<Path>>(path: P) -> Vec<String> {
trace!(message = "Reading file.", path = %path.as_ref().display());
let mut file = File::open(path).unwrap();
let mut output = String::new();
file.read_to_string(&mut output).unwrap();
output.lines().map(|s| s.to_owned()).collect()
}
pub fn wait_for(mut f: impl FnMut() -> bool) {
let wait = std::time::Duration::from_millis(5);
let limit = std::time::Duration::from_secs(5);
let mut attempts = 0;
while !f() {
std::thread::sleep(wait);
attempts += 1;
if attempts * wait > limit {
panic!("timed out while waiting");
}
}
}
pub fn block_on<F, R, E>(future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let mut rt = runtime();
rt.block_on(future)
}
pub fn block_on_std<F>(future: F) -> F::Output
where
F: std::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let mut rt = runtime();
rt.block_on_std(future)
}
pub fn runtime() -> Runtime {
Runtime::single_threaded().unwrap()
}
pub fn wait_for_tcp(addr: SocketAddr) {
wait_for(|| std::net::TcpStream::connect(addr).is_ok())
}
pub fn wait_for_atomic_usize<T, F>(val: T, unblock: F)
where
T: AsRef<AtomicUsize>,
F: Fn(usize) -> bool,
{
let val = val.as_ref();
wait_for(|| unblock(val.load(Ordering::SeqCst)))
}
pub fn shutdown_on_idle(runtime: Runtime) {
block_on(
runtime
.shutdown_on_idle()
.timeout(std::time::Duration::from_secs(10)),
)
.unwrap()
}
#[derive(Debug)]
pub struct CollectN<S>
where
S: Stream01,
{
stream: Option<S>,
remaining: usize,
items: Option<Vec<S::Item>>,
}
impl<S: Stream01> CollectN<S> {
pub fn new(s: S, n: usize) -> Self {
Self {
stream: Some(s),
remaining: n,
items: Some(Vec::new()),
}
}
}
impl<S> Future for CollectN<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
let stream = self.stream.take();
if stream.is_none() {
panic!("Stream is missing");
}
let mut stream = stream.unwrap();
loop {
if self.remaining == 0 {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
match stream.poll() {
Ok(Async::Ready(Some(e))) => {
self.items.as_mut().unwrap().push(e);
self.remaining -= 1;
}
Ok(Async::Ready(None)) => {
return Ok(Async::Ready((stream, self.items.take().unwrap())));
}
Ok(Async::NotReady) => {
self.stream.replace(stream);
return Ok(Async::NotReady);
}
Err(e) => {
return Err(e);
}
}
}
}
}
#[derive(Debug)]
pub struct CollectCurrent<S>
where
S: Stream01,
{
stream: Option<S>,
}
impl<S: Stream01> CollectCurrent<S> {
pub fn new(s: S) -> Self {
Self { stream: Some(s) }
}
}
impl<S> Future for CollectCurrent<S>
where
S: Stream01,
{
type Item = (S, Vec<S::Item>);
type Error = S::Error;
fn poll(&mut self) -> Poll<(S, Vec<S::Item>), S::Error> {
if let Some(mut stream) = self.stream.take() {
let mut items = vec![];
loop {
match stream.poll() {
Ok(Async::Ready(Some(e))) => items.push(e),
Ok(Async::Ready(None)) | Ok(Async::NotReady) => {
return Ok(Async::Ready((stream, items)));
}
Err(e) => {
return Err(e);
}
}
}
} else |
}
}
pub struct CountReceiver<T> {
count: Arc<AtomicUsize>,
trigger: oneshot::Sender<()>,
handle: JoinHandle<Vec<T>>,
}
impl<T: Send + 'static> CountReceiver<T> {
pub fn count(&self) -> usize {
self.count.load(Ordering::Relaxed)
}
pub async fn wait(self) -> Vec<T> {
let _ = self.trigger.send(());
self.handle.await.unwrap()
}
fn new<F, Fut>(make_fut: F) -> CountReceiver<T>
where
F: FnOnce(Arc<AtomicUsize>, oneshot::Receiver<()>) -> Fut,
Fut: std::future::Future<Output = Vec<T>> + Send + 'static,
{
let count = Arc::new(AtomicUsize::new(0));
let (trigger, tripwire) = oneshot::channel();
CountReceiver {
count: Arc::clone(&count),
trigger,
handle: tokio::spawn(make_fut(count, tripwire)),
}
}
}
impl CountReceiver<String> {
pub fn receive_lines(addr: SocketAddr) -> CountReceiver<String> {
CountReceiver::new(|count, tripwire| async move {
let mut listener = TcpListener::bind(addr).await.unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
#[cfg(all(feature = "tokio/uds", unix))]
pub fn receive_lines_unix<P>(path: P) -> CountReceiver<String>
where
P: AsRef<Path> + Send + 'static,
{
CountReceiver::new(|count, tripwire| async move {
let mut listener = tokio::net::UnixListener::bind(path).unwrap();
CountReceiver::receive_lines_stream(listener.incoming(), count, tripwire).await
})
}
async fn receive_lines_stream<S, T>(
stream: S,
count: Arc<AtomicUsize>,
tripwire: oneshot::Receiver<()>,
) -> Vec<String>
where
S: Stream<Item = IoResult<T>>,
T: AsyncWrite + AsyncRead,
{
stream
.take_until(tripwire)
.map_ok(|socket| FramedRead::new(socket, LinesCodec::new()))
.map(|x| x.unwrap())
.flatten()
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<String>>()
.await
}
}
impl CountReceiver<Event> {
pub fn receive_events<S>(stream: S) -> CountReceiver<Event>
where
S: Stream01<Item = Event> + Send + 'static,
<S as Stream01>::Error: std::fmt::Debug,
{
CountReceiver::new(|count, tripwire| async move {
stream
.compat()
.take_until(tripwire)
.map(|x| x.unwrap())
.inspect(move |_| {
count.fetch_add(1, Ordering::Relaxed);
})
.collect::<Vec<Event>>()
.await
})
}
}
fn random_events_with_stream_generic<F>(
count: usize,
generator: F,
) -> (Vec<Event>, impl Stream01<Item = Event, Error = ()>)
where
F: Fn() -> Event,
{
let events = (0..count).map(|_| generator()).collect::<Vec<_>>();
let stream = stream01::iter_ok(events.clone().into_iter());
(events, stream)
}
fn random_pseudonested_map(len: usize, breadth: usize, depth: usize) -> HashMap<String, String> {
if breadth == 0 || depth == 0 {
return HashMap::new();
}
if depth == 1 {
let mut leaf = HashMap::new();
leaf.insert(random_string(len), random_string(len));
return leaf;
}
let mut tree = HashMap::new();
for _ in 0..breadth {
let prefix = random_string(len);
let subtree = random_pseudonested_map(len, breadth, depth - 1);
let subtree: HashMap<String, String> = subtree
.into_iter()
.map(|(mut key, value)| {
key.insert(0, '.');
key.insert_str(0, &prefix[..]);
(key, value)
})
.collect();
for (key, value) in subtree.into_iter() {
tree.insert(key, value);
}
}
tree
}
| {
panic!("Future already completed");
} | conditional_block |
tree_entries.go | package gitbase
import (
"bytes"
"io"
"strconv"
"gopkg.in/src-d/go-mysql-server.v0/sql"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
type treeEntriesTable struct {
partitioned
filters []sql.Expression
index sql.IndexLookup
}
// TreeEntriesSchema is the schema for the tree entries table.
var TreeEntriesSchema = sql.Schema{
{Name: "repository_id", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_name", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "blob_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_mode", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
}
func newTreeEntriesTable() *treeEntriesTable {
return new(treeEntriesTable)
}
var _ Table = (*treeEntriesTable)(nil)
var _ Squashable = (*treeEntriesTable)(nil)
func (treeEntriesTable) isSquashable() {}
func (treeEntriesTable) isGitbaseTable() {}
func (treeEntriesTable) Name() string {
return TreeEntriesTableName
}
func (treeEntriesTable) Schema() sql.Schema {
return TreeEntriesSchema
}
func (r *treeEntriesTable) WithFilters(filters []sql.Expression) sql.Table {
nt := *r
nt.filters = filters
return &nt
}
func (r *treeEntriesTable) WithIndexLookup(idx sql.IndexLookup) sql.Table {
nt := *r
nt.index = idx
return &nt
}
func (r *treeEntriesTable) IndexLookup() sql.IndexLookup { return r.index }
func (r *treeEntriesTable) Filters() []sql.Expression { return r.filters }
func (r *treeEntriesTable) PartitionRows(
ctx *sql.Context,
p sql.Partition,
) (sql.RowIter, error) {
repo, err := getPartitionRepo(ctx, p)
if err != nil {
return nil, err
}
span, ctx := ctx.Span("gitbase.TreeEntriesTable")
iter, err := rowIterWithSelectors(
ctx, TreeEntriesSchema, TreeEntriesTableName,
r.filters,
r.handledColumns(),
func(selectors selectors) (sql.RowIter, error) {
hashes, err := selectors.textValues("tree_hash")
if err != nil {
return nil, err
}
if r.index != nil {
values, err := r.index.Values(p)
if err != nil {
return nil, err
}
session, err := getSession(ctx)
if err != nil {
return nil, err
}
return newTreeEntriesIndexIter(
values,
session.Pool,
stringsToHashes(hashes),
), nil
}
return &treeEntriesRowIter{
repo: repo,
hashes: stringsToHashes(hashes),
skipGitErrors: shouldSkipErrors(ctx),
}, nil
},
)
if err != nil {
span.Finish()
return nil, err
}
return sql.NewSpanIter(span, iter), nil
}
func (treeEntriesTable) HandledFilters(filters []sql.Expression) []sql.Expression {
return handledFilters(TreeEntriesTableName, TreeEntriesSchema, filters)
}
func (treeEntriesTable) handledColumns() []string {
return []string{"tree_hash"}
}
func (r treeEntriesTable) String() string {
return printTable(TreeEntriesTableName, TreeEntriesSchema)
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (*treeEntriesTable) IndexKeyValues(
ctx *sql.Context,
colNames []string,
) (sql.PartitionIndexKeyValueIter, error) {
return newPartitionedIndexKeyValueIter(
ctx,
newTreeEntriesTable(),
colNames,
newTreeEntriesKeyValueIter,
)
}
type treeEntriesRowIter struct {
hashes []plumbing.Hash
pos int
tree *object.Tree
iter *object.TreeIter
cursor int
repo *Repository
skipGitErrors bool
}
func (i *treeEntriesRowIter) Next() (sql.Row, error) {
if len(i.hashes) > 0 {
return i.nextByHash()
}
return i.next()
}
func (i *treeEntriesRowIter) next() (sql.Row, error) {
for {
if i.iter == nil {
var err error
i.iter, err = i.repo.TreeObjects()
if err != nil {
if i.skipGitErrors {
return nil, io.EOF
}
return nil, err
}
}
if i.tree == nil {
var err error
i.tree, err = i.iter.Next()
if err != nil {
if err != io.EOF && i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) nextByHash() (sql.Row, error) {
for {
if i.pos >= len(i.hashes) && i.tree == nil {
return nil, io.EOF
}
if i.tree == nil {
var err error
i.tree, err = i.repo.TreeObject(i.hashes[i.pos])
i.pos++
if err != nil {
if err == plumbing.ErrObjectNotFound || i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) Close() error {
if i.iter != nil {
i.iter.Close()
}
i.repo.Close()
return nil
}
// TreeEntry is a tree entry object.
type TreeEntry struct {
TreeHash plumbing.Hash
object.TreeEntry
}
func treeEntryToRow(repoID string, entry *TreeEntry) sql.Row {
return sql.NewRow(
repoID,
entry.Name,
entry.Hash.String(),
entry.TreeHash.String(),
strconv.FormatInt(int64(entry.Mode), 8),
)
}
type treeEntriesIndexKey struct {
Repository string
Packfile string
Offset int64
Pos int
Hash string
}
func (k *treeEntriesIndexKey) encode() ([]byte, error) {
var buf bytes.Buffer
writeString(&buf, k.Repository)
writeHash(&buf, k.Packfile)
writeBool(&buf, k.Offset >= 0)
if k.Offset >= 0 {
writeInt64(&buf, k.Offset)
} else {
if err := writeHash(&buf, k.Hash); err != nil {
return nil, err
}
}
writeInt64(&buf, int64(k.Pos))
return buf.Bytes(), nil
}
func (k *treeEntriesIndexKey) decode(data []byte) error {
var buf = bytes.NewBuffer(data)
var err error
if k.Repository, err = readString(buf); err != nil {
return err
}
if k.Packfile, err = readHash(buf); err != nil {
return err
}
ok, err := readBool(buf)
if err != nil {
return err
}
if ok {
k.Hash = ""
if k.Offset, err = readInt64(buf); err != nil {
return err
}
} else {
k.Offset = -1
if k.Hash, err = readHash(buf); err != nil {
return err
}
}
pos, err := readInt64(buf)
if err != nil {
return err
}
k.Pos = int(pos)
return nil
}
type treeEntriesKeyValueIter struct {
pool *RepositoryPool
repo *Repository
idx *repositoryIndex
trees *object.TreeIter
tree *object.Tree
pos int
columns []string
}
func newTreeEntriesKeyValueIter(
pool *RepositoryPool,
repo *Repository,
columns []string,
) (sql.IndexKeyValueIter, error) {
trees, err := repo.TreeObjects()
if err != nil {
return nil, err
}
r := pool.repositories[repo.ID]
idx, err := newRepositoryIndex(r)
if err != nil {
return nil, err
}
return &treeEntriesKeyValueIter{
pool: pool,
repo: repo,
columns: columns,
idx: idx,
trees: trees,
}, nil
}
func (i *treeEntriesKeyValueIter) Next() ([]interface{}, []byte, error) {
for {
if i.tree == nil {
var err error
i.tree, err = i.trees.Next()
if err != nil {
return nil, nil, err
}
i.pos = 0
}
if i.pos >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := i.tree.Entries[i.pos]
i.pos++
offset, packfile, err := i.idx.find(i.tree.Hash)
if err != nil {
return nil, nil, err
}
var hash string
if offset < 0 {
hash = i.tree.Hash.String()
}
key, err := encodeIndexKey(&treeEntriesIndexKey{
Repository: i.repo.ID,
Packfile: packfile.String(),
Offset: offset,
Pos: i.pos - 1,
Hash: hash,
})
if err != nil {
return nil, nil, err
}
row := treeEntryToRow(i.repo.ID, &TreeEntry{i.tree.Hash, entry})
values, err := rowIndexValues(row, i.columns, TreeEntriesSchema)
if err != nil {
return nil, nil, err
}
return values, key, nil
}
}
func (i *treeEntriesKeyValueIter) Close() error |
type treeEntriesIndexIter struct {
index sql.IndexValueIter
decoder *objectDecoder
prevTreeOffset int64
hashes []plumbing.Hash
tree *object.Tree // holds the last obtained tree
entry *TreeEntry // holds the last obtained tree entry
repoID string // holds the repo ID of the last tree entry processed
}
func newTreeEntriesIndexIter(
index sql.IndexValueIter,
pool *RepositoryPool,
hashes []plumbing.Hash,
) *treeEntriesIndexIter {
return &treeEntriesIndexIter{
index: index,
decoder: newObjectDecoder(pool),
hashes: hashes,
}
}
func (i *treeEntriesIndexIter) Next() (sql.Row, error) {
for {
var err error
var data []byte
defer closeIndexOnError(&err, i.index)
data, err = i.index.Next()
if err != nil {
return nil, err
}
var key treeEntriesIndexKey
if err = decodeIndexKey(data, &key); err != nil {
return nil, err
}
i.repoID = key.Repository
var tree *object.Tree
if i.prevTreeOffset == key.Offset && key.Offset >= 0 ||
(i.tree != nil && i.tree.Hash.String() == key.Hash) {
tree = i.tree
} else {
var obj object.Object
obj, err = i.decoder.decode(
key.Repository,
plumbing.NewHash(key.Packfile),
key.Offset,
plumbing.NewHash(key.Hash),
)
if err != nil {
return nil, err
}
var ok bool
i.tree, ok = obj.(*object.Tree)
if !ok {
err = ErrInvalidObjectType.New(obj, "*object.Tree")
return nil, err
}
if len(i.hashes) > 0 && !hashContains(i.hashes, i.tree.Hash) {
continue
}
tree = i.tree
}
i.prevTreeOffset = key.Offset
i.entry = &TreeEntry{tree.Hash, tree.Entries[key.Pos]}
return treeEntryToRow(key.Repository, i.entry), nil
}
}
func (i *treeEntriesIndexIter) Close() error {
if i.decoder != nil {
if err := i.decoder.Close(); err != nil {
_ = i.index.Close()
return err
}
}
return i.index.Close()
}
| {
if i.trees != nil {
i.trees.Close()
}
if i.idx != nil {
i.idx.Close()
}
if i.repo != nil {
i.repo.Close()
}
return nil
} | identifier_body |
tree_entries.go | package gitbase
import (
"bytes"
"io"
"strconv"
"gopkg.in/src-d/go-mysql-server.v0/sql"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
type treeEntriesTable struct {
partitioned
filters []sql.Expression
index sql.IndexLookup
}
// TreeEntriesSchema is the schema for the tree entries table.
var TreeEntriesSchema = sql.Schema{
{Name: "repository_id", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_name", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "blob_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_mode", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
}
func newTreeEntriesTable() *treeEntriesTable {
return new(treeEntriesTable)
}
var _ Table = (*treeEntriesTable)(nil)
var _ Squashable = (*treeEntriesTable)(nil)
func (treeEntriesTable) isSquashable() {}
func (treeEntriesTable) isGitbaseTable() {}
func (treeEntriesTable) Name() string {
return TreeEntriesTableName
}
func (treeEntriesTable) Schema() sql.Schema {
return TreeEntriesSchema
}
func (r *treeEntriesTable) WithFilters(filters []sql.Expression) sql.Table {
nt := *r
nt.filters = filters
return &nt
}
func (r *treeEntriesTable) WithIndexLookup(idx sql.IndexLookup) sql.Table {
nt := *r
nt.index = idx
return &nt
}
func (r *treeEntriesTable) IndexLookup() sql.IndexLookup { return r.index }
func (r *treeEntriesTable) Filters() []sql.Expression { return r.filters }
func (r *treeEntriesTable) PartitionRows(
ctx *sql.Context,
p sql.Partition,
) (sql.RowIter, error) {
repo, err := getPartitionRepo(ctx, p)
if err != nil {
return nil, err
}
span, ctx := ctx.Span("gitbase.TreeEntriesTable")
iter, err := rowIterWithSelectors(
ctx, TreeEntriesSchema, TreeEntriesTableName,
r.filters,
r.handledColumns(),
func(selectors selectors) (sql.RowIter, error) {
hashes, err := selectors.textValues("tree_hash")
if err != nil {
return nil, err
}
if r.index != nil {
values, err := r.index.Values(p)
if err != nil {
return nil, err
}
session, err := getSession(ctx)
if err != nil {
return nil, err
}
return newTreeEntriesIndexIter(
values,
session.Pool,
stringsToHashes(hashes),
), nil
}
return &treeEntriesRowIter{
repo: repo,
hashes: stringsToHashes(hashes),
skipGitErrors: shouldSkipErrors(ctx),
}, nil
},
)
if err != nil {
span.Finish()
return nil, err
}
return sql.NewSpanIter(span, iter), nil
}
func (treeEntriesTable) HandledFilters(filters []sql.Expression) []sql.Expression {
return handledFilters(TreeEntriesTableName, TreeEntriesSchema, filters)
}
func (treeEntriesTable) handledColumns() []string {
return []string{"tree_hash"}
}
func (r treeEntriesTable) String() string {
return printTable(TreeEntriesTableName, TreeEntriesSchema)
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (*treeEntriesTable) IndexKeyValues(
ctx *sql.Context,
colNames []string,
) (sql.PartitionIndexKeyValueIter, error) {
return newPartitionedIndexKeyValueIter(
ctx,
newTreeEntriesTable(),
colNames,
newTreeEntriesKeyValueIter,
)
}
type treeEntriesRowIter struct {
hashes []plumbing.Hash
pos int
tree *object.Tree
iter *object.TreeIter
cursor int
repo *Repository
skipGitErrors bool
}
func (i *treeEntriesRowIter) Next() (sql.Row, error) {
if len(i.hashes) > 0 {
return i.nextByHash()
}
return i.next()
}
func (i *treeEntriesRowIter) next() (sql.Row, error) {
for {
if i.iter == nil {
var err error
i.iter, err = i.repo.TreeObjects()
if err != nil {
if i.skipGitErrors {
return nil, io.EOF
}
return nil, err
}
}
if i.tree == nil {
var err error
i.tree, err = i.iter.Next()
if err != nil {
if err != io.EOF && i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) nextByHash() (sql.Row, error) {
for {
if i.pos >= len(i.hashes) && i.tree == nil {
return nil, io.EOF
}
if i.tree == nil {
var err error
i.tree, err = i.repo.TreeObject(i.hashes[i.pos])
i.pos++
if err != nil {
if err == plumbing.ErrObjectNotFound || i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) Close() error {
if i.iter != nil {
i.iter.Close()
}
i.repo.Close()
return nil
}
// TreeEntry is a tree entry object.
type TreeEntry struct {
TreeHash plumbing.Hash
object.TreeEntry
}
func treeEntryToRow(repoID string, entry *TreeEntry) sql.Row {
return sql.NewRow(
repoID,
entry.Name,
entry.Hash.String(),
entry.TreeHash.String(),
strconv.FormatInt(int64(entry.Mode), 8),
)
}
type treeEntriesIndexKey struct {
Repository string
Packfile string
Offset int64
Pos int
Hash string
}
func (k *treeEntriesIndexKey) encode() ([]byte, error) {
var buf bytes.Buffer
writeString(&buf, k.Repository)
writeHash(&buf, k.Packfile)
writeBool(&buf, k.Offset >= 0)
if k.Offset >= 0 {
writeInt64(&buf, k.Offset)
} else {
if err := writeHash(&buf, k.Hash); err != nil {
return nil, err
}
}
writeInt64(&buf, int64(k.Pos))
return buf.Bytes(), nil
}
func (k *treeEntriesIndexKey) decode(data []byte) error {
var buf = bytes.NewBuffer(data)
var err error
if k.Repository, err = readString(buf); err != nil {
return err
}
if k.Packfile, err = readHash(buf); err != nil {
return err
}
ok, err := readBool(buf)
if err != nil {
return err
}
if ok {
k.Hash = ""
if k.Offset, err = readInt64(buf); err != nil |
} else {
k.Offset = -1
if k.Hash, err = readHash(buf); err != nil {
return err
}
}
pos, err := readInt64(buf)
if err != nil {
return err
}
k.Pos = int(pos)
return nil
}
type treeEntriesKeyValueIter struct {
pool *RepositoryPool
repo *Repository
idx *repositoryIndex
trees *object.TreeIter
tree *object.Tree
pos int
columns []string
}
func newTreeEntriesKeyValueIter(
pool *RepositoryPool,
repo *Repository,
columns []string,
) (sql.IndexKeyValueIter, error) {
trees, err := repo.TreeObjects()
if err != nil {
return nil, err
}
r := pool.repositories[repo.ID]
idx, err := newRepositoryIndex(r)
if err != nil {
return nil, err
}
return &treeEntriesKeyValueIter{
pool: pool,
repo: repo,
columns: columns,
idx: idx,
trees: trees,
}, nil
}
func (i *treeEntriesKeyValueIter) Next() ([]interface{}, []byte, error) {
for {
if i.tree == nil {
var err error
i.tree, err = i.trees.Next()
if err != nil {
return nil, nil, err
}
i.pos = 0
}
if i.pos >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := i.tree.Entries[i.pos]
i.pos++
offset, packfile, err := i.idx.find(i.tree.Hash)
if err != nil {
return nil, nil, err
}
var hash string
if offset < 0 {
hash = i.tree.Hash.String()
}
key, err := encodeIndexKey(&treeEntriesIndexKey{
Repository: i.repo.ID,
Packfile: packfile.String(),
Offset: offset,
Pos: i.pos - 1,
Hash: hash,
})
if err != nil {
return nil, nil, err
}
row := treeEntryToRow(i.repo.ID, &TreeEntry{i.tree.Hash, entry})
values, err := rowIndexValues(row, i.columns, TreeEntriesSchema)
if err != nil {
return nil, nil, err
}
return values, key, nil
}
}
func (i *treeEntriesKeyValueIter) Close() error {
if i.trees != nil {
i.trees.Close()
}
if i.idx != nil {
i.idx.Close()
}
if i.repo != nil {
i.repo.Close()
}
return nil
}
type treeEntriesIndexIter struct {
index sql.IndexValueIter
decoder *objectDecoder
prevTreeOffset int64
hashes []plumbing.Hash
tree *object.Tree // holds the last obtained tree
entry *TreeEntry // holds the last obtained tree entry
repoID string // holds the repo ID of the last tree entry processed
}
func newTreeEntriesIndexIter(
index sql.IndexValueIter,
pool *RepositoryPool,
hashes []plumbing.Hash,
) *treeEntriesIndexIter {
return &treeEntriesIndexIter{
index: index,
decoder: newObjectDecoder(pool),
hashes: hashes,
}
}
func (i *treeEntriesIndexIter) Next() (sql.Row, error) {
for {
var err error
var data []byte
defer closeIndexOnError(&err, i.index)
data, err = i.index.Next()
if err != nil {
return nil, err
}
var key treeEntriesIndexKey
if err = decodeIndexKey(data, &key); err != nil {
return nil, err
}
i.repoID = key.Repository
var tree *object.Tree
if i.prevTreeOffset == key.Offset && key.Offset >= 0 ||
(i.tree != nil && i.tree.Hash.String() == key.Hash) {
tree = i.tree
} else {
var obj object.Object
obj, err = i.decoder.decode(
key.Repository,
plumbing.NewHash(key.Packfile),
key.Offset,
plumbing.NewHash(key.Hash),
)
if err != nil {
return nil, err
}
var ok bool
i.tree, ok = obj.(*object.Tree)
if !ok {
err = ErrInvalidObjectType.New(obj, "*object.Tree")
return nil, err
}
if len(i.hashes) > 0 && !hashContains(i.hashes, i.tree.Hash) {
continue
}
tree = i.tree
}
i.prevTreeOffset = key.Offset
i.entry = &TreeEntry{tree.Hash, tree.Entries[key.Pos]}
return treeEntryToRow(key.Repository, i.entry), nil
}
}
func (i *treeEntriesIndexIter) Close() error {
if i.decoder != nil {
if err := i.decoder.Close(); err != nil {
_ = i.index.Close()
return err
}
}
return i.index.Close()
}
| {
return err
} | conditional_block |
tree_entries.go | package gitbase
import (
"bytes"
"io"
"strconv"
"gopkg.in/src-d/go-mysql-server.v0/sql"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
type treeEntriesTable struct {
partitioned
filters []sql.Expression
index sql.IndexLookup
}
// TreeEntriesSchema is the schema for the tree entries table.
var TreeEntriesSchema = sql.Schema{
{Name: "repository_id", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_name", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "blob_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_mode", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
}
func newTreeEntriesTable() *treeEntriesTable {
return new(treeEntriesTable)
}
var _ Table = (*treeEntriesTable)(nil)
var _ Squashable = (*treeEntriesTable)(nil)
func (treeEntriesTable) isSquashable() {}
func (treeEntriesTable) isGitbaseTable() {}
func (treeEntriesTable) Name() string {
return TreeEntriesTableName
}
func (treeEntriesTable) Schema() sql.Schema {
return TreeEntriesSchema
}
func (r *treeEntriesTable) WithFilters(filters []sql.Expression) sql.Table {
nt := *r
nt.filters = filters
return &nt
}
func (r *treeEntriesTable) WithIndexLookup(idx sql.IndexLookup) sql.Table {
nt := *r
nt.index = idx
return &nt
}
func (r *treeEntriesTable) IndexLookup() sql.IndexLookup { return r.index }
func (r *treeEntriesTable) Filters() []sql.Expression { return r.filters }
func (r *treeEntriesTable) PartitionRows(
ctx *sql.Context,
p sql.Partition,
) (sql.RowIter, error) {
repo, err := getPartitionRepo(ctx, p)
if err != nil {
return nil, err
}
span, ctx := ctx.Span("gitbase.TreeEntriesTable")
iter, err := rowIterWithSelectors(
ctx, TreeEntriesSchema, TreeEntriesTableName,
r.filters,
r.handledColumns(),
func(selectors selectors) (sql.RowIter, error) {
hashes, err := selectors.textValues("tree_hash")
if err != nil {
return nil, err
}
if r.index != nil {
values, err := r.index.Values(p)
if err != nil {
return nil, err
}
session, err := getSession(ctx)
if err != nil {
return nil, err
}
return newTreeEntriesIndexIter(
values,
session.Pool,
stringsToHashes(hashes),
), nil
}
return &treeEntriesRowIter{
repo: repo,
hashes: stringsToHashes(hashes),
skipGitErrors: shouldSkipErrors(ctx),
}, nil
},
)
if err != nil {
span.Finish()
return nil, err
}
return sql.NewSpanIter(span, iter), nil
}
func (treeEntriesTable) HandledFilters(filters []sql.Expression) []sql.Expression {
return handledFilters(TreeEntriesTableName, TreeEntriesSchema, filters)
}
func (treeEntriesTable) handledColumns() []string {
return []string{"tree_hash"}
}
func (r treeEntriesTable) String() string {
return printTable(TreeEntriesTableName, TreeEntriesSchema)
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (*treeEntriesTable) IndexKeyValues(
ctx *sql.Context,
colNames []string,
) (sql.PartitionIndexKeyValueIter, error) {
return newPartitionedIndexKeyValueIter(
ctx,
newTreeEntriesTable(),
colNames,
newTreeEntriesKeyValueIter,
)
}
type treeEntriesRowIter struct {
hashes []plumbing.Hash
pos int
tree *object.Tree
iter *object.TreeIter
cursor int
repo *Repository
skipGitErrors bool
}
func (i *treeEntriesRowIter) Next() (sql.Row, error) {
if len(i.hashes) > 0 {
return i.nextByHash()
}
return i.next()
}
func (i *treeEntriesRowIter) next() (sql.Row, error) {
for {
if i.iter == nil {
var err error
i.iter, err = i.repo.TreeObjects()
if err != nil {
if i.skipGitErrors {
return nil, io.EOF
}
return nil, err
}
}
if i.tree == nil {
var err error
i.tree, err = i.iter.Next()
if err != nil {
if err != io.EOF && i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) nextByHash() (sql.Row, error) {
for {
if i.pos >= len(i.hashes) && i.tree == nil {
return nil, io.EOF
}
if i.tree == nil {
var err error
i.tree, err = i.repo.TreeObject(i.hashes[i.pos])
i.pos++
if err != nil {
if err == plumbing.ErrObjectNotFound || i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) Close() error {
if i.iter != nil {
i.iter.Close()
}
i.repo.Close()
return nil
}
// TreeEntry is a tree entry object.
type TreeEntry struct {
TreeHash plumbing.Hash
object.TreeEntry
}
func treeEntryToRow(repoID string, entry *TreeEntry) sql.Row {
return sql.NewRow(
repoID,
entry.Name,
entry.Hash.String(),
entry.TreeHash.String(),
strconv.FormatInt(int64(entry.Mode), 8),
)
}
type treeEntriesIndexKey struct {
Repository string
Packfile string
Offset int64
Pos int
Hash string
}
func (k *treeEntriesIndexKey) encode() ([]byte, error) {
var buf bytes.Buffer
writeString(&buf, k.Repository)
writeHash(&buf, k.Packfile)
writeBool(&buf, k.Offset >= 0)
if k.Offset >= 0 {
writeInt64(&buf, k.Offset)
} else {
if err := writeHash(&buf, k.Hash); err != nil {
return nil, err
}
}
writeInt64(&buf, int64(k.Pos))
return buf.Bytes(), nil
}
func (k *treeEntriesIndexKey) decode(data []byte) error {
var buf = bytes.NewBuffer(data)
var err error
if k.Repository, err = readString(buf); err != nil {
return err
}
if k.Packfile, err = readHash(buf); err != nil {
return err
}
ok, err := readBool(buf)
if err != nil {
return err
}
if ok {
k.Hash = ""
if k.Offset, err = readInt64(buf); err != nil {
return err
}
} else {
k.Offset = -1
if k.Hash, err = readHash(buf); err != nil {
return err
}
}
pos, err := readInt64(buf)
if err != nil {
return err
}
k.Pos = int(pos)
return nil
}
type treeEntriesKeyValueIter struct {
pool *RepositoryPool
repo *Repository
idx *repositoryIndex
trees *object.TreeIter
tree *object.Tree
pos int
columns []string
}
func newTreeEntriesKeyValueIter(
pool *RepositoryPool,
repo *Repository,
columns []string,
) (sql.IndexKeyValueIter, error) {
trees, err := repo.TreeObjects()
if err != nil {
return nil, err
}
r := pool.repositories[repo.ID]
idx, err := newRepositoryIndex(r)
if err != nil {
return nil, err
}
return &treeEntriesKeyValueIter{
pool: pool,
repo: repo,
columns: columns,
idx: idx,
trees: trees,
}, nil
}
func (i *treeEntriesKeyValueIter) Next() ([]interface{}, []byte, error) {
for {
if i.tree == nil {
var err error
i.tree, err = i.trees.Next()
if err != nil {
return nil, nil, err
}
i.pos = 0
}
if i.pos >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := i.tree.Entries[i.pos]
i.pos++
offset, packfile, err := i.idx.find(i.tree.Hash)
if err != nil {
return nil, nil, err
}
var hash string
if offset < 0 {
hash = i.tree.Hash.String()
}
key, err := encodeIndexKey(&treeEntriesIndexKey{
Repository: i.repo.ID,
Packfile: packfile.String(),
Offset: offset,
Pos: i.pos - 1,
Hash: hash,
})
if err != nil {
return nil, nil, err
}
row := treeEntryToRow(i.repo.ID, &TreeEntry{i.tree.Hash, entry})
values, err := rowIndexValues(row, i.columns, TreeEntriesSchema)
if err != nil {
return nil, nil, err
}
return values, key, nil
}
}
func (i *treeEntriesKeyValueIter) | () error {
if i.trees != nil {
i.trees.Close()
}
if i.idx != nil {
i.idx.Close()
}
if i.repo != nil {
i.repo.Close()
}
return nil
}
type treeEntriesIndexIter struct {
index sql.IndexValueIter
decoder *objectDecoder
prevTreeOffset int64
hashes []plumbing.Hash
tree *object.Tree // holds the last obtained tree
entry *TreeEntry // holds the last obtained tree entry
repoID string // holds the repo ID of the last tree entry processed
}
func newTreeEntriesIndexIter(
index sql.IndexValueIter,
pool *RepositoryPool,
hashes []plumbing.Hash,
) *treeEntriesIndexIter {
return &treeEntriesIndexIter{
index: index,
decoder: newObjectDecoder(pool),
hashes: hashes,
}
}
func (i *treeEntriesIndexIter) Next() (sql.Row, error) {
for {
var err error
var data []byte
defer closeIndexOnError(&err, i.index)
data, err = i.index.Next()
if err != nil {
return nil, err
}
var key treeEntriesIndexKey
if err = decodeIndexKey(data, &key); err != nil {
return nil, err
}
i.repoID = key.Repository
var tree *object.Tree
if i.prevTreeOffset == key.Offset && key.Offset >= 0 ||
(i.tree != nil && i.tree.Hash.String() == key.Hash) {
tree = i.tree
} else {
var obj object.Object
obj, err = i.decoder.decode(
key.Repository,
plumbing.NewHash(key.Packfile),
key.Offset,
plumbing.NewHash(key.Hash),
)
if err != nil {
return nil, err
}
var ok bool
i.tree, ok = obj.(*object.Tree)
if !ok {
err = ErrInvalidObjectType.New(obj, "*object.Tree")
return nil, err
}
if len(i.hashes) > 0 && !hashContains(i.hashes, i.tree.Hash) {
continue
}
tree = i.tree
}
i.prevTreeOffset = key.Offset
i.entry = &TreeEntry{tree.Hash, tree.Entries[key.Pos]}
return treeEntryToRow(key.Repository, i.entry), nil
}
}
func (i *treeEntriesIndexIter) Close() error {
if i.decoder != nil {
if err := i.decoder.Close(); err != nil {
_ = i.index.Close()
return err
}
}
return i.index.Close()
}
| Close | identifier_name |
tree_entries.go | package gitbase
import (
"bytes"
"io"
"strconv"
"gopkg.in/src-d/go-mysql-server.v0/sql"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
type treeEntriesTable struct {
partitioned
filters []sql.Expression
index sql.IndexLookup
}
// TreeEntriesSchema is the schema for the tree entries table.
var TreeEntriesSchema = sql.Schema{
{Name: "repository_id", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_name", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "blob_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_hash", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
{Name: "tree_entry_mode", Type: sql.Text, Nullable: false, Source: TreeEntriesTableName},
}
func newTreeEntriesTable() *treeEntriesTable {
return new(treeEntriesTable)
}
var _ Table = (*treeEntriesTable)(nil)
var _ Squashable = (*treeEntriesTable)(nil)
func (treeEntriesTable) isSquashable() {}
func (treeEntriesTable) isGitbaseTable() {}
func (treeEntriesTable) Name() string {
return TreeEntriesTableName
}
func (treeEntriesTable) Schema() sql.Schema {
return TreeEntriesSchema
}
func (r *treeEntriesTable) WithFilters(filters []sql.Expression) sql.Table {
nt := *r
nt.filters = filters
return &nt
}
func (r *treeEntriesTable) WithIndexLookup(idx sql.IndexLookup) sql.Table {
nt := *r
nt.index = idx
return &nt
}
func (r *treeEntriesTable) IndexLookup() sql.IndexLookup { return r.index }
func (r *treeEntriesTable) Filters() []sql.Expression { return r.filters }
func (r *treeEntriesTable) PartitionRows(
ctx *sql.Context,
p sql.Partition,
) (sql.RowIter, error) {
repo, err := getPartitionRepo(ctx, p)
if err != nil {
return nil, err
}
span, ctx := ctx.Span("gitbase.TreeEntriesTable")
iter, err := rowIterWithSelectors(
ctx, TreeEntriesSchema, TreeEntriesTableName,
r.filters,
r.handledColumns(),
func(selectors selectors) (sql.RowIter, error) {
hashes, err := selectors.textValues("tree_hash")
if err != nil {
return nil, err
}
if r.index != nil {
values, err := r.index.Values(p)
if err != nil {
return nil, err
}
session, err := getSession(ctx)
if err != nil {
return nil, err
}
return newTreeEntriesIndexIter(
values,
session.Pool,
stringsToHashes(hashes),
), nil
}
return &treeEntriesRowIter{
repo: repo,
hashes: stringsToHashes(hashes),
skipGitErrors: shouldSkipErrors(ctx),
}, nil
},
)
if err != nil {
span.Finish()
return nil, err
}
return sql.NewSpanIter(span, iter), nil
}
func (treeEntriesTable) HandledFilters(filters []sql.Expression) []sql.Expression {
return handledFilters(TreeEntriesTableName, TreeEntriesSchema, filters)
}
func (treeEntriesTable) handledColumns() []string {
return []string{"tree_hash"}
}
func (r treeEntriesTable) String() string {
return printTable(TreeEntriesTableName, TreeEntriesSchema)
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (*treeEntriesTable) IndexKeyValues(
ctx *sql.Context,
colNames []string,
) (sql.PartitionIndexKeyValueIter, error) {
return newPartitionedIndexKeyValueIter(
ctx,
newTreeEntriesTable(),
colNames,
newTreeEntriesKeyValueIter,
)
}
type treeEntriesRowIter struct {
hashes []plumbing.Hash
pos int
tree *object.Tree
iter *object.TreeIter
cursor int
repo *Repository
skipGitErrors bool
}
func (i *treeEntriesRowIter) Next() (sql.Row, error) {
if len(i.hashes) > 0 {
return i.nextByHash()
}
return i.next()
}
func (i *treeEntriesRowIter) next() (sql.Row, error) {
for {
if i.iter == nil {
var err error
i.iter, err = i.repo.TreeObjects()
if err != nil {
if i.skipGitErrors {
return nil, io.EOF
}
return nil, err
}
}
if i.tree == nil {
var err error
i.tree, err = i.iter.Next()
if err != nil {
if err != io.EOF && i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil
}
}
func (i *treeEntriesRowIter) nextByHash() (sql.Row, error) {
for {
if i.pos >= len(i.hashes) && i.tree == nil {
return nil, io.EOF
}
if i.tree == nil {
var err error
i.tree, err = i.repo.TreeObject(i.hashes[i.pos])
i.pos++
if err != nil {
if err == plumbing.ErrObjectNotFound || i.skipGitErrors {
continue
}
return nil, err
}
i.cursor = 0
}
if i.cursor >= len(i.tree.Entries) {
i.tree = nil
continue
}
| }
}
func (i *treeEntriesRowIter) Close() error {
if i.iter != nil {
i.iter.Close()
}
i.repo.Close()
return nil
}
// TreeEntry is a tree entry object.
type TreeEntry struct {
TreeHash plumbing.Hash
object.TreeEntry
}
func treeEntryToRow(repoID string, entry *TreeEntry) sql.Row {
return sql.NewRow(
repoID,
entry.Name,
entry.Hash.String(),
entry.TreeHash.String(),
strconv.FormatInt(int64(entry.Mode), 8),
)
}
type treeEntriesIndexKey struct {
Repository string
Packfile string
Offset int64
Pos int
Hash string
}
func (k *treeEntriesIndexKey) encode() ([]byte, error) {
var buf bytes.Buffer
writeString(&buf, k.Repository)
writeHash(&buf, k.Packfile)
writeBool(&buf, k.Offset >= 0)
if k.Offset >= 0 {
writeInt64(&buf, k.Offset)
} else {
if err := writeHash(&buf, k.Hash); err != nil {
return nil, err
}
}
writeInt64(&buf, int64(k.Pos))
return buf.Bytes(), nil
}
func (k *treeEntriesIndexKey) decode(data []byte) error {
var buf = bytes.NewBuffer(data)
var err error
if k.Repository, err = readString(buf); err != nil {
return err
}
if k.Packfile, err = readHash(buf); err != nil {
return err
}
ok, err := readBool(buf)
if err != nil {
return err
}
if ok {
k.Hash = ""
if k.Offset, err = readInt64(buf); err != nil {
return err
}
} else {
k.Offset = -1
if k.Hash, err = readHash(buf); err != nil {
return err
}
}
pos, err := readInt64(buf)
if err != nil {
return err
}
k.Pos = int(pos)
return nil
}
type treeEntriesKeyValueIter struct {
pool *RepositoryPool
repo *Repository
idx *repositoryIndex
trees *object.TreeIter
tree *object.Tree
pos int
columns []string
}
func newTreeEntriesKeyValueIter(
pool *RepositoryPool,
repo *Repository,
columns []string,
) (sql.IndexKeyValueIter, error) {
trees, err := repo.TreeObjects()
if err != nil {
return nil, err
}
r := pool.repositories[repo.ID]
idx, err := newRepositoryIndex(r)
if err != nil {
return nil, err
}
return &treeEntriesKeyValueIter{
pool: pool,
repo: repo,
columns: columns,
idx: idx,
trees: trees,
}, nil
}
func (i *treeEntriesKeyValueIter) Next() ([]interface{}, []byte, error) {
for {
if i.tree == nil {
var err error
i.tree, err = i.trees.Next()
if err != nil {
return nil, nil, err
}
i.pos = 0
}
if i.pos >= len(i.tree.Entries) {
i.tree = nil
continue
}
entry := i.tree.Entries[i.pos]
i.pos++
offset, packfile, err := i.idx.find(i.tree.Hash)
if err != nil {
return nil, nil, err
}
var hash string
if offset < 0 {
hash = i.tree.Hash.String()
}
key, err := encodeIndexKey(&treeEntriesIndexKey{
Repository: i.repo.ID,
Packfile: packfile.String(),
Offset: offset,
Pos: i.pos - 1,
Hash: hash,
})
if err != nil {
return nil, nil, err
}
row := treeEntryToRow(i.repo.ID, &TreeEntry{i.tree.Hash, entry})
values, err := rowIndexValues(row, i.columns, TreeEntriesSchema)
if err != nil {
return nil, nil, err
}
return values, key, nil
}
}
func (i *treeEntriesKeyValueIter) Close() error {
if i.trees != nil {
i.trees.Close()
}
if i.idx != nil {
i.idx.Close()
}
if i.repo != nil {
i.repo.Close()
}
return nil
}
type treeEntriesIndexIter struct {
index sql.IndexValueIter
decoder *objectDecoder
prevTreeOffset int64
hashes []plumbing.Hash
tree *object.Tree // holds the last obtained tree
entry *TreeEntry // holds the last obtained tree entry
repoID string // holds the repo ID of the last tree entry processed
}
func newTreeEntriesIndexIter(
index sql.IndexValueIter,
pool *RepositoryPool,
hashes []plumbing.Hash,
) *treeEntriesIndexIter {
return &treeEntriesIndexIter{
index: index,
decoder: newObjectDecoder(pool),
hashes: hashes,
}
}
func (i *treeEntriesIndexIter) Next() (sql.Row, error) {
for {
var err error
var data []byte
defer closeIndexOnError(&err, i.index)
data, err = i.index.Next()
if err != nil {
return nil, err
}
var key treeEntriesIndexKey
if err = decodeIndexKey(data, &key); err != nil {
return nil, err
}
i.repoID = key.Repository
var tree *object.Tree
if i.prevTreeOffset == key.Offset && key.Offset >= 0 ||
(i.tree != nil && i.tree.Hash.String() == key.Hash) {
tree = i.tree
} else {
var obj object.Object
obj, err = i.decoder.decode(
key.Repository,
plumbing.NewHash(key.Packfile),
key.Offset,
plumbing.NewHash(key.Hash),
)
if err != nil {
return nil, err
}
var ok bool
i.tree, ok = obj.(*object.Tree)
if !ok {
err = ErrInvalidObjectType.New(obj, "*object.Tree")
return nil, err
}
if len(i.hashes) > 0 && !hashContains(i.hashes, i.tree.Hash) {
continue
}
tree = i.tree
}
i.prevTreeOffset = key.Offset
i.entry = &TreeEntry{tree.Hash, tree.Entries[key.Pos]}
return treeEntryToRow(key.Repository, i.entry), nil
}
}
func (i *treeEntriesIndexIter) Close() error {
if i.decoder != nil {
if err := i.decoder.Close(); err != nil {
_ = i.index.Close()
return err
}
}
return i.index.Close()
} | entry := &TreeEntry{i.tree.Hash, i.tree.Entries[i.cursor]}
i.cursor++
return treeEntryToRow(i.repo.ID, entry), nil | random_line_split |
api_op_CancelZonalShift.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package arczonalshift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/arczonalshift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Cancel a zonal shift in Amazon Route 53 Application Recovery Controller that
// you've started for a resource in your AWS account in an AWS Region.
func (c *Client) CancelZonalShift(ctx context.Context, params *CancelZonalShiftInput, optFns ...func(*Options)) (*CancelZonalShiftOutput, error) {
if params == nil {
params = &CancelZonalShiftInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CancelZonalShift", params, optFns, c.addOperationCancelZonalShiftMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CancelZonalShiftOutput)
out.ResultMetadata = metadata
return out, nil
}
type CancelZonalShiftInput struct {
// The internally-generated identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
noSmithyDocumentSerde
}
type CancelZonalShiftOutput struct {
// The Availability Zone that traffic is moved away from for a resource when you
// start a zonal shift. Until the zonal shift expires or you cancel it, traffic for
// the resource is instead moved to other Availability Zones in the AWS Region.
//
// This member is required.
AwayFrom *string
// A comment that you enter about the zonal shift. Only the latest comment is
// retained; no comment history is maintained. A new comment overwrites any
// existing comment string.
//
// This member is required.
Comment *string
// The expiry time (expiration time) for the zonal shift. A zonal shift is
// temporary and must be set to expire when you start the zonal shift. You can
// initially set a zonal shift to expire in a maximum of three days (72 hours).
// However, you can update a zonal shift to set a new expiration at any time. When
// you start a zonal shift, you specify how long you want it to be active, which
// Route 53 ARC converts to an expiry time (expiration time). You can cancel a
// zonal shift, for example, if you're ready to restore traffic to the Availability
// Zone. Or you can update the zonal shift to specify another length of time to
// expire in.
//
// This member is required.
ExpiryTime *time.Time
// The identifier for the resource to include in a zonal shift. The identifier is
// the Amazon Resource Name (ARN) for the resource. At this time, you can only
// start a zonal shift for Network Load Balancers and Application Load Balancers
// with cross-zone load balancing turned off.
//
// This member is required.
ResourceIdentifier *string
// The time (UTC) when the zonal shift is started.
//
// This member is required.
StartTime *time.Time
// A status for a zonal shift. The Status for a zonal shift can have one of the
// following values:
// - ACTIVE: The zonal shift is started and active.
// - EXPIRED: The zonal shift has expired (the expiry time was exceeded).
// - CANCELED: The zonal shift was canceled.
//
// This member is required.
Status types.ZonalShiftStatus
// The identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCancelZonalShiftMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCancelZonalShiftResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCancelZonalShiftValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelZonalShift(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCancelZonalShift(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "arc-zonal-shift",
OperationName: "CancelZonalShift",
}
}
type opCancelZonalShiftResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCancelZonalShiftResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCancelZonalShiftResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "arc-zonal-shift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "arc-zonal-shift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("arc-zonal-shift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCancelZonalShiftResolveEndpointMiddleware(stack *middleware.Stack, options Options) error | {
return stack.Serialize.Insert(&opCancelZonalShiftResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
} | identifier_body | |
api_op_CancelZonalShift.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package arczonalshift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/arczonalshift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Cancel a zonal shift in Amazon Route 53 Application Recovery Controller that
// you've started for a resource in your AWS account in an AWS Region.
func (c *Client) CancelZonalShift(ctx context.Context, params *CancelZonalShiftInput, optFns ...func(*Options)) (*CancelZonalShiftOutput, error) {
if params == nil {
params = &CancelZonalShiftInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CancelZonalShift", params, optFns, c.addOperationCancelZonalShiftMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CancelZonalShiftOutput)
out.ResultMetadata = metadata
return out, nil
}
type CancelZonalShiftInput struct {
// The internally-generated identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
noSmithyDocumentSerde
}
type CancelZonalShiftOutput struct {
// The Availability Zone that traffic is moved away from for a resource when you
// start a zonal shift. Until the zonal shift expires or you cancel it, traffic for
// the resource is instead moved to other Availability Zones in the AWS Region.
//
// This member is required.
AwayFrom *string
// A comment that you enter about the zonal shift. Only the latest comment is
// retained; no comment history is maintained. A new comment overwrites any
// existing comment string.
//
// This member is required.
Comment *string
// The expiry time (expiration time) for the zonal shift. A zonal shift is
// temporary and must be set to expire when you start the zonal shift. You can
// initially set a zonal shift to expire in a maximum of three days (72 hours).
// However, you can update a zonal shift to set a new expiration at any time. When
// you start a zonal shift, you specify how long you want it to be active, which
// Route 53 ARC converts to an expiry time (expiration time). You can cancel a
// zonal shift, for example, if you're ready to restore traffic to the Availability
// Zone. Or you can update the zonal shift to specify another length of time to
// expire in.
//
// This member is required.
ExpiryTime *time.Time
// The identifier for the resource to include in a zonal shift. The identifier is
// the Amazon Resource Name (ARN) for the resource. At this time, you can only
// start a zonal shift for Network Load Balancers and Application Load Balancers
// with cross-zone load balancing turned off.
//
// This member is required.
ResourceIdentifier *string
// The time (UTC) when the zonal shift is started.
//
// This member is required.
StartTime *time.Time
// A status for a zonal shift. The Status for a zonal shift can have one of the
// following values:
// - ACTIVE: The zonal shift is started and active.
// - EXPIRED: The zonal shift has expired (the expiry time was exceeded).
// - CANCELED: The zonal shift was canceled.
//
// This member is required.
Status types.ZonalShiftStatus
// The identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCancelZonalShiftMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCancelZonalShiftResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCancelZonalShiftValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelZonalShift(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCancelZonalShift(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "arc-zonal-shift",
OperationName: "CancelZonalShift",
}
}
type opCancelZonalShiftResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCancelZonalShiftResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCancelZonalShiftResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "arc-zonal-shift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "arc-zonal-shift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil |
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCancelZonalShiftResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCancelZonalShiftResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
v4aScheme.SigningName = aws.String("arc-zonal-shift")
} | conditional_block |
api_op_CancelZonalShift.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package arczonalshift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/arczonalshift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Cancel a zonal shift in Amazon Route 53 Application Recovery Controller that
// you've started for a resource in your AWS account in an AWS Region.
func (c *Client) CancelZonalShift(ctx context.Context, params *CancelZonalShiftInput, optFns ...func(*Options)) (*CancelZonalShiftOutput, error) {
if params == nil {
params = &CancelZonalShiftInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CancelZonalShift", params, optFns, c.addOperationCancelZonalShiftMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CancelZonalShiftOutput)
out.ResultMetadata = metadata
return out, nil
}
type CancelZonalShiftInput struct {
// The internally-generated identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
noSmithyDocumentSerde
}
type CancelZonalShiftOutput struct {
// The Availability Zone that traffic is moved away from for a resource when you
// start a zonal shift. Until the zonal shift expires or you cancel it, traffic for
// the resource is instead moved to other Availability Zones in the AWS Region.
//
// This member is required.
AwayFrom *string
// A comment that you enter about the zonal shift. Only the latest comment is
// retained; no comment history is maintained. A new comment overwrites any
// existing comment string.
//
// This member is required.
Comment *string
// The expiry time (expiration time) for the zonal shift. A zonal shift is
// temporary and must be set to expire when you start the zonal shift. You can
// initially set a zonal shift to expire in a maximum of three days (72 hours).
// However, you can update a zonal shift to set a new expiration at any time. When
// you start a zonal shift, you specify how long you want it to be active, which
// Route 53 ARC converts to an expiry time (expiration time). You can cancel a
// zonal shift, for example, if you're ready to restore traffic to the Availability
// Zone. Or you can update the zonal shift to specify another length of time to
// expire in.
//
// This member is required.
ExpiryTime *time.Time
// The identifier for the resource to include in a zonal shift. The identifier is
// the Amazon Resource Name (ARN) for the resource. At this time, you can only
// start a zonal shift for Network Load Balancers and Application Load Balancers
// with cross-zone load balancing turned off.
//
// This member is required.
ResourceIdentifier *string
// The time (UTC) when the zonal shift is started.
//
// This member is required.
StartTime *time.Time
// A status for a zonal shift. The Status for a zonal shift can have one of the
// following values:
// - ACTIVE: The zonal shift is started and active.
// - EXPIRED: The zonal shift has expired (the expiry time was exceeded).
// - CANCELED: The zonal shift was canceled.
//
// This member is required.
Status types.ZonalShiftStatus
// The identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCancelZonalShiftMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
} | if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCancelZonalShiftResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCancelZonalShiftValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelZonalShift(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCancelZonalShift(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "arc-zonal-shift",
OperationName: "CancelZonalShift",
}
}
type opCancelZonalShiftResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCancelZonalShiftResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCancelZonalShiftResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "arc-zonal-shift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "arc-zonal-shift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("arc-zonal-shift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCancelZonalShiftResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCancelZonalShiftResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
} | if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
} | random_line_split |
api_op_CancelZonalShift.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package arczonalshift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/arczonalshift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Cancel a zonal shift in Amazon Route 53 Application Recovery Controller that
// you've started for a resource in your AWS account in an AWS Region.
func (c *Client) | (ctx context.Context, params *CancelZonalShiftInput, optFns ...func(*Options)) (*CancelZonalShiftOutput, error) {
if params == nil {
params = &CancelZonalShiftInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CancelZonalShift", params, optFns, c.addOperationCancelZonalShiftMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CancelZonalShiftOutput)
out.ResultMetadata = metadata
return out, nil
}
type CancelZonalShiftInput struct {
// The internally-generated identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
noSmithyDocumentSerde
}
type CancelZonalShiftOutput struct {
// The Availability Zone that traffic is moved away from for a resource when you
// start a zonal shift. Until the zonal shift expires or you cancel it, traffic for
// the resource is instead moved to other Availability Zones in the AWS Region.
//
// This member is required.
AwayFrom *string
// A comment that you enter about the zonal shift. Only the latest comment is
// retained; no comment history is maintained. A new comment overwrites any
// existing comment string.
//
// This member is required.
Comment *string
// The expiry time (expiration time) for the zonal shift. A zonal shift is
// temporary and must be set to expire when you start the zonal shift. You can
// initially set a zonal shift to expire in a maximum of three days (72 hours).
// However, you can update a zonal shift to set a new expiration at any time. When
// you start a zonal shift, you specify how long you want it to be active, which
// Route 53 ARC converts to an expiry time (expiration time). You can cancel a
// zonal shift, for example, if you're ready to restore traffic to the Availability
// Zone. Or you can update the zonal shift to specify another length of time to
// expire in.
//
// This member is required.
ExpiryTime *time.Time
// The identifier for the resource to include in a zonal shift. The identifier is
// the Amazon Resource Name (ARN) for the resource. At this time, you can only
// start a zonal shift for Network Load Balancers and Application Load Balancers
// with cross-zone load balancing turned off.
//
// This member is required.
ResourceIdentifier *string
// The time (UTC) when the zonal shift is started.
//
// This member is required.
StartTime *time.Time
// A status for a zonal shift. The Status for a zonal shift can have one of the
// following values:
// - ACTIVE: The zonal shift is started and active.
// - EXPIRED: The zonal shift has expired (the expiry time was exceeded).
// - CANCELED: The zonal shift was canceled.
//
// This member is required.
Status types.ZonalShiftStatus
// The identifier of a zonal shift.
//
// This member is required.
ZonalShiftId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCancelZonalShiftMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCancelZonalShift{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCancelZonalShiftResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCancelZonalShiftValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelZonalShift(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCancelZonalShift(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "arc-zonal-shift",
OperationName: "CancelZonalShift",
}
}
type opCancelZonalShiftResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCancelZonalShiftResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCancelZonalShiftResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "arc-zonal-shift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "arc-zonal-shift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("arc-zonal-shift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCancelZonalShiftResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCancelZonalShiftResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| CancelZonalShift | identifier_name |
admin_commands.go | package cmd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/manyminds/api2go/jsonapi"
"github.com/urfave/cli"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/web/presenters"
)
func initAdminSubCmds(s *Shell) []cli.Command {
return []cli.Command{
{
Name: "chpass",
Usage: "Change your API password remotely",
Action: s.ChangePassword,
},
{
Name: "login",
Usage: "Login to remote client by creating a session cookie",
Action: s.RemoteLogin,
Flags: []cli.Flag{
cli.StringFlag{
Name: "file, f",
Usage: "text file holding the API email and password needed to create a session cookie",
},
cli.BoolFlag{
Name: "bypass-version-check",
Usage: "Bypass versioning check for compatibility of remote node",
},
},
},
{
Name: "logout",
Usage: "Delete any local sessions",
Action: s.Logout,
},
{
Name: "profile",
Usage: "Collects profile metrics from the node.",
Action: s.Profile,
Flags: []cli.Flag{
cli.Uint64Flag{
Name: "seconds, s",
Usage: "duration of profile capture",
Value: 8,
},
cli.StringFlag{
Name: "output_dir, o",
Usage: "output directory of the captured profile",
Value: "/tmp/",
},
},
},
{
Name: "status",
Usage: "Displays the health of various services running inside the node.",
Action: s.Status,
Flags: []cli.Flag{},
},
{
Name: "users",
Usage: "Create, edit permissions, or delete API users",
Subcommands: cli.Commands{
{
Name: "list",
Usage: "Lists all API users and their roles",
Action: s.ListUsers,
},
{
Name: "create",
Usage: "Create a new API user",
Action: s.CreateUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of new user to create",
Required: true,
},
cli.StringFlag{
Name: "role",
Usage: "Permission level of new user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "chrole",
Usage: "Changes an API user's role",
Action: s.ChangeRole,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "email of user to be edited",
Required: true,
},
cli.StringFlag{
Name: "new-role, newrole",
Usage: "new permission level role to set for user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "delete",
Usage: "Delete an API user",
Action: s.DeleteUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of API user to delete",
Required: true,
},
},
},
},
},
}
}
type AdminUsersPresenter struct {
JAID
presenters.UserResource
}
var adminUsersTableHeaders = []string{"Email", "Role", "Has API token", "Created at", "Updated at"}
func (p *AdminUsersPresenter) ToRow() []string |
// RenderTable implements TableRenderer
func (p *AdminUsersPresenter) RenderTable(rt RendererTable) error {
rows := [][]string{p.ToRow()}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
type AdminUsersPresenters []AdminUsersPresenter
// RenderTable implements TableRenderer
func (ps AdminUsersPresenters) RenderTable(rt RendererTable) error {
rows := [][]string{}
for _, p := range ps {
rows = append(rows, p.ToRow())
}
if _, err := rt.Write([]byte("Users\n")); err != nil {
return err
}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
// ListUsers renders all API users and their roles
func (s *Shell) ListUsers(_ *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &AdminUsersPresenters{})
}
// CreateUser creates a new user by prompting for email, password, and role
func (s *Shell) CreateUser(c *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
var links jsonapi.Links
var users AdminUsersPresenters
if err := s.deserializeAPIResponse(resp, &users, &links); err != nil {
return s.errorOut(err)
}
for _, user := range users {
if strings.EqualFold(user.Email, c.String("email")) {
return s.errorOut(fmt.Errorf("user with email %s already exists", user.Email))
}
}
fmt.Println("Password of new user:")
pwd := s.PasswordPrompter.Prompt()
request := struct {
Email string `json:"email"`
Role string `json:"role"`
Password string `json:"password"`
}{
Email: c.String("email"),
Role: c.String("role"),
Password: pwd,
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Post("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully created new API user")
}
// ChangeRole can change a user's role
func (s *Shell) ChangeRole(c *cli.Context) (err error) {
request := struct {
Email string `json:"email"`
NewRole string `json:"newRole"`
}{
Email: c.String("email"),
NewRole: c.String("new-role"),
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Patch("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully updated API user")
}
// DeleteUser deletes an API user by email
func (s *Shell) DeleteUser(c *cli.Context) (err error) {
email := c.String("email")
if email == "" {
return s.errorOut(errors.New("email flag is empty, must specify an email"))
}
response, err := s.HTTP.Delete(fmt.Sprintf("/v2/users/%s", email))
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully deleted API user")
}
// Status will display the health of various services
func (s *Shell) Status(_ *cli.Context) error {
resp, err := s.HTTP.Get("/health?full=1", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &HealthCheckPresenters{})
}
// Profile will collect pprof metrics and store them in a folder.
func (s *Shell) Profile(c *cli.Context) error {
seconds := c.Uint("seconds")
baseDir := c.String("output_dir")
genDir := filepath.Join(baseDir, fmt.Sprintf("debuginfo-%s", time.Now().Format(time.RFC3339)))
err := os.Mkdir(genDir, 0o755)
if err != nil {
return s.errorOut(err)
}
var wgPprof sync.WaitGroup
vitals := []string{
"allocs", // A sampling of all past memory allocations
"block", // Stack traces that led to blocking on synchronization primitives
"cmdline", // The command line invocation of the current program
"goroutine", // Stack traces of all current goroutines
"heap", // A sampling of memory allocations of live objects.
"mutex", // Stack traces of holders of contended mutexes
"profile", // CPU profile.
"threadcreate", // Stack traces that led to the creation of new OS threads
"trace", // A trace of execution of the current program.
}
wgPprof.Add(len(vitals))
s.Logger.Infof("Collecting profiles: %v", vitals)
s.Logger.Infof("writing debug info to %s", genDir)
errs := make(chan error, len(vitals))
for _, vt := range vitals {
go func(vt string) {
defer wgPprof.Done()
uri := fmt.Sprintf("/v2/debug/pprof/%s?seconds=%d", vt, seconds)
resp, err := s.HTTP.Get(uri)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, err)
return
}
defer func() {
if resp.Body != nil {
resp.Body.Close()
}
}()
if resp.StatusCode == http.StatusUnauthorized {
errs <- fmt.Errorf("error collecting %s: %w", vt, errUnauthorized)
return
}
if resp.StatusCode == http.StatusBadRequest {
// best effort to interpret the underlying problem
pprofVersion := resp.Header.Get("X-Go-Pprof")
if pprofVersion == "1" {
b, err := io.ReadAll(resp.Body)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
return
}
respContent := string(b)
// taken from pprof.Profile https://github.com/golang/go/blob/release-branch.go1.20/src/net/http/pprof/pprof.go#L133
if strings.Contains(respContent, "profile duration exceeds server's WriteTimeout") {
errs <- fmt.Errorf("%w: %s", ErrProfileTooLong, respContent)
} else {
errs <- fmt.Errorf("error collecting %s: %w: %s", vt, errBadRequest, respContent)
}
} else {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
}
return
}
// write to file
f, err := os.Create(filepath.Join(genDir, vt))
if err != nil {
errs <- fmt.Errorf("error creating file for %s: %w", vt, err)
return
}
wc := utils.NewDeferableWriteCloser(f)
defer wc.Close()
_, err = io.Copy(wc, resp.Body)
if err != nil {
errs <- fmt.Errorf("error writing to file for %s: %w", vt, err)
return
}
err = wc.Close()
if err != nil {
errs <- fmt.Errorf("error closing file for %s: %w", vt, err)
return
}
}(vt)
}
wgPprof.Wait()
close(errs)
// Atmost one err is emitted per vital.
s.Logger.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals))
if len(errs) > 0 {
var merr error
for err := range errs {
merr = errors.Join(merr, err)
}
return s.errorOut(fmt.Errorf("profile collection failed:\n%v", merr))
}
return nil
}
| {
row := []string{
p.ID,
string(p.Role),
p.HasActiveApiToken,
p.CreatedAt.String(),
p.UpdatedAt.String(),
}
return row
} | identifier_body |
admin_commands.go | package cmd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/manyminds/api2go/jsonapi"
"github.com/urfave/cli"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/web/presenters"
)
func initAdminSubCmds(s *Shell) []cli.Command {
return []cli.Command{
{
Name: "chpass",
Usage: "Change your API password remotely",
Action: s.ChangePassword,
},
{
Name: "login",
Usage: "Login to remote client by creating a session cookie",
Action: s.RemoteLogin,
Flags: []cli.Flag{
cli.StringFlag{
Name: "file, f",
Usage: "text file holding the API email and password needed to create a session cookie",
},
cli.BoolFlag{
Name: "bypass-version-check",
Usage: "Bypass versioning check for compatibility of remote node",
},
},
},
{
Name: "logout",
Usage: "Delete any local sessions",
Action: s.Logout,
},
{
Name: "profile",
Usage: "Collects profile metrics from the node.",
Action: s.Profile,
Flags: []cli.Flag{
cli.Uint64Flag{
Name: "seconds, s",
Usage: "duration of profile capture",
Value: 8,
},
cli.StringFlag{
Name: "output_dir, o",
Usage: "output directory of the captured profile",
Value: "/tmp/",
},
},
},
{
Name: "status",
Usage: "Displays the health of various services running inside the node.",
Action: s.Status,
Flags: []cli.Flag{},
},
{
Name: "users",
Usage: "Create, edit permissions, or delete API users",
Subcommands: cli.Commands{
{
Name: "list",
Usage: "Lists all API users and their roles",
Action: s.ListUsers,
},
{
Name: "create",
Usage: "Create a new API user",
Action: s.CreateUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of new user to create",
Required: true,
},
cli.StringFlag{
Name: "role",
Usage: "Permission level of new user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "chrole",
Usage: "Changes an API user's role",
Action: s.ChangeRole,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "email of user to be edited",
Required: true,
},
cli.StringFlag{
Name: "new-role, newrole",
Usage: "new permission level role to set for user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "delete",
Usage: "Delete an API user",
Action: s.DeleteUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of API user to delete",
Required: true,
},
},
},
},
},
}
}
type AdminUsersPresenter struct {
JAID
presenters.UserResource
}
var adminUsersTableHeaders = []string{"Email", "Role", "Has API token", "Created at", "Updated at"}
func (p *AdminUsersPresenter) | () []string {
row := []string{
p.ID,
string(p.Role),
p.HasActiveApiToken,
p.CreatedAt.String(),
p.UpdatedAt.String(),
}
return row
}
// RenderTable implements TableRenderer
func (p *AdminUsersPresenter) RenderTable(rt RendererTable) error {
rows := [][]string{p.ToRow()}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
type AdminUsersPresenters []AdminUsersPresenter
// RenderTable implements TableRenderer
func (ps AdminUsersPresenters) RenderTable(rt RendererTable) error {
rows := [][]string{}
for _, p := range ps {
rows = append(rows, p.ToRow())
}
if _, err := rt.Write([]byte("Users\n")); err != nil {
return err
}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
// ListUsers renders all API users and their roles
func (s *Shell) ListUsers(_ *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &AdminUsersPresenters{})
}
// CreateUser creates a new user by prompting for email, password, and role
func (s *Shell) CreateUser(c *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
var links jsonapi.Links
var users AdminUsersPresenters
if err := s.deserializeAPIResponse(resp, &users, &links); err != nil {
return s.errorOut(err)
}
for _, user := range users {
if strings.EqualFold(user.Email, c.String("email")) {
return s.errorOut(fmt.Errorf("user with email %s already exists", user.Email))
}
}
fmt.Println("Password of new user:")
pwd := s.PasswordPrompter.Prompt()
request := struct {
Email string `json:"email"`
Role string `json:"role"`
Password string `json:"password"`
}{
Email: c.String("email"),
Role: c.String("role"),
Password: pwd,
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Post("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully created new API user")
}
// ChangeRole can change a user's role
func (s *Shell) ChangeRole(c *cli.Context) (err error) {
request := struct {
Email string `json:"email"`
NewRole string `json:"newRole"`
}{
Email: c.String("email"),
NewRole: c.String("new-role"),
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Patch("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully updated API user")
}
// DeleteUser deletes an API user by email
func (s *Shell) DeleteUser(c *cli.Context) (err error) {
email := c.String("email")
if email == "" {
return s.errorOut(errors.New("email flag is empty, must specify an email"))
}
response, err := s.HTTP.Delete(fmt.Sprintf("/v2/users/%s", email))
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully deleted API user")
}
// Status will display the health of various services
func (s *Shell) Status(_ *cli.Context) error {
resp, err := s.HTTP.Get("/health?full=1", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &HealthCheckPresenters{})
}
// Profile will collect pprof metrics and store them in a folder.
func (s *Shell) Profile(c *cli.Context) error {
seconds := c.Uint("seconds")
baseDir := c.String("output_dir")
genDir := filepath.Join(baseDir, fmt.Sprintf("debuginfo-%s", time.Now().Format(time.RFC3339)))
err := os.Mkdir(genDir, 0o755)
if err != nil {
return s.errorOut(err)
}
var wgPprof sync.WaitGroup
vitals := []string{
"allocs", // A sampling of all past memory allocations
"block", // Stack traces that led to blocking on synchronization primitives
"cmdline", // The command line invocation of the current program
"goroutine", // Stack traces of all current goroutines
"heap", // A sampling of memory allocations of live objects.
"mutex", // Stack traces of holders of contended mutexes
"profile", // CPU profile.
"threadcreate", // Stack traces that led to the creation of new OS threads
"trace", // A trace of execution of the current program.
}
wgPprof.Add(len(vitals))
s.Logger.Infof("Collecting profiles: %v", vitals)
s.Logger.Infof("writing debug info to %s", genDir)
errs := make(chan error, len(vitals))
for _, vt := range vitals {
go func(vt string) {
defer wgPprof.Done()
uri := fmt.Sprintf("/v2/debug/pprof/%s?seconds=%d", vt, seconds)
resp, err := s.HTTP.Get(uri)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, err)
return
}
defer func() {
if resp.Body != nil {
resp.Body.Close()
}
}()
if resp.StatusCode == http.StatusUnauthorized {
errs <- fmt.Errorf("error collecting %s: %w", vt, errUnauthorized)
return
}
if resp.StatusCode == http.StatusBadRequest {
// best effort to interpret the underlying problem
pprofVersion := resp.Header.Get("X-Go-Pprof")
if pprofVersion == "1" {
b, err := io.ReadAll(resp.Body)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
return
}
respContent := string(b)
// taken from pprof.Profile https://github.com/golang/go/blob/release-branch.go1.20/src/net/http/pprof/pprof.go#L133
if strings.Contains(respContent, "profile duration exceeds server's WriteTimeout") {
errs <- fmt.Errorf("%w: %s", ErrProfileTooLong, respContent)
} else {
errs <- fmt.Errorf("error collecting %s: %w: %s", vt, errBadRequest, respContent)
}
} else {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
}
return
}
// write to file
f, err := os.Create(filepath.Join(genDir, vt))
if err != nil {
errs <- fmt.Errorf("error creating file for %s: %w", vt, err)
return
}
wc := utils.NewDeferableWriteCloser(f)
defer wc.Close()
_, err = io.Copy(wc, resp.Body)
if err != nil {
errs <- fmt.Errorf("error writing to file for %s: %w", vt, err)
return
}
err = wc.Close()
if err != nil {
errs <- fmt.Errorf("error closing file for %s: %w", vt, err)
return
}
}(vt)
}
wgPprof.Wait()
close(errs)
// Atmost one err is emitted per vital.
s.Logger.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals))
if len(errs) > 0 {
var merr error
for err := range errs {
merr = errors.Join(merr, err)
}
return s.errorOut(fmt.Errorf("profile collection failed:\n%v", merr))
}
return nil
}
| ToRow | identifier_name |
admin_commands.go | package cmd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/manyminds/api2go/jsonapi"
"github.com/urfave/cli"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/web/presenters"
)
func initAdminSubCmds(s *Shell) []cli.Command {
return []cli.Command{
{
Name: "chpass",
Usage: "Change your API password remotely",
Action: s.ChangePassword,
},
{
Name: "login",
Usage: "Login to remote client by creating a session cookie",
Action: s.RemoteLogin,
Flags: []cli.Flag{
cli.StringFlag{
Name: "file, f",
Usage: "text file holding the API email and password needed to create a session cookie",
},
cli.BoolFlag{
Name: "bypass-version-check",
Usage: "Bypass versioning check for compatibility of remote node",
},
},
},
{
Name: "logout",
Usage: "Delete any local sessions",
Action: s.Logout,
},
{
Name: "profile",
Usage: "Collects profile metrics from the node.",
Action: s.Profile,
Flags: []cli.Flag{
cli.Uint64Flag{
Name: "seconds, s",
Usage: "duration of profile capture",
Value: 8,
},
cli.StringFlag{
Name: "output_dir, o",
Usage: "output directory of the captured profile",
Value: "/tmp/",
},
},
},
{
Name: "status",
Usage: "Displays the health of various services running inside the node.",
Action: s.Status,
Flags: []cli.Flag{},
},
{
Name: "users",
Usage: "Create, edit permissions, or delete API users",
Subcommands: cli.Commands{
{
Name: "list",
Usage: "Lists all API users and their roles",
Action: s.ListUsers,
},
{
Name: "create",
Usage: "Create a new API user",
Action: s.CreateUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of new user to create",
Required: true,
},
cli.StringFlag{
Name: "role",
Usage: "Permission level of new user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "chrole",
Usage: "Changes an API user's role",
Action: s.ChangeRole,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "email of user to be edited",
Required: true,
},
cli.StringFlag{
Name: "new-role, newrole",
Usage: "new permission level role to set for user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "delete",
Usage: "Delete an API user",
Action: s.DeleteUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of API user to delete",
Required: true,
},
},
},
},
},
}
}
type AdminUsersPresenter struct {
JAID
presenters.UserResource
}
var adminUsersTableHeaders = []string{"Email", "Role", "Has API token", "Created at", "Updated at"}
func (p *AdminUsersPresenter) ToRow() []string {
row := []string{
p.ID,
string(p.Role),
p.HasActiveApiToken,
p.CreatedAt.String(),
p.UpdatedAt.String(),
}
return row
}
// RenderTable implements TableRenderer
func (p *AdminUsersPresenter) RenderTable(rt RendererTable) error {
rows := [][]string{p.ToRow()}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
type AdminUsersPresenters []AdminUsersPresenter
// RenderTable implements TableRenderer
func (ps AdminUsersPresenters) RenderTable(rt RendererTable) error {
rows := [][]string{}
for _, p := range ps {
rows = append(rows, p.ToRow())
}
if _, err := rt.Write([]byte("Users\n")); err != nil {
return err
}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
// ListUsers renders all API users and their roles
func (s *Shell) ListUsers(_ *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &AdminUsersPresenters{})
}
// CreateUser creates a new user by prompting for email, password, and role
func (s *Shell) CreateUser(c *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
var links jsonapi.Links
var users AdminUsersPresenters
if err := s.deserializeAPIResponse(resp, &users, &links); err != nil |
for _, user := range users {
if strings.EqualFold(user.Email, c.String("email")) {
return s.errorOut(fmt.Errorf("user with email %s already exists", user.Email))
}
}
fmt.Println("Password of new user:")
pwd := s.PasswordPrompter.Prompt()
request := struct {
Email string `json:"email"`
Role string `json:"role"`
Password string `json:"password"`
}{
Email: c.String("email"),
Role: c.String("role"),
Password: pwd,
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Post("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully created new API user")
}
// ChangeRole can change a user's role
func (s *Shell) ChangeRole(c *cli.Context) (err error) {
request := struct {
Email string `json:"email"`
NewRole string `json:"newRole"`
}{
Email: c.String("email"),
NewRole: c.String("new-role"),
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Patch("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully updated API user")
}
// DeleteUser deletes an API user by email
func (s *Shell) DeleteUser(c *cli.Context) (err error) {
email := c.String("email")
if email == "" {
return s.errorOut(errors.New("email flag is empty, must specify an email"))
}
response, err := s.HTTP.Delete(fmt.Sprintf("/v2/users/%s", email))
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully deleted API user")
}
// Status will display the health of various services
func (s *Shell) Status(_ *cli.Context) error {
resp, err := s.HTTP.Get("/health?full=1", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &HealthCheckPresenters{})
}
// Profile will collect pprof metrics and store them in a folder.
func (s *Shell) Profile(c *cli.Context) error {
seconds := c.Uint("seconds")
baseDir := c.String("output_dir")
genDir := filepath.Join(baseDir, fmt.Sprintf("debuginfo-%s", time.Now().Format(time.RFC3339)))
err := os.Mkdir(genDir, 0o755)
if err != nil {
return s.errorOut(err)
}
var wgPprof sync.WaitGroup
vitals := []string{
"allocs", // A sampling of all past memory allocations
"block", // Stack traces that led to blocking on synchronization primitives
"cmdline", // The command line invocation of the current program
"goroutine", // Stack traces of all current goroutines
"heap", // A sampling of memory allocations of live objects.
"mutex", // Stack traces of holders of contended mutexes
"profile", // CPU profile.
"threadcreate", // Stack traces that led to the creation of new OS threads
"trace", // A trace of execution of the current program.
}
wgPprof.Add(len(vitals))
s.Logger.Infof("Collecting profiles: %v", vitals)
s.Logger.Infof("writing debug info to %s", genDir)
errs := make(chan error, len(vitals))
for _, vt := range vitals {
go func(vt string) {
defer wgPprof.Done()
uri := fmt.Sprintf("/v2/debug/pprof/%s?seconds=%d", vt, seconds)
resp, err := s.HTTP.Get(uri)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, err)
return
}
defer func() {
if resp.Body != nil {
resp.Body.Close()
}
}()
if resp.StatusCode == http.StatusUnauthorized {
errs <- fmt.Errorf("error collecting %s: %w", vt, errUnauthorized)
return
}
if resp.StatusCode == http.StatusBadRequest {
// best effort to interpret the underlying problem
pprofVersion := resp.Header.Get("X-Go-Pprof")
if pprofVersion == "1" {
b, err := io.ReadAll(resp.Body)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
return
}
respContent := string(b)
// taken from pprof.Profile https://github.com/golang/go/blob/release-branch.go1.20/src/net/http/pprof/pprof.go#L133
if strings.Contains(respContent, "profile duration exceeds server's WriteTimeout") {
errs <- fmt.Errorf("%w: %s", ErrProfileTooLong, respContent)
} else {
errs <- fmt.Errorf("error collecting %s: %w: %s", vt, errBadRequest, respContent)
}
} else {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
}
return
}
// write to file
f, err := os.Create(filepath.Join(genDir, vt))
if err != nil {
errs <- fmt.Errorf("error creating file for %s: %w", vt, err)
return
}
wc := utils.NewDeferableWriteCloser(f)
defer wc.Close()
_, err = io.Copy(wc, resp.Body)
if err != nil {
errs <- fmt.Errorf("error writing to file for %s: %w", vt, err)
return
}
err = wc.Close()
if err != nil {
errs <- fmt.Errorf("error closing file for %s: %w", vt, err)
return
}
}(vt)
}
wgPprof.Wait()
close(errs)
// Atmost one err is emitted per vital.
s.Logger.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals))
if len(errs) > 0 {
var merr error
for err := range errs {
merr = errors.Join(merr, err)
}
return s.errorOut(fmt.Errorf("profile collection failed:\n%v", merr))
}
return nil
}
| {
return s.errorOut(err)
} | conditional_block |
admin_commands.go | package cmd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/manyminds/api2go/jsonapi"
"github.com/urfave/cli"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/web/presenters"
)
func initAdminSubCmds(s *Shell) []cli.Command {
return []cli.Command{
{
Name: "chpass",
Usage: "Change your API password remotely",
Action: s.ChangePassword,
},
{
Name: "login",
Usage: "Login to remote client by creating a session cookie",
Action: s.RemoteLogin,
Flags: []cli.Flag{
cli.StringFlag{
Name: "file, f",
Usage: "text file holding the API email and password needed to create a session cookie",
},
cli.BoolFlag{
Name: "bypass-version-check",
Usage: "Bypass versioning check for compatibility of remote node",
},
},
},
{
Name: "logout",
Usage: "Delete any local sessions",
Action: s.Logout,
},
{
Name: "profile",
Usage: "Collects profile metrics from the node.",
Action: s.Profile,
Flags: []cli.Flag{
cli.Uint64Flag{
Name: "seconds, s",
Usage: "duration of profile capture",
Value: 8,
},
cli.StringFlag{
Name: "output_dir, o",
Usage: "output directory of the captured profile",
Value: "/tmp/",
},
},
},
{
Name: "status",
Usage: "Displays the health of various services running inside the node.",
Action: s.Status,
Flags: []cli.Flag{},
},
{
Name: "users",
Usage: "Create, edit permissions, or delete API users",
Subcommands: cli.Commands{
{
Name: "list",
Usage: "Lists all API users and their roles",
Action: s.ListUsers,
},
{
Name: "create",
Usage: "Create a new API user",
Action: s.CreateUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of new user to create",
Required: true,
},
cli.StringFlag{
Name: "role", | {
Name: "chrole",
Usage: "Changes an API user's role",
Action: s.ChangeRole,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "email of user to be edited",
Required: true,
},
cli.StringFlag{
Name: "new-role, newrole",
Usage: "new permission level role to set for user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
},
{
Name: "delete",
Usage: "Delete an API user",
Action: s.DeleteUser,
Flags: []cli.Flag{
cli.StringFlag{
Name: "email",
Usage: "Email of API user to delete",
Required: true,
},
},
},
},
},
}
}
type AdminUsersPresenter struct {
JAID
presenters.UserResource
}
var adminUsersTableHeaders = []string{"Email", "Role", "Has API token", "Created at", "Updated at"}
func (p *AdminUsersPresenter) ToRow() []string {
row := []string{
p.ID,
string(p.Role),
p.HasActiveApiToken,
p.CreatedAt.String(),
p.UpdatedAt.String(),
}
return row
}
// RenderTable implements TableRenderer
func (p *AdminUsersPresenter) RenderTable(rt RendererTable) error {
rows := [][]string{p.ToRow()}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
type AdminUsersPresenters []AdminUsersPresenter
// RenderTable implements TableRenderer
func (ps AdminUsersPresenters) RenderTable(rt RendererTable) error {
rows := [][]string{}
for _, p := range ps {
rows = append(rows, p.ToRow())
}
if _, err := rt.Write([]byte("Users\n")); err != nil {
return err
}
renderList(adminUsersTableHeaders, rows, rt.Writer)
return utils.JustError(rt.Write([]byte("\n")))
}
// ListUsers renders all API users and their roles
func (s *Shell) ListUsers(_ *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &AdminUsersPresenters{})
}
// CreateUser creates a new user by prompting for email, password, and role
func (s *Shell) CreateUser(c *cli.Context) (err error) {
resp, err := s.HTTP.Get("/v2/users/", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
var links jsonapi.Links
var users AdminUsersPresenters
if err := s.deserializeAPIResponse(resp, &users, &links); err != nil {
return s.errorOut(err)
}
for _, user := range users {
if strings.EqualFold(user.Email, c.String("email")) {
return s.errorOut(fmt.Errorf("user with email %s already exists", user.Email))
}
}
fmt.Println("Password of new user:")
pwd := s.PasswordPrompter.Prompt()
request := struct {
Email string `json:"email"`
Role string `json:"role"`
Password string `json:"password"`
}{
Email: c.String("email"),
Role: c.String("role"),
Password: pwd,
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Post("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully created new API user")
}
// ChangeRole can change a user's role
func (s *Shell) ChangeRole(c *cli.Context) (err error) {
request := struct {
Email string `json:"email"`
NewRole string `json:"newRole"`
}{
Email: c.String("email"),
NewRole: c.String("new-role"),
}
requestData, err := json.Marshal(request)
if err != nil {
return s.errorOut(err)
}
buf := bytes.NewBuffer(requestData)
response, err := s.HTTP.Patch("/v2/users", buf)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully updated API user")
}
// DeleteUser deletes an API user by email
func (s *Shell) DeleteUser(c *cli.Context) (err error) {
email := c.String("email")
if email == "" {
return s.errorOut(errors.New("email flag is empty, must specify an email"))
}
response, err := s.HTTP.Delete(fmt.Sprintf("/v2/users/%s", email))
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := response.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully deleted API user")
}
// Status will display the health of various services
func (s *Shell) Status(_ *cli.Context) error {
resp, err := s.HTTP.Get("/health?full=1", nil)
if err != nil {
return s.errorOut(err)
}
defer func() {
if cerr := resp.Body.Close(); cerr != nil {
err = multierr.Append(err, cerr)
}
}()
return s.renderAPIResponse(resp, &HealthCheckPresenters{})
}
// Profile will collect pprof metrics and store them in a folder.
func (s *Shell) Profile(c *cli.Context) error {
seconds := c.Uint("seconds")
baseDir := c.String("output_dir")
genDir := filepath.Join(baseDir, fmt.Sprintf("debuginfo-%s", time.Now().Format(time.RFC3339)))
err := os.Mkdir(genDir, 0o755)
if err != nil {
return s.errorOut(err)
}
var wgPprof sync.WaitGroup
vitals := []string{
"allocs", // A sampling of all past memory allocations
"block", // Stack traces that led to blocking on synchronization primitives
"cmdline", // The command line invocation of the current program
"goroutine", // Stack traces of all current goroutines
"heap", // A sampling of memory allocations of live objects.
"mutex", // Stack traces of holders of contended mutexes
"profile", // CPU profile.
"threadcreate", // Stack traces that led to the creation of new OS threads
"trace", // A trace of execution of the current program.
}
wgPprof.Add(len(vitals))
s.Logger.Infof("Collecting profiles: %v", vitals)
s.Logger.Infof("writing debug info to %s", genDir)
errs := make(chan error, len(vitals))
for _, vt := range vitals {
go func(vt string) {
defer wgPprof.Done()
uri := fmt.Sprintf("/v2/debug/pprof/%s?seconds=%d", vt, seconds)
resp, err := s.HTTP.Get(uri)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, err)
return
}
defer func() {
if resp.Body != nil {
resp.Body.Close()
}
}()
if resp.StatusCode == http.StatusUnauthorized {
errs <- fmt.Errorf("error collecting %s: %w", vt, errUnauthorized)
return
}
if resp.StatusCode == http.StatusBadRequest {
// best effort to interpret the underlying problem
pprofVersion := resp.Header.Get("X-Go-Pprof")
if pprofVersion == "1" {
b, err := io.ReadAll(resp.Body)
if err != nil {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
return
}
respContent := string(b)
// taken from pprof.Profile https://github.com/golang/go/blob/release-branch.go1.20/src/net/http/pprof/pprof.go#L133
if strings.Contains(respContent, "profile duration exceeds server's WriteTimeout") {
errs <- fmt.Errorf("%w: %s", ErrProfileTooLong, respContent)
} else {
errs <- fmt.Errorf("error collecting %s: %w: %s", vt, errBadRequest, respContent)
}
} else {
errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest)
}
return
}
// write to file
f, err := os.Create(filepath.Join(genDir, vt))
if err != nil {
errs <- fmt.Errorf("error creating file for %s: %w", vt, err)
return
}
wc := utils.NewDeferableWriteCloser(f)
defer wc.Close()
_, err = io.Copy(wc, resp.Body)
if err != nil {
errs <- fmt.Errorf("error writing to file for %s: %w", vt, err)
return
}
err = wc.Close()
if err != nil {
errs <- fmt.Errorf("error closing file for %s: %w", vt, err)
return
}
}(vt)
}
wgPprof.Wait()
close(errs)
// Atmost one err is emitted per vital.
s.Logger.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals))
if len(errs) > 0 {
var merr error
for err := range errs {
merr = errors.Join(merr, err)
}
return s.errorOut(fmt.Errorf("profile collection failed:\n%v", merr))
}
return nil
} | Usage: "Permission level of new user. Options: 'admin', 'edit', 'run', 'view'.",
Required: true,
},
},
}, | random_line_split |
classes.py | from datetime import datetime
import uuid
import json
from random import choice
class Client:
guid = None
mailbox = None
background = None
user = None
environment = None
history = None
@classmethod
def get_next_guid(cls):
return str(uuid.uuid4())
def __init__(self, guid):
self.guid = guid
self.mailbox = MailBox()
self.background = UserBackground(None, None, None, None)
self.environment = Environment()
self.user = User()
self.history = History()
def responce(self):
return {
"rating": self.user.status['name'],
"money": self.user.money,
"env": self.environment.response(),
"news": NewsList.get_last().response(),
"events": {
"mails": self.mailbox.get_new_mails_count(),
"network": 0,
"devices": 0,
"software": 0,
}
}
class MailBox:
mails = {}
new = []
archive = []
def __init__(self):
pass
def add_new(self, mail):
self.new.append(mail)
self.mails[mail.id] = mail
def mail_processed(self, mail_id):
mail = self.mails[mail_id]
self.new.remove(mail)
self.archive.append(mail)
def get_new_mails(self):
return self.new
def get_new_mails_count(self):
return len(self.new)
def responce(self):
response = []
for mail in self.get_new_mails():
response.append(mail.response())
return {"mails": response}
class UserBackground:
boss = 'начальник'
bssid = 'TPLink 0707'
email = ''
ldap = 'office'
def __init__(self, boss, bssid, email, ldap):
if boss:
self.boss = boss
if bssid:
self.bssid = bssid
if ldap:
self.ldap = ldap
if email:
self.email = email
else:
self.email = self.ldap + '@mail.ru'
def form_mail_from(self, address_type):
if address_type == 'начальник':
return self.boss
def form_mail_to(self, address_type):
if not address_type:
return self.email
class Mail:
counter = 0
id = None
header = ''
attachments = []
text = ''
sent = ''
received = ''
date = None
actions = []
@classmethod
def get_id(cls):
cls.counter += 1
return cls.counter
def __init__(self, header, attachments, text, sent, date, actions, receiver):
self.id = Mail.get_id()
self.header = header
self.attachments = attachments
self.text = text
# self.sent = UserBackground.form_mail_from(sent)
self.sent = sent
if not date:
self.date = datetime.now()
# self.receiver = UserBackground.form_mail_to(receiver)
self.receiver = receiver
self.actions = []
for i, action in enumerate(actions):
action['id'] = i
self.actions.append(Action(
i,
action['text'],
action['correct'],
action['rating'],
action['environment'],
action['answer'],
''
))
def perform_action(self, action_id, client):
responce = self.actions[action_id].perform(client)
client.mailbox.mail_processed(self.id)
return responce
def response(self):
actions = []
for action in self.actions:
actions.append(action.response())
return {
"id": self.id,
"header": self.header,
"text": self.text,
"sent": self.sent,
"receiver": self.receiver,
"date": self.date,
"attachments": self.attachments,
"actions": actions,
}
class Action:
id = 0
feedback = None
text = ''
correct = True
rating = 0
environment = 0
answer = ''
def __init__(self, id, text, correct, rating, environment, answer, link):
self.id = id
self.text = text
self.correct = correct
self.rating = rating
self.environment = environment
self.answer = answer
self.link = link
self.feedback = Feedback(correct, answer, link)
def perform(self, client):
client.user.add_rating(self.rating)
client.environment.add_scores(self.environment)
return self.feedback.response()
def response(self):
return {
"id": self.id,
"text": self.text,
}
class Feedback:
correct = True
text = 'OK'
link = ''
def __init__(self, correct, text, link):
self.correct = correct
self.text = text
self.link = link
def response(self):
return {
"result": self.correct,
"feedback": {
"text": self.text,
"link": self.link,
}
}
class User:
money = 4000
last_salary = None
statuses = (
{
'name': 'Ламер',
'salary': 1000,
'next level': 16,
}, {
'name': 'Уверенный пользователь',
'salary': 1000,
'next level': 10,
}, {
'name': 'Эникей',
'salary': 1000,
'next level': 50,
}, {
'name': 'Админ',
'salary': 1000,
'next level': 200,
}, {
'name': 'Хакер',
'salary': 1000,
'next level': 1000000,
},
)
status = statuses[0]
rating = 4
name = 'Ivan'
def __init__(self):
self.last_salary = datetime.now()
self.rating = 14
self.status = User.statuses[0]
self.money = 4000
def salary(self):
self.money += self.status['salary']
self.last_salary = datetime.now()
def add_rating(self, rating):
self.rating += rating
if self.rating >= self.status['next level']:
self.status = User.statuses[
User.statuses.index(self.status) + 1
]
def purchase(self, cost):
if self.money - cost >= 0:
self.money -= cost
else:
from exceptions import NotEnoughtMoney
raise NotEnoughtMoney()
class Environment:
states = [
{
'state': 0,
'cpu': 20,
'ram': 1000,
'network': 100,
'scores': 45,
}, {
'state': 1,
'cpu': 45,
'ram': 2500,
'network': 135,
'scores': 20,
}, {
'state': 2,
'cpu': 80,
'ram': 3800,
'network': 350,
'scores': 0,
}
]
scores = 0
last_av_check = None
state = states[0]
def __init__(self):
self.last_av_check = datetime.now()
self.scores = 50
self.state = Environment.states[0]
def add_scores(self, scores):
self.scores += scores
index = self.state['state']
if index < len(Environment.states) - 1:
if self.scores <= Environment.states[index + 1]['scores']:
self.state = Environment.states[index + 1]
if index > 0:
if self.scores > Environment.states[index - 1]['scores']:
self.state = Environment.states[index - 1]
def av_checked(self):
self.last_av_check = datetime.now()
def adminated(self):
self.state = Environment.states[0]
self.scores = 50
self.av_checked()
def response(self):
return {
"state": self.state['state'],
"cpu": self.state['cpu'],
"ram": self.state['ram'],
"network": self.state['network'],
"lastAvCheck": str(self.last_av_check),
}
class History:
events = []
def add(self, header, body, event_type, result):
self.events.append((header, body, event_type, result))
def response(self):
pass
class Rating:
players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
@classmethod
def get_rating(cls, client):
# it's a dummy!
response = []
cls.players[client.user.rating] = client.user.name
for key in sorted(cls.players, reverse=True):
response.append({
'rating': key,
'name': cls.players[key]
})
cls.players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
return response
class NewsList:
news = []
@classmethod
def set_hot_news(cls, news):
cls.news = [news]
# it's a dummy!
@classmethod
def add_news(cls, news):
cls.news.append(news)
@classmethod
def get_last(cls):
return choice(cls.news)
# it's a dummy too!
class News:
tag = None
text = None
link = None
def __init__(self, tag, text, link):
self.tag = tag
| .text = text
self.link = link
def response(self):
return {
"tag": self.tag,
"text": self.text,
"link": self.link,
}
class Buff:
name = ""
cost = 0
description = ""
id = 0
@classmethod
def action(cls, client):
pass
class BetterCallAdmin(Buff):
name = "Вызвать сисадмина"
cost = 500
description = "Вызвать системного администратора, который осуществит техническое обслуживание."
id = 1
@classmethod
def action(cls, client):
client.environment.adminated()
client.user.purchase(cls.cost)
class UpgrageAntivirus(Buff):
name = "Купить месяц pro-версии антивируса"
cost = 5000
description = "pro-версия антивируса автоматически проверяет файл при скачивании. Больше никаких зловредов в почте!"
id = 2
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class DiagnosePc(Buff):
name = "Провести автоматическую диагностику"
cost = 100
description = "За небольшую сумму Вы получаете подсказку, как решить проблему самостоятельно."
id = 3
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class MakeBackup(Buff):
name = "Сделать облачный бэкап"
cost = 200
description = "Сделать резервный образ системы и загрузить его в облако, дабы защитить от атак на файловую систему."
id = 4
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Offense:
name = ""
cost = 0
description = ""
id = 100
@classmethod
def action(cls, client):
pass
class SpamEverywhere(Offense):
name = "Заказать спам"
cost = 1000
description = "Заказать спам на всех игроков, кроме себя. Просто ради забавы."
id = 101
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class UseKnownExploit(Offense):
name = "Заказать Exploit-атаку"
cost = 2000
description = "Заказать атаку по нескольким игрокам, используя известную уязвимость. Успех зависит от версии антивирусных баз."
id = 102
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class SpamMailExploit(Offense):
name = "Заказать спам с червем"
cost = 5000
description = "Заказать спам со зловредом по игрокам, при каждом открытии которого Вы получите немного денег."
id = 103
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Shop:
buffs = [
BetterCallAdmin,
UpgrageAntivirus,
DiagnosePc,
MakeBackup,
]
offenses = [
SpamEverywhere,
UseKnownExploit,
SpamMailExploit
]
@classmethod
def response(cls):
buffs_offers = []
for buff in cls.buffs:
buffs_offers.append({
"name": buff.name,
"cost": buff.cost,
"description": buff.description,
"id": buff.id,
})
offenses_offers = []
for offense in cls.offenses:
offenses_offers.append({
"name": offense.name,
"cost": offense.cost,
"description": offense.description,
"id": offense.id,
})
return {
"buff": buffs_offers,
"attack": offenses_offers,
}
| self | identifier_name |
classes.py | from datetime import datetime
import uuid
import json
from random import choice
class Client:
guid = None
mailbox = None
background = None
user = None
environment = None
history = None
@classmethod
def get_next_guid(cls):
return str(uuid.uuid4())
def __init__(self, guid):
self.guid = guid
self.mailbox = MailBox()
self.background = UserBackground(None, None, None, None)
self.environment = Environment()
self.user = User()
self.history = History()
def responce(self):
return {
"rating": self.user.status['name'],
"money": self.user.money,
"env": self.environment.response(),
"news": NewsList.get_last().response(),
"events": {
"mails": self.mailbox.get_new_mails_count(),
"network": 0,
"devices": 0,
"software": 0,
}
}
class MailBox:
mails = {}
new = []
archive = []
def __init__(self):
pass
def add_new(self, mail):
self.new.append(mail)
self.mails[mail.id] = mail
def mail_processed(self, mail_id):
mail = self.mails[mail_id]
self.new.remove(mail)
self.archive.append(mail)
def get_new_mails(self):
return self.new
def get_new_mails_count(self):
return len(self.new)
def responce(self):
response = []
for mail in self.get_new_mails():
response.append(mail.response())
return {"mails": response}
class UserBackground:
boss = 'начальник'
bssid = 'TPLink 0707'
email = ''
ldap = 'office'
def __init__(self, boss, bssid, email, ldap):
if boss:
self.boss = boss
if bssid:
self.bssid = bssid
if ldap:
self.ldap = ldap
if email:
self.email = email
else:
self.email = self.ldap + '@mail.ru'
def form_mail_from(self, address_type):
if address_type == 'начальник':
return self.boss
def form_mail_to(self, address_type):
if not address_type:
return self.email
class Mail:
counter = 0
id = None
header = ''
attachments = []
text = ''
sent = ''
received = ''
date = None
actions = []
@classmethod
def get_id(cls):
cls.counter += 1
return cls.counter
def __init__(self, header, attachments, text, sent, date, actions, receiver):
self.id = Mail.get_id()
self.header = header
self.attachments = attachments
self.text = text
# self.sent = UserBackground.form_mail_from(sent)
self.sent = sent
if not date:
self.date = datetime.now()
# self.receiver = UserBackground.form_mail_to(receiver)
self.receiver = receiver
self.actions = []
for i, action in enumerate(actions):
action['id'] = i
self.actions.append(Action(
i,
action['text'],
action['correct'],
action['rating'],
action['environment'],
action['answer'],
''
))
def perform_action(self, action_id, client):
responce = self.actions[action_id].perform(client)
client.mailbox.mail_processed(self.id)
return responce
def response(self):
actions = []
for action in self.actions:
actions.append(action.response())
return {
"id": self.id,
"header": self.header,
"text": self.text,
"sent": self.sent,
"receiver": self.receiver,
"date": self.date,
"attachments": self.attachments,
"actions": actions,
}
class Action:
id = 0
feedback = None
text = ''
correct = True
rating = 0
environment = 0
answer = ''
def __init__(self, id, text, correct, rating, environment, answer, link):
self.id = id
self.text = text
self.correct = correct
self.rating = rating
self.environment = environment
self.answer = answer
self.link = link
self.feedback = Feedback(correct, answer, link)
def perform(self, client):
client.user.add_rating(self.rating)
client.environment.add_scores(self.environment)
return self.feedback.response()
def response(self):
return {
"id": self.id,
"text": self.text,
}
class Feedback:
correct = True
text = 'OK'
link = ''
def __init__(self, correct, text, link):
self.correct = correct
self.text = text
self.link = link
def response(self):
return {
"result": self.correct,
"feedback": {
"text": self.text,
"link": self.link,
}
}
class User:
money = 4000
last_salary = None
statuses = (
{
'name': 'Ламер',
'salary': 1000,
'next level': 16,
}, {
'name': 'Уверенный пользователь',
'salary': 1000,
'next level': 10,
}, {
'name': 'Эникей',
'salary': 1000,
'next level': 50,
}, {
'name': 'Админ',
'salary': 1000,
'next level': 200,
}, {
'name': 'Хакер',
'salary': 1000,
'next level': 1000000,
},
)
status = statuses[0]
rating = 4
name = 'Ivan'
def __init__(self):
self.last_salary = datetime.now()
self.rating = 14
self.status = User.statuses[0]
self.money = 4000
def salary(self):
self.money += self.status['salary']
self.last_salary = datetime.now()
def add_rating(self, rating):
self.rating += rating
if self.rating >= self.status['next level']:
self.status = User.statuses[
User.statuses.index(self.status) + 1
]
def purchase(self, cost):
if self.money - cost >= 0:
self.money -= cost
else:
from exceptions import NotEnoughtMoney
raise NotEnoughtMoney()
class Environment:
states = [
{
'state': 0,
'cpu': 20,
'ram': 1000,
'network': 100,
'scores': 45,
}, {
'state': 1,
'cpu': 45,
'ram': 2500,
'network': 135,
'scores': 20,
}, {
'state': 2,
'cpu': 80,
'ram': 3800,
'network': 350,
'scores': 0,
}
]
scores = 0
last_av_check = None
state = states[0]
def __init__(self):
self.last_av_check = datetime.now()
self.scores = 50
self.state = Environment.states[0]
def add_scores(self, scores):
self.scores += scores
index = self.state['state']
if index < len(Environment.states) - 1:
if self.scores <= Environment.states[index + 1]['scores']:
self.state = Environment.states[index + 1]
if index > 0:
if self.scores > Environment.states[index - 1]['scores']:
self.state = Environment.states[index - 1]
def av_checked(self):
self.last_av_check = datetime.now()
def adminated(self): | self.state = Environment.states[0]
self.scores = 50
self.av_checked()
def response(self):
return {
"state": self.state['state'],
"cpu": self.state['cpu'],
"ram": self.state['ram'],
"network": self.state['network'],
"lastAvCheck": str(self.last_av_check),
}
class History:
events = []
def add(self, header, body, event_type, result):
self.events.append((header, body, event_type, result))
def response(self):
pass
class Rating:
players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
@classmethod
def get_rating(cls, client):
# it's a dummy!
response = []
cls.players[client.user.rating] = client.user.name
for key in sorted(cls.players, reverse=True):
response.append({
'rating': key,
'name': cls.players[key]
})
cls.players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
return response
class NewsList:
news = []
@classmethod
def set_hot_news(cls, news):
cls.news = [news]
# it's a dummy!
@classmethod
def add_news(cls, news):
cls.news.append(news)
@classmethod
def get_last(cls):
return choice(cls.news)
# it's a dummy too!
class News:
tag = None
text = None
link = None
def __init__(self, tag, text, link):
self.tag = tag
self.text = text
self.link = link
def response(self):
return {
"tag": self.tag,
"text": self.text,
"link": self.link,
}
class Buff:
name = ""
cost = 0
description = ""
id = 0
@classmethod
def action(cls, client):
pass
class BetterCallAdmin(Buff):
name = "Вызвать сисадмина"
cost = 500
description = "Вызвать системного администратора, который осуществит техническое обслуживание."
id = 1
@classmethod
def action(cls, client):
client.environment.adminated()
client.user.purchase(cls.cost)
class UpgrageAntivirus(Buff):
name = "Купить месяц pro-версии антивируса"
cost = 5000
description = "pro-версия антивируса автоматически проверяет файл при скачивании. Больше никаких зловредов в почте!"
id = 2
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class DiagnosePc(Buff):
name = "Провести автоматическую диагностику"
cost = 100
description = "За небольшую сумму Вы получаете подсказку, как решить проблему самостоятельно."
id = 3
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class MakeBackup(Buff):
name = "Сделать облачный бэкап"
cost = 200
description = "Сделать резервный образ системы и загрузить его в облако, дабы защитить от атак на файловую систему."
id = 4
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Offense:
name = ""
cost = 0
description = ""
id = 100
@classmethod
def action(cls, client):
pass
class SpamEverywhere(Offense):
name = "Заказать спам"
cost = 1000
description = "Заказать спам на всех игроков, кроме себя. Просто ради забавы."
id = 101
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class UseKnownExploit(Offense):
name = "Заказать Exploit-атаку"
cost = 2000
description = "Заказать атаку по нескольким игрокам, используя известную уязвимость. Успех зависит от версии антивирусных баз."
id = 102
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class SpamMailExploit(Offense):
name = "Заказать спам с червем"
cost = 5000
description = "Заказать спам со зловредом по игрокам, при каждом открытии которого Вы получите немного денег."
id = 103
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Shop:
buffs = [
BetterCallAdmin,
UpgrageAntivirus,
DiagnosePc,
MakeBackup,
]
offenses = [
SpamEverywhere,
UseKnownExploit,
SpamMailExploit
]
@classmethod
def response(cls):
buffs_offers = []
for buff in cls.buffs:
buffs_offers.append({
"name": buff.name,
"cost": buff.cost,
"description": buff.description,
"id": buff.id,
})
offenses_offers = []
for offense in cls.offenses:
offenses_offers.append({
"name": offense.name,
"cost": offense.cost,
"description": offense.description,
"id": offense.id,
})
return {
"buff": buffs_offers,
"attack": offenses_offers,
} | random_line_split | |
classes.py | from datetime import datetime
import uuid
import json
from random import choice
class Client:
guid = None
mailbox = None
background = None
user = None
environment = None
history = None
@classmethod
def get_next_guid(cls):
return str(uuid.uuid4())
def __init__(self, guid):
self.guid = guid
self.mailbox = MailBox()
self.background = UserBackground(None, None, None, None)
self.environment = Environment()
self.user = User()
self.history = History()
def responce(self):
return {
"rating": self.user.status['name'],
"money": self.user.money,
"env": self.environment.response(),
"news": NewsList.get_last().response(),
"events": {
"mails": self.mailbox.get_new_mails_count(),
"network": 0,
"devices": 0,
"software": 0,
}
}
class MailBox:
|
class UserBackground:
boss = 'начальник'
bssid = 'TPLink 0707'
email = ''
ldap = 'office'
def __init__(self, boss, bssid, email, ldap):
if boss:
self.boss = boss
if bssid:
self.bssid = bssid
if ldap:
self.ldap = ldap
if email:
self.email = email
else:
self.email = self.ldap + '@mail.ru'
def form_mail_from(self, address_type):
if address_type == 'начальник':
return self.boss
def form_mail_to(self, address_type):
if not address_type:
return self.email
class Mail:
counter = 0
id = None
header = ''
attachments = []
text = ''
sent = ''
received = ''
date = None
actions = []
@classmethod
def get_id(cls):
cls.counter += 1
return cls.counter
def __init__(self, header, attachments, text, sent, date, actions, receiver):
self.id = Mail.get_id()
self.header = header
self.attachments = attachments
self.text = text
# self.sent = UserBackground.form_mail_from(sent)
self.sent = sent
if not date:
self.date = datetime.now()
# self.receiver = UserBackground.form_mail_to(receiver)
self.receiver = receiver
self.actions = []
for i, action in enumerate(actions):
action['id'] = i
self.actions.append(Action(
i,
action['text'],
action['correct'],
action['rating'],
action['environment'],
action['answer'],
''
))
def perform_action(self, action_id, client):
responce = self.actions[action_id].perform(client)
client.mailbox.mail_processed(self.id)
return responce
def response(self):
actions = []
for action in self.actions:
actions.append(action.response())
return {
"id": self.id,
"header": self.header,
"text": self.text,
"sent": self.sent,
"receiver": self.receiver,
"date": self.date,
"attachments": self.attachments,
"actions": actions,
}
class Action:
id = 0
feedback = None
text = ''
correct = True
rating = 0
environment = 0
answer = ''
def __init__(self, id, text, correct, rating, environment, answer, link):
self.id = id
self.text = text
self.correct = correct
self.rating = rating
self.environment = environment
self.answer = answer
self.link = link
self.feedback = Feedback(correct, answer, link)
def perform(self, client):
client.user.add_rating(self.rating)
client.environment.add_scores(self.environment)
return self.feedback.response()
def response(self):
return {
"id": self.id,
"text": self.text,
}
class Feedback:
correct = True
text = 'OK'
link = ''
def __init__(self, correct, text, link):
self.correct = correct
self.text = text
self.link = link
def response(self):
return {
"result": self.correct,
"feedback": {
"text": self.text,
"link": self.link,
}
}
class User:
money = 4000
last_salary = None
statuses = (
{
'name': 'Ламер',
'salary': 1000,
'next level': 16,
}, {
'name': 'Уверенный пользователь',
'salary': 1000,
'next level': 10,
}, {
'name': 'Эникей',
'salary': 1000,
'next level': 50,
}, {
'name': 'Админ',
'salary': 1000,
'next level': 200,
}, {
'name': 'Хакер',
'salary': 1000,
'next level': 1000000,
},
)
status = statuses[0]
rating = 4
name = 'Ivan'
def __init__(self):
self.last_salary = datetime.now()
self.rating = 14
self.status = User.statuses[0]
self.money = 4000
def salary(self):
self.money += self.status['salary']
self.last_salary = datetime.now()
def add_rating(self, rating):
self.rating += rating
if self.rating >= self.status['next level']:
self.status = User.statuses[
User.statuses.index(self.status) + 1
]
def purchase(self, cost):
if self.money - cost >= 0:
self.money -= cost
else:
from exceptions import NotEnoughtMoney
raise NotEnoughtMoney()
class Environment:
states = [
{
'state': 0,
'cpu': 20,
'ram': 1000,
'network': 100,
'scores': 45,
}, {
'state': 1,
'cpu': 45,
'ram': 2500,
'network': 135,
'scores': 20,
}, {
'state': 2,
'cpu': 80,
'ram': 3800,
'network': 350,
'scores': 0,
}
]
scores = 0
last_av_check = None
state = states[0]
def __init__(self):
self.last_av_check = datetime.now()
self.scores = 50
self.state = Environment.states[0]
def add_scores(self, scores):
self.scores += scores
index = self.state['state']
if index < len(Environment.states) - 1:
if self.scores <= Environment.states[index + 1]['scores']:
self.state = Environment.states[index + 1]
if index > 0:
if self.scores > Environment.states[index - 1]['scores']:
self.state = Environment.states[index - 1]
def av_checked(self):
self.last_av_check = datetime.now()
def adminated(self):
self.state = Environment.states[0]
self.scores = 50
self.av_checked()
def response(self):
return {
"state": self.state['state'],
"cpu": self.state['cpu'],
"ram": self.state['ram'],
"network": self.state['network'],
"lastAvCheck": str(self.last_av_check),
}
class History:
events = []
def add(self, header, body, event_type, result):
self.events.append((header, body, event_type, result))
def response(self):
pass
class Rating:
players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
@classmethod
def get_rating(cls, client):
# it's a dummy!
response = []
cls.players[client.user.rating] = client.user.name
for key in sorted(cls.players, reverse=True):
response.append({
'rating': key,
'name': cls.players[key]
})
cls.players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
return response
class NewsList:
news = []
@classmethod
def set_hot_news(cls, news):
cls.news = [news]
# it's a dummy!
@classmethod
def add_news(cls, news):
cls.news.append(news)
@classmethod
def get_last(cls):
return choice(cls.news)
# it's a dummy too!
class News:
tag = None
text = None
link = None
def __init__(self, tag, text, link):
self.tag = tag
self.text = text
self.link = link
def response(self):
return {
"tag": self.tag,
"text": self.text,
"link": self.link,
}
class Buff:
name = ""
cost = 0
description = ""
id = 0
@classmethod
def action(cls, client):
pass
class BetterCallAdmin(Buff):
name = "Вызвать сисадмина"
cost = 500
description = "Вызвать системного администратора, который осуществит техническое обслуживание."
id = 1
@classmethod
def action(cls, client):
client.environment.adminated()
client.user.purchase(cls.cost)
class UpgrageAntivirus(Buff):
name = "Купить месяц pro-версии антивируса"
cost = 5000
description = "pro-версия антивируса автоматически проверяет файл при скачивании. Больше никаких зловредов в почте!"
id = 2
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class DiagnosePc(Buff):
name = "Провести автоматическую диагностику"
cost = 100
description = "За небольшую сумму Вы получаете подсказку, как решить проблему самостоятельно."
id = 3
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class MakeBackup(Buff):
name = "Сделать облачный бэкап"
cost = 200
description = "Сделать резервный образ системы и загрузить его в облако, дабы защитить от атак на файловую систему."
id = 4
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Offense:
name = ""
cost = 0
description = ""
id = 100
@classmethod
def action(cls, client):
pass
class SpamEverywhere(Offense):
name = "Заказать спам"
cost = 1000
description = "Заказать спам на всех игроков, кроме себя. Просто ради забавы."
id = 101
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class UseKnownExploit(Offense):
name = "Заказать Exploit-атаку"
cost = 2000
description = "Заказать атаку по нескольким игрокам, используя известную уязвимость. Успех зависит от версии антивирусных баз."
id = 102
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class SpamMailExploit(Offense):
name = "Заказать спам с червем"
cost = 5000
description = "Заказать спам со зловредом по игрокам, при каждом открытии которого Вы получите немного денег."
id = 103
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Shop:
buffs = [
BetterCallAdmin,
UpgrageAntivirus,
DiagnosePc,
MakeBackup,
]
offenses = [
SpamEverywhere,
UseKnownExploit,
SpamMailExploit
]
@classmethod
def response(cls):
buffs_offers = []
for buff in cls.buffs:
buffs_offers.append({
"name": buff.name,
"cost": buff.cost,
"description": buff.description,
"id": buff.id,
})
offenses_offers = []
for offense in cls.offenses:
offenses_offers.append({
"name": offense.name,
"cost": offense.cost,
"description": offense.description,
"id": offense.id,
})
return {
"buff": buffs_offers,
"attack": offenses_offers,
}
| mails = {}
new = []
archive = []
def __init__(self):
pass
def add_new(self, mail):
self.new.append(mail)
self.mails[mail.id] = mail
def mail_processed(self, mail_id):
mail = self.mails[mail_id]
self.new.remove(mail)
self.archive.append(mail)
def get_new_mails(self):
return self.new
def get_new_mails_count(self):
return len(self.new)
def responce(self):
response = []
for mail in self.get_new_mails():
response.append(mail.response())
return {"mails": response} | identifier_body |
classes.py | from datetime import datetime
import uuid
import json
from random import choice
class Client:
guid = None
mailbox = None
background = None
user = None
environment = None
history = None
@classmethod
def get_next_guid(cls):
return str(uuid.uuid4())
def __init__(self, guid):
self.guid = guid
self.mailbox = MailBox()
self.background = UserBackground(None, None, None, None)
self.environment = Environment()
self.user = User()
self.history = History()
def responce(self):
return {
"rating": self.user.status['name'],
"money": self.user.money,
"env": self.environment.response(),
"news": NewsList.get_last().response(),
"events": {
"mails": self.mailbox.get_new_mails_count(),
"network": 0,
"devices": 0,
"software": 0,
}
}
class MailBox:
mails = {}
new = []
archive = []
def __init__(self):
pass
def add_new(self, mail):
self.new.append(mail)
self.mails[mail.id] = mail
def mail_processed(self, mail_id):
mail = self.mails[mail_id]
self.new.remove(mail)
self.archive.append(mail)
def get_new_mails(self):
return self.new
def get_new_mails_count(self):
return len(self.new)
def responce(self):
response = []
for mail in self.get_new_mails():
response.append(mail.response())
return {"mails": response}
class UserBackground:
boss = 'начальник'
bssid = 'TPLink 0707'
email = ''
ldap = 'office'
def __init__(self, boss, bssid, email, ldap):
if boss:
self.boss = boss
if bssid:
self.bssid = bssid
if ldap:
self.ldap = ldap
if email:
self.email = email
else:
self.email = self.ldap + '@mail.ru'
def form_mail_from(self, address_type):
if address_type == 'начальник':
return self.boss
def form_mail_to(self, address_type):
if not address_type:
return self.email
class Mail:
counter = 0
id = None
header = ''
attachments = []
text = ''
sent = ''
received = ''
date = None
actions = []
@classmethod
def get_id(cls):
cls.counter += 1
return cls.counter
def __init__(self, header, attachments, text, sent, date, actions, receiver):
self.id = Mail.get_id()
self.header = header
self.attachments = attachments
self.text = text
# self.sent = UserBackground.form_mail_from(sent)
self.sent = sent
if not date:
self.date = datetime.now()
# self.receiver = UserBackground.form_mail_to(receiver)
self.receiver = receiver
self.actions = []
for i, action in enumerate(actions):
action['id'] = i
self.actions.append(Action(
i,
action['text'],
action['correct'],
action['rating'],
action['environment'],
action['answer'],
''
))
def perform_action(self, action_id, client):
responce = self.actions[action_id].perform(client)
client.mailbox.mail_processed(self.id)
return responce
def response(self):
actions = []
for action in self.actions:
actions.append(act | "id": self.id,
"header": self.header,
"text": self.text,
"sent": self.sent,
"receiver": self.receiver,
"date": self.date,
"attachments": self.attachments,
"actions": actions,
}
class Action:
id = 0
feedback = None
text = ''
correct = True
rating = 0
environment = 0
answer = ''
def __init__(self, id, text, correct, rating, environment, answer, link):
self.id = id
self.text = text
self.correct = correct
self.rating = rating
self.environment = environment
self.answer = answer
self.link = link
self.feedback = Feedback(correct, answer, link)
def perform(self, client):
client.user.add_rating(self.rating)
client.environment.add_scores(self.environment)
return self.feedback.response()
def response(self):
return {
"id": self.id,
"text": self.text,
}
class Feedback:
correct = True
text = 'OK'
link = ''
def __init__(self, correct, text, link):
self.correct = correct
self.text = text
self.link = link
def response(self):
return {
"result": self.correct,
"feedback": {
"text": self.text,
"link": self.link,
}
}
class User:
money = 4000
last_salary = None
statuses = (
{
'name': 'Ламер',
'salary': 1000,
'next level': 16,
}, {
'name': 'Уверенный пользователь',
'salary': 1000,
'next level': 10,
}, {
'name': 'Эникей',
'salary': 1000,
'next level': 50,
}, {
'name': 'Админ',
'salary': 1000,
'next level': 200,
}, {
'name': 'Хакер',
'salary': 1000,
'next level': 1000000,
},
)
status = statuses[0]
rating = 4
name = 'Ivan'
def __init__(self):
self.last_salary = datetime.now()
self.rating = 14
self.status = User.statuses[0]
self.money = 4000
def salary(self):
self.money += self.status['salary']
self.last_salary = datetime.now()
def add_rating(self, rating):
self.rating += rating
if self.rating >= self.status['next level']:
self.status = User.statuses[
User.statuses.index(self.status) + 1
]
def purchase(self, cost):
if self.money - cost >= 0:
self.money -= cost
else:
from exceptions import NotEnoughtMoney
raise NotEnoughtMoney()
class Environment:
states = [
{
'state': 0,
'cpu': 20,
'ram': 1000,
'network': 100,
'scores': 45,
}, {
'state': 1,
'cpu': 45,
'ram': 2500,
'network': 135,
'scores': 20,
}, {
'state': 2,
'cpu': 80,
'ram': 3800,
'network': 350,
'scores': 0,
}
]
scores = 0
last_av_check = None
state = states[0]
def __init__(self):
self.last_av_check = datetime.now()
self.scores = 50
self.state = Environment.states[0]
def add_scores(self, scores):
self.scores += scores
index = self.state['state']
if index < len(Environment.states) - 1:
if self.scores <= Environment.states[index + 1]['scores']:
self.state = Environment.states[index + 1]
if index > 0:
if self.scores > Environment.states[index - 1]['scores']:
self.state = Environment.states[index - 1]
def av_checked(self):
self.last_av_check = datetime.now()
def adminated(self):
self.state = Environment.states[0]
self.scores = 50
self.av_checked()
def response(self):
return {
"state": self.state['state'],
"cpu": self.state['cpu'],
"ram": self.state['ram'],
"network": self.state['network'],
"lastAvCheck": str(self.last_av_check),
}
class History:
events = []
def add(self, header, body, event_type, result):
self.events.append((header, body, event_type, result))
def response(self):
pass
class Rating:
players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
@classmethod
def get_rating(cls, client):
# it's a dummy!
response = []
cls.players[client.user.rating] = client.user.name
for key in sorted(cls.players, reverse=True):
response.append({
'rating': key,
'name': cls.players[key]
})
cls.players = {
4: 'Dmitriy',
7: 'Sergei',
24: 'Semyon',
79: 'Vladimir'
}
return response
class NewsList:
news = []
@classmethod
def set_hot_news(cls, news):
cls.news = [news]
# it's a dummy!
@classmethod
def add_news(cls, news):
cls.news.append(news)
@classmethod
def get_last(cls):
return choice(cls.news)
# it's a dummy too!
class News:
tag = None
text = None
link = None
def __init__(self, tag, text, link):
self.tag = tag
self.text = text
self.link = link
def response(self):
return {
"tag": self.tag,
"text": self.text,
"link": self.link,
}
class Buff:
name = ""
cost = 0
description = ""
id = 0
@classmethod
def action(cls, client):
pass
class BetterCallAdmin(Buff):
name = "Вызвать сисадмина"
cost = 500
description = "Вызвать системного администратора, который осуществит техническое обслуживание."
id = 1
@classmethod
def action(cls, client):
client.environment.adminated()
client.user.purchase(cls.cost)
class UpgrageAntivirus(Buff):
name = "Купить месяц pro-версии антивируса"
cost = 5000
description = "pro-версия антивируса автоматически проверяет файл при скачивании. Больше никаких зловредов в почте!"
id = 2
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class DiagnosePc(Buff):
name = "Провести автоматическую диагностику"
cost = 100
description = "За небольшую сумму Вы получаете подсказку, как решить проблему самостоятельно."
id = 3
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class MakeBackup(Buff):
name = "Сделать облачный бэкап"
cost = 200
description = "Сделать резервный образ системы и загрузить его в облако, дабы защитить от атак на файловую систему."
id = 4
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Offense:
name = ""
cost = 0
description = ""
id = 100
@classmethod
def action(cls, client):
pass
class SpamEverywhere(Offense):
name = "Заказать спам"
cost = 1000
description = "Заказать спам на всех игроков, кроме себя. Просто ради забавы."
id = 101
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class UseKnownExploit(Offense):
name = "Заказать Exploit-атаку"
cost = 2000
description = "Заказать атаку по нескольким игрокам, используя известную уязвимость. Успех зависит от версии антивирусных баз."
id = 102
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class SpamMailExploit(Offense):
name = "Заказать спам с червем"
cost = 5000
description = "Заказать спам со зловредом по игрокам, при каждом открытии которого Вы получите немного денег."
id = 103
@classmethod
def action(cls, client):
client.user.purchase(cls.cost)
class Shop:
buffs = [
BetterCallAdmin,
UpgrageAntivirus,
DiagnosePc,
MakeBackup,
]
offenses = [
SpamEverywhere,
UseKnownExploit,
SpamMailExploit
]
@classmethod
def response(cls):
buffs_offers = []
for buff in cls.buffs:
buffs_offers.append({
"name": buff.name,
"cost": buff.cost,
"description": buff.description,
"id": buff.id,
})
offenses_offers = []
for offense in cls.offenses:
offenses_offers.append({
"name": offense.name,
"cost": offense.cost,
"description": offense.description,
"id": offense.id,
})
return {
"buff": buffs_offers,
"attack": offenses_offers,
}
| ion.response())
return {
| conditional_block |
model.rs | use chrono::{DateTime, Utc};
use failure::{format_err, Error};
use serde::{Deserialize, Serialize};
use std::{borrow::Borrow, convert::TryFrom, fmt};
#[derive(Debug, Default)]
pub struct Root {
pub channel_url: Option<String>,
pub cache_url: Option<String>,
pub git_revision: Option<String>,
pub fetch_time: Option<DateTime<Utc>>,
pub status: RootStatus,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum RootStatus {
Pending,
Downloading,
Available,
}
impl Default for RootStatus {
fn default() -> Self {
Self::Pending
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct Nar {
pub store_path: StorePath,
pub meta: NarMeta,
pub references: String,
}
// https://github.com/NixOS/nix/blob/61e816217bfdfffd39c130c7cd24f07e640098fc/src/libstore/schema.sql
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct NarMeta {
pub url: String,
pub compression: Option<String>,
pub file_hash: Option<String>,
pub file_size: Option<u64>,
pub nar_hash: String,
pub nar_size: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub deriver: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sig: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ca: Option<String>,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum NarStatus {
Pending,
Available,
Trashed,
}
impl Default for NarStatus {
fn default() -> Self {
Self::Pending
}
}
impl Nar {
fn ref_paths(&self) -> impl Iterator<Item = Result<StorePath, Error>> + '_ {
// Yield nothing on empty string.
self.references.split_terminator(" ").map(move |basename| {
StorePath::try_from(format!("{}/{}", self.store_path.root(), basename))
})
}
pub fn ref_hashes(&self) -> impl Iterator<Item = Result<StorePathHash, Error>> + '_ {
self.ref_paths().map(|r| r.map(|path| path.hash()))
}
pub fn format_nar_info<'a>(&'a self) -> impl fmt::Display + 'a {
struct Fmt<'a>(&'a Nar);
impl fmt::Display for Fmt<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (nar, meta) = (&self.0, &self.0.meta);
write!(f, "StorePath: {}\n", nar.store_path)?;
write!(f, "URL: {}\n", meta.url)?;
if let Some(comp) = &meta.compression {
write!(f, "Compression: {}\n", comp)?;
}
if let Some(hash) = &meta.file_hash {
write!(f, "FileHash: {}\n", hash)?;
}
if let Some(size) = &meta.file_size {
write!(f, "FileSize: {}\n", size)?;
}
write!(f, "NarHash: {}\n", meta.nar_hash)?;
write!(f, "NarSize: {}\n", meta.nar_size)?;
write!(f, "References: {}\n", nar.references)?;
if let Some(sig) = &meta.sig {
write!(f, "Sig: {}\n", sig)?;
}
if let Some(deriver) = &meta.deriver {
write!(f, "Deriver: {}\n", deriver)?;
}
if let Some(ca) = &meta.ca {
write!(f, "CA: {}\n", ca)?;
}
Ok(())
}
}
Fmt(self)
}
pub fn parse_nar_info(info: &str) -> Result<Self, Error> {
Self::parse_nar_info_inner(info).map_err(|err| format_err!("Invalid narinfo: {}", err))
}
fn parse_nar_info_inner(info: &str) -> Result<Self, &'static str> {
let (
mut store_path,
mut url,
mut compression,
mut file_hash,
mut file_size,
mut nar_hash,
mut nar_size,
mut references,
mut deriver,
mut sig,
mut ca,
) = Default::default();
for line in info.lines() {
if line.is_empty() {
continue;
}
let sep = line.find(": ").ok_or("Missing colon")?;
let (k, v) = (&line[..sep], &line[sep + 2..]);
match k {
"StorePath" => {
store_path = Some(StorePath::try_from(v).map_err(|_| "Invalid StorePath")?);
}
"URL" => url = Some(v),
"Compression" => compression = Some(v),
"FileHash" => file_hash = Some(v),
"FileSize" => file_size = Some(v.parse().map_err(|_| "Invalid FileSize")?),
"NarHash" => nar_hash = Some(v),
"NarSize" => nar_size = Some(v.parse().map_err(|_| "Invalid NarSize")?),
"References" => references = Some(v),
"Deriver" => deriver = Some(v),
"Sig" => sig = Some(v),
"CA" => ca = Some(v),
_ => return Err("Unknown field"),
}
}
Ok(Nar {
store_path: store_path.ok_or("Missing StorePath")?,
meta: NarMeta {
compression: compression.map(|s| s.to_owned()),
url: url.ok_or("Missing URL")?.to_owned(),
file_hash: file_hash.map(|s| s.to_owned()),
file_size: file_size,
nar_hash: nar_hash.ok_or("Missing NarHash")?.to_owned(),
nar_size: nar_size.ok_or("Missing NarSize")?,
deriver: deriver.map(|s| s.to_owned()),
sig: sig.map(|s| s.to_owned()),
ca: ca.map(|s| s.to_owned()),
},
references: references.ok_or("Missing References")?.to_owned(),
})
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct StorePathHash([u8; Self::LEN]);
impl StorePathHash {
pub const LEN: usize = 32;
pub fn as_str(&self) -> &str {
std::str::from_utf8(&self.0).unwrap()
}
}
impl fmt::Display for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl fmt::Debug for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("StorePathHash")
.field(&self.to_string())
.finish()
}
}
impl Borrow<[u8]> for StorePathHash {
fn borrow(&self) -> &[u8] {
&self.0
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct StorePath {
path: String,
}
// FIXME: Allow non-default store root.
impl StorePath {
const STORE_PREFIX: &'static str = "/nix/store/";
const SEP_POS: usize = Self::STORE_PREFIX.len() + StorePathHash::LEN;
const MIN_LEN: usize = Self::SEP_POS + 1 + 1;
const MAX_LEN: usize = 212;
pub fn path(&self) -> &str {
&self.path
}
pub fn root(&self) -> &str {
&Self::STORE_PREFIX[..Self::STORE_PREFIX.len() - 1]
}
pub fn hash_str(&self) -> &str {
&self.path[Self::STORE_PREFIX.len()..Self::SEP_POS]
}
pub fn | (&self) -> StorePathHash {
StorePathHash(
<[u8; StorePathHash::LEN]>::try_from(
self.path[Self::STORE_PREFIX.len()..Self::SEP_POS].as_bytes(),
)
.unwrap(),
)
}
pub fn name(&self) -> &str {
&self.path[Self::SEP_POS + 1..]
}
}
impl TryFrom<String> for StorePath {
type Error = Error;
// https://github.com/NixOS/nix/blob/abb8ef619ba2fab3ae16fb5b5430215905bac723/src/libstore/store-api.cc#L85
fn try_from(path: String) -> Result<Self, Self::Error> {
use failure::ensure;
fn is_valid_hash(s: &[u8]) -> bool {
s.iter().all(|&b| match b {
b'e' | b'o' | b'u' | b't' => false,
b'a'..=b'z' | b'0'..=b'9' => true,
_ => false,
})
}
fn is_valid_name(s: &[u8]) -> bool {
const VALID_CHARS: &[u8] = b"+-._?=";
s.iter()
.all(|&b| b.is_ascii_alphanumeric() || VALID_CHARS.contains(&b))
}
ensure!(
Self::MIN_LEN <= path.len() && path.len() <= Self::MAX_LEN,
"Length {} is not in range [{}, {}]",
path.len(),
Self::MIN_LEN,
Self::MAX_LEN,
);
ensure!(path.is_ascii(), "Not ascii string: {}", path);
ensure!(
path.as_bytes()[Self::SEP_POS] == b'-',
"Hash seperator `-` not found",
);
let hash = &path[Self::STORE_PREFIX.len()..Self::SEP_POS];
let name = &path[Self::SEP_POS + 1..];
ensure!(is_valid_hash(hash.as_bytes()), "Invalid hash '{}'", hash);
ensure!(is_valid_name(name.as_bytes()), "Invalid name '{}'", name);
// Already checked
Ok(Self {
path: path.to_owned(),
})
}
}
impl TryFrom<&'_ str> for StorePath {
type Error = Error;
fn try_from(path: &'_ str) -> Result<Self, Self::Error> {
Self::try_from(path.to_owned())
}
}
impl fmt::Display for StorePath {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.path(), f)
}
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_snapshot;
#[test]
fn test_nar_info_format() {
let mut nar = Nar {
store_path: StorePath::try_from(
"/nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10",
)
.unwrap(),
meta: NarMeta {
url: "some/url".to_owned(),
compression: Some("xz".to_owned()),
file_hash: Some("file:hash".to_owned()),
file_size: Some(123),
nar_hash: "nar:hash".to_owned(),
nar_size: 456,
deriver: Some("some.drv".to_owned()),
sig: Some("s:i/g 2".to_owned()),
ca: Some("fixed:hash".to_owned()),
},
references: "ref1 ref2".to_owned(),
};
assert_snapshot!(nar.format_nar_info().to_string(), @r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References: ref1 ref2
Sig: s:i/g 2
Deriver: some.drv
CA: fixed:hash
"###);
nar.references = String::new();
nar.meta.deriver = None;
nar.meta.ca = None;
assert_snapshot!(nar.format_nar_info().to_string(), @r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References:
Sig: s:i/g 2
"###);
}
#[test]
fn test_nar_info_parse() {
let raw = r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References: ref1 ref2
Sig: s:i/g 2
Deriver: some.drv
CA: fixed:hash
"###;
let nar = Nar {
store_path: StorePath::try_from(
"/nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10",
)
.unwrap(),
meta: NarMeta {
url: "some/url".to_owned(),
compression: Some("xz".to_owned()),
file_hash: Some("file:hash".to_owned()),
file_size: Some(123),
nar_hash: "nar:hash".to_owned(),
nar_size: 456,
deriver: Some("some.drv".to_owned()),
sig: Some("s:i/g 2".to_owned()),
ca: Some("fixed:hash".to_owned()),
},
references: "ref1 ref2".to_owned(),
};
assert_eq!(Nar::parse_nar_info(raw).unwrap(), nar);
}
}
| hash | identifier_name |
model.rs | use chrono::{DateTime, Utc};
use failure::{format_err, Error};
use serde::{Deserialize, Serialize};
use std::{borrow::Borrow, convert::TryFrom, fmt};
#[derive(Debug, Default)]
pub struct Root {
pub channel_url: Option<String>,
pub cache_url: Option<String>,
pub git_revision: Option<String>,
pub fetch_time: Option<DateTime<Utc>>,
pub status: RootStatus,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum RootStatus {
Pending,
Downloading,
Available,
}
impl Default for RootStatus {
fn default() -> Self {
Self::Pending
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct Nar {
pub store_path: StorePath,
pub meta: NarMeta,
pub references: String,
}
// https://github.com/NixOS/nix/blob/61e816217bfdfffd39c130c7cd24f07e640098fc/src/libstore/schema.sql
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct NarMeta {
pub url: String,
pub compression: Option<String>,
pub file_hash: Option<String>,
pub file_size: Option<u64>,
pub nar_hash: String,
pub nar_size: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub deriver: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sig: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ca: Option<String>,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum NarStatus {
Pending,
Available,
Trashed,
}
impl Default for NarStatus {
fn default() -> Self {
Self::Pending
}
}
impl Nar {
fn ref_paths(&self) -> impl Iterator<Item = Result<StorePath, Error>> + '_ {
// Yield nothing on empty string.
self.references.split_terminator(" ").map(move |basename| {
StorePath::try_from(format!("{}/{}", self.store_path.root(), basename))
})
}
pub fn ref_hashes(&self) -> impl Iterator<Item = Result<StorePathHash, Error>> + '_ {
self.ref_paths().map(|r| r.map(|path| path.hash()))
}
pub fn format_nar_info<'a>(&'a self) -> impl fmt::Display + 'a {
struct Fmt<'a>(&'a Nar);
impl fmt::Display for Fmt<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (nar, meta) = (&self.0, &self.0.meta);
write!(f, "StorePath: {}\n", nar.store_path)?;
write!(f, "URL: {}\n", meta.url)?;
if let Some(comp) = &meta.compression {
write!(f, "Compression: {}\n", comp)?;
}
if let Some(hash) = &meta.file_hash {
write!(f, "FileHash: {}\n", hash)?;
}
if let Some(size) = &meta.file_size {
write!(f, "FileSize: {}\n", size)?;
}
write!(f, "NarHash: {}\n", meta.nar_hash)?;
write!(f, "NarSize: {}\n", meta.nar_size)?;
write!(f, "References: {}\n", nar.references)?;
if let Some(sig) = &meta.sig {
write!(f, "Sig: {}\n", sig)?;
}
if let Some(deriver) = &meta.deriver {
write!(f, "Deriver: {}\n", deriver)?;
}
if let Some(ca) = &meta.ca {
write!(f, "CA: {}\n", ca)?;
}
Ok(())
}
}
Fmt(self)
}
pub fn parse_nar_info(info: &str) -> Result<Self, Error> {
Self::parse_nar_info_inner(info).map_err(|err| format_err!("Invalid narinfo: {}", err))
}
fn parse_nar_info_inner(info: &str) -> Result<Self, &'static str> {
let (
mut store_path,
mut url,
mut compression,
mut file_hash,
mut file_size,
mut nar_hash,
mut nar_size,
mut references,
mut deriver,
mut sig,
mut ca,
) = Default::default();
for line in info.lines() {
if line.is_empty() {
continue;
}
let sep = line.find(": ").ok_or("Missing colon")?;
let (k, v) = (&line[..sep], &line[sep + 2..]);
match k {
"StorePath" => {
store_path = Some(StorePath::try_from(v).map_err(|_| "Invalid StorePath")?);
}
"URL" => url = Some(v),
"Compression" => compression = Some(v),
"FileHash" => file_hash = Some(v),
"FileSize" => file_size = Some(v.parse().map_err(|_| "Invalid FileSize")?),
"NarHash" => nar_hash = Some(v),
"NarSize" => nar_size = Some(v.parse().map_err(|_| "Invalid NarSize")?),
"References" => references = Some(v),
"Deriver" => deriver = Some(v),
"Sig" => sig = Some(v),
"CA" => ca = Some(v),
_ => return Err("Unknown field"),
}
}
Ok(Nar {
store_path: store_path.ok_or("Missing StorePath")?,
meta: NarMeta {
compression: compression.map(|s| s.to_owned()),
url: url.ok_or("Missing URL")?.to_owned(),
file_hash: file_hash.map(|s| s.to_owned()),
file_size: file_size,
nar_hash: nar_hash.ok_or("Missing NarHash")?.to_owned(),
nar_size: nar_size.ok_or("Missing NarSize")?,
deriver: deriver.map(|s| s.to_owned()),
sig: sig.map(|s| s.to_owned()),
ca: ca.map(|s| s.to_owned()),
},
references: references.ok_or("Missing References")?.to_owned(),
})
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct StorePathHash([u8; Self::LEN]);
impl StorePathHash {
pub const LEN: usize = 32;
pub fn as_str(&self) -> &str {
std::str::from_utf8(&self.0).unwrap()
}
}
impl fmt::Display for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl fmt::Debug for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("StorePathHash")
.field(&self.to_string())
.finish()
}
}
impl Borrow<[u8]> for StorePathHash {
fn borrow(&self) -> &[u8] {
&self.0
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct StorePath {
path: String,
}
// FIXME: Allow non-default store root.
impl StorePath {
const STORE_PREFIX: &'static str = "/nix/store/";
const SEP_POS: usize = Self::STORE_PREFIX.len() + StorePathHash::LEN;
const MIN_LEN: usize = Self::SEP_POS + 1 + 1;
const MAX_LEN: usize = 212;
pub fn path(&self) -> &str |
pub fn root(&self) -> &str {
&Self::STORE_PREFIX[..Self::STORE_PREFIX.len() - 1]
}
pub fn hash_str(&self) -> &str {
&self.path[Self::STORE_PREFIX.len()..Self::SEP_POS]
}
pub fn hash(&self) -> StorePathHash {
StorePathHash(
<[u8; StorePathHash::LEN]>::try_from(
self.path[Self::STORE_PREFIX.len()..Self::SEP_POS].as_bytes(),
)
.unwrap(),
)
}
pub fn name(&self) -> &str {
&self.path[Self::SEP_POS + 1..]
}
}
impl TryFrom<String> for StorePath {
type Error = Error;
// https://github.com/NixOS/nix/blob/abb8ef619ba2fab3ae16fb5b5430215905bac723/src/libstore/store-api.cc#L85
fn try_from(path: String) -> Result<Self, Self::Error> {
use failure::ensure;
fn is_valid_hash(s: &[u8]) -> bool {
s.iter().all(|&b| match b {
b'e' | b'o' | b'u' | b't' => false,
b'a'..=b'z' | b'0'..=b'9' => true,
_ => false,
})
}
fn is_valid_name(s: &[u8]) -> bool {
const VALID_CHARS: &[u8] = b"+-._?=";
s.iter()
.all(|&b| b.is_ascii_alphanumeric() || VALID_CHARS.contains(&b))
}
ensure!(
Self::MIN_LEN <= path.len() && path.len() <= Self::MAX_LEN,
"Length {} is not in range [{}, {}]",
path.len(),
Self::MIN_LEN,
Self::MAX_LEN,
);
ensure!(path.is_ascii(), "Not ascii string: {}", path);
ensure!(
path.as_bytes()[Self::SEP_POS] == b'-',
"Hash seperator `-` not found",
);
let hash = &path[Self::STORE_PREFIX.len()..Self::SEP_POS];
let name = &path[Self::SEP_POS + 1..];
ensure!(is_valid_hash(hash.as_bytes()), "Invalid hash '{}'", hash);
ensure!(is_valid_name(name.as_bytes()), "Invalid name '{}'", name);
// Already checked
Ok(Self {
path: path.to_owned(),
})
}
}
impl TryFrom<&'_ str> for StorePath {
type Error = Error;
fn try_from(path: &'_ str) -> Result<Self, Self::Error> {
Self::try_from(path.to_owned())
}
}
impl fmt::Display for StorePath {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.path(), f)
}
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_snapshot;
#[test]
fn test_nar_info_format() {
let mut nar = Nar {
store_path: StorePath::try_from(
"/nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10",
)
.unwrap(),
meta: NarMeta {
url: "some/url".to_owned(),
compression: Some("xz".to_owned()),
file_hash: Some("file:hash".to_owned()),
file_size: Some(123),
nar_hash: "nar:hash".to_owned(),
nar_size: 456,
deriver: Some("some.drv".to_owned()),
sig: Some("s:i/g 2".to_owned()),
ca: Some("fixed:hash".to_owned()),
},
references: "ref1 ref2".to_owned(),
};
assert_snapshot!(nar.format_nar_info().to_string(), @r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References: ref1 ref2
Sig: s:i/g 2
Deriver: some.drv
CA: fixed:hash
"###);
nar.references = String::new();
nar.meta.deriver = None;
nar.meta.ca = None;
assert_snapshot!(nar.format_nar_info().to_string(), @r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References:
Sig: s:i/g 2
"###);
}
#[test]
fn test_nar_info_parse() {
let raw = r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References: ref1 ref2
Sig: s:i/g 2
Deriver: some.drv
CA: fixed:hash
"###;
let nar = Nar {
store_path: StorePath::try_from(
"/nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10",
)
.unwrap(),
meta: NarMeta {
url: "some/url".to_owned(),
compression: Some("xz".to_owned()),
file_hash: Some("file:hash".to_owned()),
file_size: Some(123),
nar_hash: "nar:hash".to_owned(),
nar_size: 456,
deriver: Some("some.drv".to_owned()),
sig: Some("s:i/g 2".to_owned()),
ca: Some("fixed:hash".to_owned()),
},
references: "ref1 ref2".to_owned(),
};
assert_eq!(Nar::parse_nar_info(raw).unwrap(), nar);
}
}
| {
&self.path
} | identifier_body |
model.rs | use chrono::{DateTime, Utc};
use failure::{format_err, Error};
use serde::{Deserialize, Serialize};
use std::{borrow::Borrow, convert::TryFrom, fmt};
#[derive(Debug, Default)]
pub struct Root {
pub channel_url: Option<String>,
pub cache_url: Option<String>,
pub git_revision: Option<String>,
pub fetch_time: Option<DateTime<Utc>>,
pub status: RootStatus,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum RootStatus {
Pending,
Downloading,
Available,
}
impl Default for RootStatus {
fn default() -> Self {
Self::Pending
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct Nar {
pub store_path: StorePath,
pub meta: NarMeta,
pub references: String,
}
// https://github.com/NixOS/nix/blob/61e816217bfdfffd39c130c7cd24f07e640098fc/src/libstore/schema.sql
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct NarMeta {
pub url: String,
pub compression: Option<String>,
pub file_hash: Option<String>,
pub file_size: Option<u64>,
pub nar_hash: String,
pub nar_size: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub deriver: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sig: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ca: Option<String>,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum NarStatus {
Pending,
Available,
Trashed,
}
impl Default for NarStatus {
fn default() -> Self {
Self::Pending
}
}
impl Nar {
fn ref_paths(&self) -> impl Iterator<Item = Result<StorePath, Error>> + '_ {
// Yield nothing on empty string.
self.references.split_terminator(" ").map(move |basename| {
StorePath::try_from(format!("{}/{}", self.store_path.root(), basename))
})
}
pub fn ref_hashes(&self) -> impl Iterator<Item = Result<StorePathHash, Error>> + '_ {
self.ref_paths().map(|r| r.map(|path| path.hash()))
}
pub fn format_nar_info<'a>(&'a self) -> impl fmt::Display + 'a {
struct Fmt<'a>(&'a Nar);
impl fmt::Display for Fmt<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (nar, meta) = (&self.0, &self.0.meta);
write!(f, "StorePath: {}\n", nar.store_path)?;
write!(f, "URL: {}\n", meta.url)?;
if let Some(comp) = &meta.compression {
write!(f, "Compression: {}\n", comp)?;
}
if let Some(hash) = &meta.file_hash {
write!(f, "FileHash: {}\n", hash)?;
}
if let Some(size) = &meta.file_size {
write!(f, "FileSize: {}\n", size)?;
}
write!(f, "NarHash: {}\n", meta.nar_hash)?;
write!(f, "NarSize: {}\n", meta.nar_size)?;
write!(f, "References: {}\n", nar.references)?;
if let Some(sig) = &meta.sig {
write!(f, "Sig: {}\n", sig)?;
}
if let Some(deriver) = &meta.deriver {
write!(f, "Deriver: {}\n", deriver)?;
}
if let Some(ca) = &meta.ca {
write!(f, "CA: {}\n", ca)?;
}
Ok(())
}
}
Fmt(self)
}
pub fn parse_nar_info(info: &str) -> Result<Self, Error> {
Self::parse_nar_info_inner(info).map_err(|err| format_err!("Invalid narinfo: {}", err))
}
fn parse_nar_info_inner(info: &str) -> Result<Self, &'static str> {
let (
mut store_path,
mut url,
mut compression,
mut file_hash,
mut file_size,
mut nar_hash,
mut nar_size,
mut references,
mut deriver,
mut sig,
mut ca,
) = Default::default();
for line in info.lines() {
if line.is_empty() {
continue;
}
let sep = line.find(": ").ok_or("Missing colon")?;
let (k, v) = (&line[..sep], &line[sep + 2..]);
match k {
"StorePath" => {
store_path = Some(StorePath::try_from(v).map_err(|_| "Invalid StorePath")?);
}
"URL" => url = Some(v),
"Compression" => compression = Some(v),
"FileHash" => file_hash = Some(v),
"FileSize" => file_size = Some(v.parse().map_err(|_| "Invalid FileSize")?),
"NarHash" => nar_hash = Some(v),
"NarSize" => nar_size = Some(v.parse().map_err(|_| "Invalid NarSize")?),
"References" => references = Some(v),
"Deriver" => deriver = Some(v),
"Sig" => sig = Some(v),
"CA" => ca = Some(v),
_ => return Err("Unknown field"),
}
}
Ok(Nar {
store_path: store_path.ok_or("Missing StorePath")?,
meta: NarMeta {
compression: compression.map(|s| s.to_owned()),
url: url.ok_or("Missing URL")?.to_owned(),
file_hash: file_hash.map(|s| s.to_owned()),
file_size: file_size,
nar_hash: nar_hash.ok_or("Missing NarHash")?.to_owned(),
nar_size: nar_size.ok_or("Missing NarSize")?,
deriver: deriver.map(|s| s.to_owned()),
sig: sig.map(|s| s.to_owned()),
ca: ca.map(|s| s.to_owned()),
},
references: references.ok_or("Missing References")?.to_owned(),
})
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct StorePathHash([u8; Self::LEN]);
impl StorePathHash {
pub const LEN: usize = 32;
pub fn as_str(&self) -> &str {
std::str::from_utf8(&self.0).unwrap()
}
}
impl fmt::Display for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl fmt::Debug for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("StorePathHash")
.field(&self.to_string())
.finish()
}
}
impl Borrow<[u8]> for StorePathHash {
fn borrow(&self) -> &[u8] {
&self.0
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct StorePath {
path: String,
}
// FIXME: Allow non-default store root.
impl StorePath {
const STORE_PREFIX: &'static str = "/nix/store/";
const SEP_POS: usize = Self::STORE_PREFIX.len() + StorePathHash::LEN;
const MIN_LEN: usize = Self::SEP_POS + 1 + 1;
const MAX_LEN: usize = 212;
pub fn path(&self) -> &str {
&self.path
}
pub fn root(&self) -> &str {
&Self::STORE_PREFIX[..Self::STORE_PREFIX.len() - 1]
}
pub fn hash_str(&self) -> &str {
&self.path[Self::STORE_PREFIX.len()..Self::SEP_POS]
}
pub fn hash(&self) -> StorePathHash {
StorePathHash(
<[u8; StorePathHash::LEN]>::try_from(
self.path[Self::STORE_PREFIX.len()..Self::SEP_POS].as_bytes(),
)
.unwrap(),
) | }
}
impl TryFrom<String> for StorePath {
type Error = Error;
// https://github.com/NixOS/nix/blob/abb8ef619ba2fab3ae16fb5b5430215905bac723/src/libstore/store-api.cc#L85
fn try_from(path: String) -> Result<Self, Self::Error> {
use failure::ensure;
fn is_valid_hash(s: &[u8]) -> bool {
s.iter().all(|&b| match b {
b'e' | b'o' | b'u' | b't' => false,
b'a'..=b'z' | b'0'..=b'9' => true,
_ => false,
})
}
fn is_valid_name(s: &[u8]) -> bool {
const VALID_CHARS: &[u8] = b"+-._?=";
s.iter()
.all(|&b| b.is_ascii_alphanumeric() || VALID_CHARS.contains(&b))
}
ensure!(
Self::MIN_LEN <= path.len() && path.len() <= Self::MAX_LEN,
"Length {} is not in range [{}, {}]",
path.len(),
Self::MIN_LEN,
Self::MAX_LEN,
);
ensure!(path.is_ascii(), "Not ascii string: {}", path);
ensure!(
path.as_bytes()[Self::SEP_POS] == b'-',
"Hash seperator `-` not found",
);
let hash = &path[Self::STORE_PREFIX.len()..Self::SEP_POS];
let name = &path[Self::SEP_POS + 1..];
ensure!(is_valid_hash(hash.as_bytes()), "Invalid hash '{}'", hash);
ensure!(is_valid_name(name.as_bytes()), "Invalid name '{}'", name);
// Already checked
Ok(Self {
path: path.to_owned(),
})
}
}
impl TryFrom<&'_ str> for StorePath {
type Error = Error;
fn try_from(path: &'_ str) -> Result<Self, Self::Error> {
Self::try_from(path.to_owned())
}
}
impl fmt::Display for StorePath {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.path(), f)
}
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_snapshot;
#[test]
fn test_nar_info_format() {
let mut nar = Nar {
store_path: StorePath::try_from(
"/nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10",
)
.unwrap(),
meta: NarMeta {
url: "some/url".to_owned(),
compression: Some("xz".to_owned()),
file_hash: Some("file:hash".to_owned()),
file_size: Some(123),
nar_hash: "nar:hash".to_owned(),
nar_size: 456,
deriver: Some("some.drv".to_owned()),
sig: Some("s:i/g 2".to_owned()),
ca: Some("fixed:hash".to_owned()),
},
references: "ref1 ref2".to_owned(),
};
assert_snapshot!(nar.format_nar_info().to_string(), @r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References: ref1 ref2
Sig: s:i/g 2
Deriver: some.drv
CA: fixed:hash
"###);
nar.references = String::new();
nar.meta.deriver = None;
nar.meta.ca = None;
assert_snapshot!(nar.format_nar_info().to_string(), @r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References:
Sig: s:i/g 2
"###);
}
#[test]
fn test_nar_info_parse() {
let raw = r###"
StorePath: /nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10
URL: some/url
Compression: xz
FileHash: file:hash
FileSize: 123
NarHash: nar:hash
NarSize: 456
References: ref1 ref2
Sig: s:i/g 2
Deriver: some.drv
CA: fixed:hash
"###;
let nar = Nar {
store_path: StorePath::try_from(
"/nix/store/yhzvzdq82lzk0kvrp3i79yhjnhps6qpk-hello-2.10",
)
.unwrap(),
meta: NarMeta {
url: "some/url".to_owned(),
compression: Some("xz".to_owned()),
file_hash: Some("file:hash".to_owned()),
file_size: Some(123),
nar_hash: "nar:hash".to_owned(),
nar_size: 456,
deriver: Some("some.drv".to_owned()),
sig: Some("s:i/g 2".to_owned()),
ca: Some("fixed:hash".to_owned()),
},
references: "ref1 ref2".to_owned(),
};
assert_eq!(Nar::parse_nar_info(raw).unwrap(), nar);
}
} | }
pub fn name(&self) -> &str {
&self.path[Self::SEP_POS + 1..] | random_line_split |
main.rs | #![deny(bare_trait_objects)]
#![allow(dead_code)]
use lucet_module::{
FunctionSpec, Module, ModuleData, SerializedModule, TableElement, TrapManifest, TrapSite,
VersionInfo,
};
use byteorder::{LittleEndian, ReadBytesExt};
use colored::Colorize;
use object::{Object, ObjectSection, ObjectSymbol, SymbolKind, SymbolScope};
use std::env;
use std::fs::File;
use std::io::Cursor;
use std::io::Read;
use std::mem;
#[derive(Debug)]
struct ArtifactSummary<'a> {
buffer: &'a Vec<u8>,
obj: &'a object::File<'a>,
symbols: StandardSymbols<'a>,
data_segments: Option<DataSegments>,
serialized_module: Option<SerializedModule>,
exported_functions: Vec<&'a str>,
imported_symbols: Vec<&'a str>,
}
#[derive(Debug)]
struct StandardSymbols<'a> {
lucet_module: Option<object::read::Symbol<'a, 'a>>,
}
#[derive(Debug)]
struct DataSegments {
segments: Vec<DataSegment>,
}
#[derive(Debug)]
struct DataSegment {
offset: u32,
len: u32,
data: Vec<u8>,
}
impl<'a> ArtifactSummary<'a> {
fn new(buffer: &'a Vec<u8>, obj: &'a object::File<'_>) -> Self {
Self {
buffer,
obj,
symbols: StandardSymbols { lucet_module: None },
data_segments: None,
serialized_module: None,
exported_functions: Vec::new(),
imported_symbols: Vec::new(),
}
}
fn read_memory(&self, addr: u64, size: u64) -> Option<&'a [u8]> {
// `addr` is really more of an offset from the start of the segment.
for section in self.obj.sections() {
let bytes = section.data_range(addr, size).ok().flatten();
if bytes.is_some() {
return bytes;
}
}
None
}
fn gather(&mut self) {
for sym in self.obj.symbols() {
match sym.name() {
Ok(ref name) if name == &"lucet_module" => self.symbols.lucet_module = Some(sym),
Ok(ref name) if name == &"" => continue,
Err(_) => continue,
_ => {
if sym.kind() == SymbolKind::Text && sym.scope() == SymbolScope::Dynamic {
self.exported_functions.push(sym.name().unwrap());
} else if sym.scope() == SymbolScope::Unknown {
self.imported_symbols.push(sym.name().unwrap());
}
}
}
}
self.serialized_module = self.symbols.lucet_module.as_ref().map(|module_sym| {
let buffer = self
.read_memory(
module_sym.address(),
mem::size_of::<SerializedModule>() as u64,
)
.unwrap();
let mut rdr = Cursor::new(buffer);
let version = VersionInfo::read_from(&mut rdr).unwrap();
SerializedModule {
version,
module_data_ptr: rdr.read_u64::<LittleEndian>().unwrap(),
module_data_len: rdr.read_u64::<LittleEndian>().unwrap(),
tables_ptr: rdr.read_u64::<LittleEndian>().unwrap(),
tables_len: rdr.read_u64::<LittleEndian>().unwrap(),
function_manifest_ptr: rdr.read_u64::<LittleEndian>().unwrap(),
function_manifest_len: rdr.read_u64::<LittleEndian>().unwrap(),
}
});
}
fn get_symbol_name_for_addr(&self, addr: u64) -> Option<&str> {
self.obj.symbol_map().get(addr).map(|sym| sym.name())
}
}
fn main() {
let path = env::args().nth(1).unwrap();
let mut fd = File::open(path).expect("open");
let mut buffer = Vec::new();
fd.read_to_end(&mut buffer).expect("read");
let object = object::File::parse(&buffer).expect("parse");
let mut summary = ArtifactSummary::new(&buffer, &object);
summary.gather();
print_summary(summary);
}
/// Parse a trap manifest for function `f`, if it has one.
///
/// `parse_trap_manifest` may very understandably be confusing. Why not use `f.traps()`? In
/// `lucet-objdump` the module has been accessed by reading the file and following structures as
/// they exist at rest. This means pointers are not relocated, so slices that would be valid when
/// loaded through the platform's loader currently have pointers that are not valid for memory
/// access.
///
/// In particular, trap pointers are correct with respect to 0 being the start of the file (or,
/// buffer, after reading), which means we can (and must) rebuild a correct slice from the buffer.
fn parse_trap_manifest<'a>(
summary: &'a ArtifactSummary<'a>,
f: &FunctionSpec,
) -> Option<TrapManifest<'a>> {
if let Some(faulty_trap_manifest) = f.traps() {
let trap_ptr = faulty_trap_manifest.traps.as_ptr();
let traps_count = faulty_trap_manifest.traps.len();
let traps_byte_count = traps_count * std::mem::size_of::<TrapSite>();
if let Some(traps_byte_slice) =
summary.read_memory(trap_ptr as u64, traps_byte_count as u64)
{
let real_trap_ptr = traps_byte_slice.as_ptr() as *const TrapSite;
Some(TrapManifest {
traps: unsafe { std::slice::from_raw_parts(real_trap_ptr, traps_count) },
})
} else {
println!(
"Failed to read trap bytes for function {:?}, at {:p}",
f, trap_ptr
);
None
}
} else {
None
}
}
fn load_module<'b, 'a: 'b>(
summary: &'a ArtifactSummary<'a>,
serialized_module: &SerializedModule,
tables: &'b [&[TableElement]],
) -> Module<'b> {
let module_data_bytes = summary
.read_memory(
serialized_module.module_data_ptr,
serialized_module.module_data_len,
)
.unwrap();
let module_data =
ModuleData::deserialize(module_data_bytes).expect("ModuleData can be deserialized");
let function_manifest_bytes = summary
.read_memory(
serialized_module.function_manifest_ptr,
serialized_module.function_manifest_len,
)
.unwrap();
let function_manifest = unsafe {
std::slice::from_raw_parts(
function_manifest_bytes.as_ptr() as *const FunctionSpec,
serialized_module.function_manifest_len as usize,
)
};
Module {
version: serialized_module.version.clone(),
module_data,
tables,
function_manifest,
}
}
fn summarize_module<'a, 'b: 'a>(summary: &'a ArtifactSummary<'a>, module: &Module<'b>) {
let module_data = &module.module_data;
let tables = module.tables;
let function_manifest = module.function_manifest;
println!(" Heap Specification:");
if let Some(heap_spec) = module_data.heap_spec() {
println!(" {:9}: {} bytes", "Reserved", heap_spec.reserved_size);
println!(" {:9}: {} bytes", "Guard", heap_spec.guard_size);
println!(" {:9}: {} bytes", "Initial", heap_spec.initial_size);
if let Some(max_size) = heap_spec.max_size {
println!(" {:9}: {} bytes", "Maximum", max_size);
} else {
println!(" {:9}: None", "Maximum");
}
} else {
println!(" {}", "MISSING".red().bold());
}
println!();
println!(" Sparse Page Data:");
if let Some(sparse_page_data) = module_data.sparse_data() {
println!(" {:6}: {}", "Count", sparse_page_data.pages().len());
let mut allempty = true;
let mut anyempty = false;
for (i, page) in sparse_page_data.pages().iter().enumerate() {
match page {
Some(page) => {
allempty = false;
println!(
" Page[{}]: {:p}, size: {}",
i,
page.as_ptr(),
if page.len() != 4096 {
format!(
"{} (page size, expected 4096)",
format!("{}", page.len()).bold().red()
)
.red()
} else {
format!("{}", page.len()).green()
}
);
}
None => {
anyempty = true;
}
};
}
if allempty && !sparse_page_data.pages().is_empty() {
println!(" (all pages empty)");
} else if anyempty {
println!(" (empty pages omitted)");
}
} else {
println!(" {}", "MISSING!".red().bold());
}
println!();
println!("Tables:");
if tables.is_empty() {
println!(" No tables.");
} else {
for (i, table) in tables.iter().enumerate() {
println!(" Table {}: {:?}", i, table);
}
}
println!();
println!("Signatures:");
for (i, s) in module_data.signatures().iter().enumerate() {
println!(" Signature {}: {}", i, s);
}
println!();
println!("Functions:");
if function_manifest.len() != module_data.function_info().len() {
println!(
" {} function manifest and function info have diverging function counts",
"lucetc bug:".red().bold()
);
println!(
" function_manifest length : {}",
function_manifest.len()
);
println!(
" module data function count : {}",
module_data.function_info().len()
);
println!(" Will attempt to display information about functions anyway, but trap/code information may be misaligned with symbols and signatures.");
}
for (i, f) in function_manifest.iter().enumerate() {
let header_name = summary.get_symbol_name_for_addr(f.ptr().as_usize() as u64);
if i >= module_data.function_info().len() {
// This is one form of the above-mentioned bug case
// Half the function information is missing, so just report the issue and continue.
println!(
" Function {} {}",
i,
"is missing the module data part of its declaration".red()
);
match header_name {
Some(name) => {
println!(" ELF header name: {}", name);
}
None => {
println!(" No corresponding ELF symbol.");
}
};
break;
}
let colorize_name = |x: Option<&str>| match x {
Some(name) => name.green(),
None => "None".red().bold(),
};
let fn_meta = &module_data.function_info()[i];
println!(" Function {} (name: {}):", i, colorize_name(fn_meta.name));
if fn_meta.name != header_name {
println!(
" Name {} with name declared in ELF headers: {}",
"DISAGREES".red().bold(),
colorize_name(header_name)
);
}
println!(
" Signature (index {}): {}",
fn_meta.signature.as_u32() as usize,
module_data.signatures()[fn_meta.signature.as_u32() as usize]
);
println!(" Start: {:#010x}", f.ptr().as_usize());
println!(" Code length: {} bytes", f.code_len());
if let Some(trap_manifest) = parse_trap_manifest(&summary, f) {
let trap_count = trap_manifest.traps.len();
println!(" Trap information:");
if trap_count > 0 {
println!(
" {} {} ...",
trap_manifest.traps.len(),
if trap_count == 1 { "trap" } else { "traps" },
);
for trap in trap_manifest.traps {
println!(" $+{:#06x}: {:?}", trap.offset, trap.code);
}
} else {
println!(" No traps for this function");
}
}
}
println!();
println!("Globals:");
if !module_data.globals_spec().is_empty() {
for global_spec in module_data.globals_spec().iter() {
println!(" {:?}", global_spec.global());
for name in global_spec.export_names() {
println!(" Exported as: {}", name);
}
}
} else {
println!(" None");
}
println!();
println!("Exported Functions/Symbols:");
let mut exported_symbols = summary.exported_functions.clone();
for export in module_data.export_functions() {
match module_data.function_info()[export.fn_idx.as_u32() as usize].name {
Some(name) => {
println!(" Internal name: {}", name);
// The "internal name" is probably the first exported name for this function.
// Remove it from the exported_symbols list to not double-count
if let Some(idx) = exported_symbols.iter().position(|x| *x == name) {
exported_symbols.remove(idx);
}
}
None => {
println!(" No internal name");
}
}
// Export names do not have the guest_func_ prefix that symbol names get, and as such do
// not need to be removed from `exported_symbols` (which is built entirely from
// ELF-declared exports, with namespaced names)
println!(" Exported as: {}", export.names.join(", "));
}
if !exported_symbols.is_empty() {
println!();
println!(" Other exported symbols (from ELF headers):");
for export in exported_symbols {
println!(" {}", export);
}
}
println!();
println!("Imported Functions/Symbols:");
let mut imported_symbols = summary.imported_symbols.clone();
for import in module_data.import_functions() {
match module_data.function_info()[import.fn_idx.as_u32() as usize].name {
Some(name) => {
println!(" Internal name: {}", name);
}
None => {
println!(" No internal name");
}
}
println!(" Imported as: {}/{}", import.module, import.name);
// Remove from the imported_symbols list to not double-count imported functions
if let Some(idx) = imported_symbols.iter().position(|x| x == &import.name) |
}
if !imported_symbols.is_empty() {
println!();
println!(" Other imported symbols (from ELF headers):");
for import in &imported_symbols {
println!(" {}", import);
}
}
}
fn print_summary(summary: ArtifactSummary<'_>) {
println!("Required Symbols:");
println!(
" {:30}: {}",
"lucet_module",
exists_to_str(&summary.symbols.lucet_module)
);
if let Some(ref serialized_module) = summary.serialized_module {
println!("Native module components:");
println!(
" {:30}: {}",
"module_data_ptr",
ptr_to_str(serialized_module.module_data_ptr)
);
println!(
" {:30}: {}",
"module_data_len", serialized_module.module_data_len
);
println!(
" {:30}: {}",
"tables_ptr",
ptr_to_str(serialized_module.tables_ptr)
);
println!(" {:30}: {}", "tables_len", serialized_module.tables_len);
println!(
" {:30}: {}",
"function_manifest_ptr",
ptr_to_str(serialized_module.function_manifest_ptr)
);
println!(
" {:30}: {}",
"function_manifest_len", serialized_module.function_manifest_len
);
let tables_bytes = summary
.read_memory(
serialized_module.tables_ptr,
serialized_module.tables_len * mem::size_of::<&[TableElement]>() as u64,
)
.unwrap();
let tables = unsafe {
std::slice::from_raw_parts(
tables_bytes.as_ptr() as *const &[TableElement],
serialized_module.tables_len as usize,
)
};
let mut reconstructed_tables = Vec::new();
// same situation as trap tables - these slices are valid as if the module was
// dlopen'd, but we just read it as a flat file. So read through the ELF view and use
// pointers to that for the real slices.
for table in tables {
let table_bytes = summary
.read_memory(
table.as_ptr() as usize as u64,
(table.len() * mem::size_of::<TableElement>()) as u64,
)
.unwrap();
reconstructed_tables.push(unsafe {
std::slice::from_raw_parts(
table_bytes.as_ptr() as *const TableElement,
table.len() as usize,
)
});
}
let module = load_module(&summary, serialized_module, &reconstructed_tables);
println!("\nModule:");
summarize_module(&summary, &module);
} else {
println!("The symbol `lucet_module` is {}, so lucet-objdump cannot look at most of the interesting parts.", "MISSING".red().bold());
}
println!();
println!("Data Segments:");
if let Some(data_segments) = summary.data_segments {
println!(" {:6}: {}", "Count", data_segments.segments.len());
for segment in &data_segments.segments {
println!(
" {:7}: {:6} {:6}: {:6}",
"Offset", segment.offset, "Length", segment.len,
);
}
} else {
println!(" {}", "MISSING!".red().bold());
}
}
fn ptr_to_str(p: u64) -> colored::ColoredString {
if p != 0 {
format!("exists; address: {:#x}", p).green()
} else {
"MISSING!".red().bold()
}
}
fn exists_to_str<T>(p: &Option<T>) -> colored::ColoredString {
match p {
Some(_) => "exists".green(),
None => "MISSING!".red().bold(),
}
}
| {
imported_symbols.remove(idx);
} | conditional_block |
main.rs | #![deny(bare_trait_objects)]
#![allow(dead_code)]
use lucet_module::{
FunctionSpec, Module, ModuleData, SerializedModule, TableElement, TrapManifest, TrapSite,
VersionInfo,
};
use byteorder::{LittleEndian, ReadBytesExt};
use colored::Colorize;
use object::{Object, ObjectSection, ObjectSymbol, SymbolKind, SymbolScope};
use std::env;
use std::fs::File;
use std::io::Cursor;
use std::io::Read;
use std::mem;
#[derive(Debug)]
struct ArtifactSummary<'a> {
buffer: &'a Vec<u8>,
obj: &'a object::File<'a>,
symbols: StandardSymbols<'a>,
data_segments: Option<DataSegments>,
serialized_module: Option<SerializedModule>,
exported_functions: Vec<&'a str>,
imported_symbols: Vec<&'a str>,
}
#[derive(Debug)]
struct StandardSymbols<'a> {
lucet_module: Option<object::read::Symbol<'a, 'a>>,
}
#[derive(Debug)]
struct DataSegments {
segments: Vec<DataSegment>,
}
#[derive(Debug)]
struct DataSegment {
offset: u32,
len: u32,
data: Vec<u8>,
}
impl<'a> ArtifactSummary<'a> {
fn new(buffer: &'a Vec<u8>, obj: &'a object::File<'_>) -> Self {
Self {
buffer,
obj,
symbols: StandardSymbols { lucet_module: None },
data_segments: None,
serialized_module: None,
exported_functions: Vec::new(),
imported_symbols: Vec::new(),
}
}
fn read_memory(&self, addr: u64, size: u64) -> Option<&'a [u8]> {
// `addr` is really more of an offset from the start of the segment.
for section in self.obj.sections() {
let bytes = section.data_range(addr, size).ok().flatten();
if bytes.is_some() {
return bytes;
}
}
None
}
fn gather(&mut self) {
for sym in self.obj.symbols() {
match sym.name() {
Ok(ref name) if name == &"lucet_module" => self.symbols.lucet_module = Some(sym),
Ok(ref name) if name == &"" => continue,
Err(_) => continue,
_ => {
if sym.kind() == SymbolKind::Text && sym.scope() == SymbolScope::Dynamic {
self.exported_functions.push(sym.name().unwrap());
} else if sym.scope() == SymbolScope::Unknown {
self.imported_symbols.push(sym.name().unwrap());
}
}
}
}
self.serialized_module = self.symbols.lucet_module.as_ref().map(|module_sym| {
let buffer = self
.read_memory(
module_sym.address(),
mem::size_of::<SerializedModule>() as u64,
)
.unwrap();
let mut rdr = Cursor::new(buffer);
let version = VersionInfo::read_from(&mut rdr).unwrap();
SerializedModule {
version,
module_data_ptr: rdr.read_u64::<LittleEndian>().unwrap(),
module_data_len: rdr.read_u64::<LittleEndian>().unwrap(),
tables_ptr: rdr.read_u64::<LittleEndian>().unwrap(),
tables_len: rdr.read_u64::<LittleEndian>().unwrap(),
function_manifest_ptr: rdr.read_u64::<LittleEndian>().unwrap(),
function_manifest_len: rdr.read_u64::<LittleEndian>().unwrap(),
}
});
}
fn get_symbol_name_for_addr(&self, addr: u64) -> Option<&str> {
self.obj.symbol_map().get(addr).map(|sym| sym.name())
}
}
fn main() {
let path = env::args().nth(1).unwrap();
let mut fd = File::open(path).expect("open");
let mut buffer = Vec::new();
fd.read_to_end(&mut buffer).expect("read");
let object = object::File::parse(&buffer).expect("parse");
let mut summary = ArtifactSummary::new(&buffer, &object);
summary.gather();
print_summary(summary);
}
/// Parse a trap manifest for function `f`, if it has one.
///
/// `parse_trap_manifest` may very understandably be confusing. Why not use `f.traps()`? In
/// `lucet-objdump` the module has been accessed by reading the file and following structures as
/// they exist at rest. This means pointers are not relocated, so slices that would be valid when
/// loaded through the platform's loader currently have pointers that are not valid for memory
/// access.
///
/// In particular, trap pointers are correct with respect to 0 being the start of the file (or,
/// buffer, after reading), which means we can (and must) rebuild a correct slice from the buffer.
fn parse_trap_manifest<'a>(
summary: &'a ArtifactSummary<'a>,
f: &FunctionSpec,
) -> Option<TrapManifest<'a>> {
if let Some(faulty_trap_manifest) = f.traps() {
let trap_ptr = faulty_trap_manifest.traps.as_ptr();
let traps_count = faulty_trap_manifest.traps.len();
let traps_byte_count = traps_count * std::mem::size_of::<TrapSite>();
if let Some(traps_byte_slice) =
summary.read_memory(trap_ptr as u64, traps_byte_count as u64)
{
let real_trap_ptr = traps_byte_slice.as_ptr() as *const TrapSite;
Some(TrapManifest {
traps: unsafe { std::slice::from_raw_parts(real_trap_ptr, traps_count) },
})
} else {
println!(
"Failed to read trap bytes for function {:?}, at {:p}",
f, trap_ptr
);
None
}
} else {
None
}
}
fn load_module<'b, 'a: 'b>(
summary: &'a ArtifactSummary<'a>,
serialized_module: &SerializedModule,
tables: &'b [&[TableElement]],
) -> Module<'b> {
let module_data_bytes = summary
.read_memory(
serialized_module.module_data_ptr,
serialized_module.module_data_len,
)
.unwrap();
let module_data =
ModuleData::deserialize(module_data_bytes).expect("ModuleData can be deserialized");
let function_manifest_bytes = summary
.read_memory(
serialized_module.function_manifest_ptr,
serialized_module.function_manifest_len,
)
.unwrap();
let function_manifest = unsafe {
std::slice::from_raw_parts(
function_manifest_bytes.as_ptr() as *const FunctionSpec,
serialized_module.function_manifest_len as usize,
)
};
Module {
version: serialized_module.version.clone(),
module_data,
tables,
function_manifest,
}
}
fn summarize_module<'a, 'b: 'a>(summary: &'a ArtifactSummary<'a>, module: &Module<'b>) {
let module_data = &module.module_data;
let tables = module.tables;
let function_manifest = module.function_manifest;
println!(" Heap Specification:");
if let Some(heap_spec) = module_data.heap_spec() {
println!(" {:9}: {} bytes", "Reserved", heap_spec.reserved_size);
println!(" {:9}: {} bytes", "Guard", heap_spec.guard_size);
println!(" {:9}: {} bytes", "Initial", heap_spec.initial_size);
if let Some(max_size) = heap_spec.max_size {
println!(" {:9}: {} bytes", "Maximum", max_size);
} else {
println!(" {:9}: None", "Maximum");
}
} else {
println!(" {}", "MISSING".red().bold());
}
println!();
println!(" Sparse Page Data:");
if let Some(sparse_page_data) = module_data.sparse_data() {
println!(" {:6}: {}", "Count", sparse_page_data.pages().len());
let mut allempty = true;
let mut anyempty = false;
for (i, page) in sparse_page_data.pages().iter().enumerate() {
match page {
Some(page) => {
allempty = false;
println!(
" Page[{}]: {:p}, size: {}",
i,
page.as_ptr(),
if page.len() != 4096 {
format!(
"{} (page size, expected 4096)",
format!("{}", page.len()).bold().red()
)
.red()
} else {
format!("{}", page.len()).green()
}
);
}
None => {
anyempty = true;
}
};
}
if allempty && !sparse_page_data.pages().is_empty() {
println!(" (all pages empty)");
} else if anyempty {
println!(" (empty pages omitted)");
}
} else {
println!(" {}", "MISSING!".red().bold());
}
println!();
println!("Tables:");
if tables.is_empty() {
println!(" No tables.");
} else {
for (i, table) in tables.iter().enumerate() {
println!(" Table {}: {:?}", i, table);
}
}
println!();
println!("Signatures:");
for (i, s) in module_data.signatures().iter().enumerate() {
println!(" Signature {}: {}", i, s);
}
println!();
println!("Functions:");
if function_manifest.len() != module_data.function_info().len() {
println!(
" {} function manifest and function info have diverging function counts",
"lucetc bug:".red().bold()
);
println!(
" function_manifest length : {}",
function_manifest.len()
);
println!(
" module data function count : {}",
module_data.function_info().len()
);
println!(" Will attempt to display information about functions anyway, but trap/code information may be misaligned with symbols and signatures.");
}
for (i, f) in function_manifest.iter().enumerate() {
let header_name = summary.get_symbol_name_for_addr(f.ptr().as_usize() as u64);
if i >= module_data.function_info().len() {
// This is one form of the above-mentioned bug case
// Half the function information is missing, so just report the issue and continue.
println!( | Some(name) => {
println!(" ELF header name: {}", name);
}
None => {
println!(" No corresponding ELF symbol.");
}
};
break;
}
let colorize_name = |x: Option<&str>| match x {
Some(name) => name.green(),
None => "None".red().bold(),
};
let fn_meta = &module_data.function_info()[i];
println!(" Function {} (name: {}):", i, colorize_name(fn_meta.name));
if fn_meta.name != header_name {
println!(
" Name {} with name declared in ELF headers: {}",
"DISAGREES".red().bold(),
colorize_name(header_name)
);
}
println!(
" Signature (index {}): {}",
fn_meta.signature.as_u32() as usize,
module_data.signatures()[fn_meta.signature.as_u32() as usize]
);
println!(" Start: {:#010x}", f.ptr().as_usize());
println!(" Code length: {} bytes", f.code_len());
if let Some(trap_manifest) = parse_trap_manifest(&summary, f) {
let trap_count = trap_manifest.traps.len();
println!(" Trap information:");
if trap_count > 0 {
println!(
" {} {} ...",
trap_manifest.traps.len(),
if trap_count == 1 { "trap" } else { "traps" },
);
for trap in trap_manifest.traps {
println!(" $+{:#06x}: {:?}", trap.offset, trap.code);
}
} else {
println!(" No traps for this function");
}
}
}
println!();
println!("Globals:");
if !module_data.globals_spec().is_empty() {
for global_spec in module_data.globals_spec().iter() {
println!(" {:?}", global_spec.global());
for name in global_spec.export_names() {
println!(" Exported as: {}", name);
}
}
} else {
println!(" None");
}
println!();
println!("Exported Functions/Symbols:");
let mut exported_symbols = summary.exported_functions.clone();
for export in module_data.export_functions() {
match module_data.function_info()[export.fn_idx.as_u32() as usize].name {
Some(name) => {
println!(" Internal name: {}", name);
// The "internal name" is probably the first exported name for this function.
// Remove it from the exported_symbols list to not double-count
if let Some(idx) = exported_symbols.iter().position(|x| *x == name) {
exported_symbols.remove(idx);
}
}
None => {
println!(" No internal name");
}
}
// Export names do not have the guest_func_ prefix that symbol names get, and as such do
// not need to be removed from `exported_symbols` (which is built entirely from
// ELF-declared exports, with namespaced names)
println!(" Exported as: {}", export.names.join(", "));
}
if !exported_symbols.is_empty() {
println!();
println!(" Other exported symbols (from ELF headers):");
for export in exported_symbols {
println!(" {}", export);
}
}
println!();
println!("Imported Functions/Symbols:");
let mut imported_symbols = summary.imported_symbols.clone();
for import in module_data.import_functions() {
match module_data.function_info()[import.fn_idx.as_u32() as usize].name {
Some(name) => {
println!(" Internal name: {}", name);
}
None => {
println!(" No internal name");
}
}
println!(" Imported as: {}/{}", import.module, import.name);
// Remove from the imported_symbols list to not double-count imported functions
if let Some(idx) = imported_symbols.iter().position(|x| x == &import.name) {
imported_symbols.remove(idx);
}
}
if !imported_symbols.is_empty() {
println!();
println!(" Other imported symbols (from ELF headers):");
for import in &imported_symbols {
println!(" {}", import);
}
}
}
fn print_summary(summary: ArtifactSummary<'_>) {
println!("Required Symbols:");
println!(
" {:30}: {}",
"lucet_module",
exists_to_str(&summary.symbols.lucet_module)
);
if let Some(ref serialized_module) = summary.serialized_module {
println!("Native module components:");
println!(
" {:30}: {}",
"module_data_ptr",
ptr_to_str(serialized_module.module_data_ptr)
);
println!(
" {:30}: {}",
"module_data_len", serialized_module.module_data_len
);
println!(
" {:30}: {}",
"tables_ptr",
ptr_to_str(serialized_module.tables_ptr)
);
println!(" {:30}: {}", "tables_len", serialized_module.tables_len);
println!(
" {:30}: {}",
"function_manifest_ptr",
ptr_to_str(serialized_module.function_manifest_ptr)
);
println!(
" {:30}: {}",
"function_manifest_len", serialized_module.function_manifest_len
);
let tables_bytes = summary
.read_memory(
serialized_module.tables_ptr,
serialized_module.tables_len * mem::size_of::<&[TableElement]>() as u64,
)
.unwrap();
let tables = unsafe {
std::slice::from_raw_parts(
tables_bytes.as_ptr() as *const &[TableElement],
serialized_module.tables_len as usize,
)
};
let mut reconstructed_tables = Vec::new();
// same situation as trap tables - these slices are valid as if the module was
// dlopen'd, but we just read it as a flat file. So read through the ELF view and use
// pointers to that for the real slices.
for table in tables {
let table_bytes = summary
.read_memory(
table.as_ptr() as usize as u64,
(table.len() * mem::size_of::<TableElement>()) as u64,
)
.unwrap();
reconstructed_tables.push(unsafe {
std::slice::from_raw_parts(
table_bytes.as_ptr() as *const TableElement,
table.len() as usize,
)
});
}
let module = load_module(&summary, serialized_module, &reconstructed_tables);
println!("\nModule:");
summarize_module(&summary, &module);
} else {
println!("The symbol `lucet_module` is {}, so lucet-objdump cannot look at most of the interesting parts.", "MISSING".red().bold());
}
println!();
println!("Data Segments:");
if let Some(data_segments) = summary.data_segments {
println!(" {:6}: {}", "Count", data_segments.segments.len());
for segment in &data_segments.segments {
println!(
" {:7}: {:6} {:6}: {:6}",
"Offset", segment.offset, "Length", segment.len,
);
}
} else {
println!(" {}", "MISSING!".red().bold());
}
}
fn ptr_to_str(p: u64) -> colored::ColoredString {
if p != 0 {
format!("exists; address: {:#x}", p).green()
} else {
"MISSING!".red().bold()
}
}
fn exists_to_str<T>(p: &Option<T>) -> colored::ColoredString {
match p {
Some(_) => "exists".green(),
None => "MISSING!".red().bold(),
}
} | " Function {} {}",
i,
"is missing the module data part of its declaration".red()
);
match header_name { | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.